//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumSpills, "Number of register spills");
STATISTIC(NumReMats, "Number of re-materializations");
STATISTIC(NumDRM   , "Number of re-materializable defs elided");
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumReused, "Number of values reused");
STATISTIC(NumDSE   , "Number of dead stores elided");
STATISTIC(NumDCE   , "Number of copies elided");
namespace {
  enum SpillerName { simple, local };

  static cl::opt<SpillerName>
  SpillerOpt("spiller",
             cl::desc("Spiller to use: (default: local)"),
             cl::Prefix,
             cl::values(clEnumVal(simple, "  simple spiller"),
                        clEnumVal(local,  "  local spiller"),
                        clEnumValEnd),
             cl::init(local));
}
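// The spiller is selected on the command line through this cl::opt, e.g.
// "llc -spiller=simple"; the local spiller is the default.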
//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//
VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
  grow();
}
void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getSSARegMap()->getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SpillPtsMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
}
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(virtReg);
  int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                        RC->getAlignment());
  Virt2StackSlotMap[virtReg] = frameIndex;
  ++NumSpills;
  return frameIndex;
}
void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((frameIndex >= 0 ||
          (frameIndex >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = frameIndex;
}
int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;
  return ReMatId++;
}
void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;
}
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            unsigned OpNo, MachineInstr *NewMI) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // Compute whether the folded reference reads and/or modifies the value.
  ModRef MRInfo;
  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
      TID->findTiedToSrcOperand(OpNo) != -1) {
    // Folded a two-address operand.
    MRInfo = isModRef;
  } else if (OldMI->getOperand(OpNo).isDef()) {
    MRInfo = isMod;
  } else {
    MRInfo = isRef;
  }

  // Add new memory reference.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}
void VirtRegMap::print(std::ostream &OS) const {
  const MRegisterInfo* MRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << MRI->getName(Virt2PhysMap[i]) << "]\n";
  }

  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(DOUT);
}
//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}
namespace {
  struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
  };
}
bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const MRegisterInfo &MRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg())
          if (MRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getSSARegMap()->getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *prior(MII);
              }

              if (MO.isDef()) {
                MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
                ++NumStores;
              }
            }
            MF.setPhysRegUsed(PhysReg);
            MI.getOperand(i).setReg(PhysReg);
          } else {
            MF.setPhysRegUsed(MO.getReg());
          }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}
//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//
namespace {
  class AvailableSpills;

  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
    SSARegMap *RegMap;
    const MRegisterInfo *MRI;
    const TargetInstrInfo *TII;
  public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
      RegMap = MF.getSSARegMap();
      MRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!) ****\n";
      DEBUG(MF.dump());

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);

      DOUT << "**** Post Machine Instrs ****\n";
      DEBUG(MF.dump());

      return true;
    }
  private:
    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           AvailableSpills &Spills, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
    void SpillRegToStackSlot(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             int Idx, unsigned PhysReg, int StackSlot,
                             const TargetRegisterClass *RC,
                             MachineInstr *&LastStore,
                             AvailableSpills &Spills,
                             SmallSet<MachineInstr*, 4> &ReMatDefs,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
  };
}
/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
/// top down, keep track of which spill slots or remat'ed values are available
/// in each register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register-allocated program is using that
/// we cannot CHANGE, but we can read if we like.  We keep track of this on a
/// per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it if the value may be clobbered.
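/// For example, after "addAvailable(SS, MI, EAX, true)" the entry for SS is
/// (EAX << 1) | 1; getSpillSlotOrReMatPhysReg(SS) shifts the CanClobber bit
/// back out and returns EAX, while canClobberPhysReg(SS) reads the low bit
/// (EAX here is just an illustrative physreg).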
namespace {
class VISIBILITY_HIDDEN AvailableSpills {
  const MRegisterInfo *MRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const MRegisterInfo *mri, const TargetInstrInfo *tii)
    : MRI(mri), TII(tii) {
  }

  const MRegisterInfo *getRegInfo() const { return MRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg.  If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << MRI->getName(Reg) << "\n";
  }
  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
}
/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << MRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The registers may still be available
/// but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << MRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (KillRegs)
      KillRegs->push_back(Reg);
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
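/// For example, if "r0 = opA ..." was followed only by a spill store of r0
/// and that store is deleted because the value is re-materialized instead,
/// the def of r0 in opA may now be dead (opA and r0 are illustrative names).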
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isRegister() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // The def is dead; mark it as such.
    DefOp->setIsDead();
    return true;
  }
  return false;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg]) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->unsetIsKill();
      if (i < TID->numOperands &&
          TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  // Defs end the live ranges of their registers; clear their kill info.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
  }
}
// ReusedOp - For each reused operand, we keep track of a bit of information, in
// case we need to rollback upon processing a new operand.  See comments below.
namespace {
  struct ReusedOp {
    // Operand - The MachineInstr operand that reused an available value.
    unsigned Operand;

    // StackSlotOrReMat - The spill slot or remat id of the value being reused.
    unsigned StackSlotOrReMat;

    // PhysRegReused - The physical register the value was available in.
    unsigned PhysRegReused;

    // AssignedPhysReg - The physreg that was assigned for use by the reload.
    unsigned AssignedPhysReg;

    // VirtReg - The virtual register itself.
    unsigned VirtReg;

    ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
             unsigned vreg)
      : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
        AssignedPhysReg(apr), VirtReg(vreg) {}
  };
  /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
  /// is reused instead of reloaded.
  class VISIBILITY_HIDDEN ReuseInfo {
    MachineInstr &MI;
    std::vector<ReusedOp> Reuses;
    BitVector PhysRegsClobbered;
  public:
    ReuseInfo(MachineInstr &mi, const MRegisterInfo *mri) : MI(mi) {
      PhysRegsClobbered.resize(mri->getNumRegs());
    }

    bool hasReuses() const {
      return !Reuses.empty();
    }
    /// addReuse - If we choose to reuse a virtual register that is already
    /// available instead of reloading it, remember that we did so.
    void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                  unsigned PhysRegReused, unsigned AssignedPhysReg,
                  unsigned VirtReg) {
      // If the reload is to the assigned register anyway, no undo will be
      // required.
      if (PhysRegReused == AssignedPhysReg) return;

      // Otherwise, remember this.
      Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                                AssignedPhysReg, VirtReg));
    }

    void markClobbered(unsigned PhysReg) {
      PhysRegsClobbered.set(PhysReg);
    }

    bool isClobbered(unsigned PhysReg) const {
      return PhysRegsClobbered.test(PhysReg);
    }
    /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
    /// is some other operand that is using the specified register, either pick
    /// a new register to use, or evict the previous reload and use this reg.
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             SmallSet<unsigned, 8> &Rejected,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      if (Reuses.empty()) return PhysReg;  // This is most often empty.

      for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
        ReusedOp &Op = Reuses[ro];
        // If we find some other reuse that was supposed to use this register
        // exactly for its reload, we can change this reload to use ITS reload
        // register. That is, unless its reload register has already been
        // considered and subsequently rejected because it has also been reused
        // by another operand.
        if (Op.PhysRegReused == PhysReg &&
            Rejected.count(Op.AssignedPhysReg) == 0) {
          // Yup, use the reload register that we didn't use before.
          unsigned NewReg = Op.AssignedPhysReg;
          Rejected.insert(PhysReg);
          return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                                 RegKills, KillOps, VRM);
        } else {
          // Otherwise, we might also have a problem if a previously reused
          // value aliases the new register.  If so, codegen the previous reload
          // and use this register instead.
          unsigned PRRU = Op.PhysRegReused;
          const MRegisterInfo *MRI = Spills.getRegInfo();
          if (MRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used.  This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
              MBB->getParent()->getSSARegMap()->getRegClass(Op.VirtReg);

            // Copy Op out of the vector and remove it, we're going to insert an
            // explicit load for it.
            ReusedOp NewOp = Op;
            Reuses.erase(Reuses.begin()+ro);

            // Ok, we're going to try to reload the assigned physreg into the
            // slot that we were supposed to in the first place.  However, that
            // register could hold a reuse.  Check to see if it conflicts or
            // would prefer us to use a different register.
            unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                                  MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
              MRI->reMaterialize(*MBB, MI, NewPhysReg,
                                 VRM.getReMaterializedMI(NewOp.VirtReg));
              ++NumReMats;
            } else {
              MRI->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
              ++NumLoads;
            }
            Spills.ClobberPhysReg(NewPhysReg);
            Spills.ClobberPhysReg(NewOp.PhysRegReused);

            MI->getOperand(NewOp.Operand).setReg(NewPhysReg);

            Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
            MachineBasicBlock::iterator MII = MI;
            --MII;
            UpdateKills(*MII, RegKills, KillOps);
            DOUT << '\t' << *MII;

            DOUT << "Reuse undone!\n";
            --NumReused;

            // Finally, PhysReg is now available, go ahead and use it.
            return PhysReg;
          }
        }
      }
      return PhysReg;
    }
    /// GetRegForReload - Helper for the above GetRegForReload(). Add a
    /// 'Rejected' set to remember which registers have been considered and
    /// rejected for the reload. This avoids infinite looping in cases like
    /// this:
    /// t1 := op t2, t3
    /// t2 <- assigned r0 for use by the reload but ended up reuse r1
    /// t3 <- assigned r1 for use by the reload but ended up reuse r0
    /// t1 <- desires r1
    ///       sees r1 is taken by t2, tries t2's reload register r0
    ///       sees r0 is taken by t3, tries t3's reload register r1
    ///       sees r1 is taken by t2, tries t2's reload register r0 ...
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      SmallSet<unsigned, 8> Rejected;
      return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    }
  };
}
/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl %eax, -32(%ebp)
///     movl -36(%ebp), %eax
///     orl  %eax, -32(%ebp)
/// ==>
///     orl  -36(%ebp), %eax
///     mov  %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                     std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg ||
          DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = MRI->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (MRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && MRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (MRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    PhysReg = VRM.getPhys(VirtReg);
    if (!MRI->regsOverlap(PhysReg, UnfoldPR))
      continue;

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (MRI->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
      assert(Idx != -1);
      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
      if (FoldedMI) {
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        VRM.RemoveFromFoldedVirtMap(&MI);
        MBB.erase(&MI);
        return true;
      }
      delete NewMI;
    }
  }
  return false;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const MRegisterInfo *MRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (MRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       int Idx, unsigned PhysReg, int StackSlot,
                                       const TargetRegisterClass *RC,
                                       MachineInstr *&LastStore,
                                       AvailableSpills &Spills,
                                       SmallSet<MachineInstr*, 4> &ReMatDefs,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
                                       VirtRegMap &VRM) {
  MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot, RC);
  DOUT << "Store:\t" << *next(MII);
  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    MBB.erase(LastStore);
    VRM.RemoveFromFoldedVirtMap(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            MBB.erase(DeadDef);
            VRM.RemoveFromFoldedVirtMap(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg);
  ++NumStores;
}
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(MRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead.  This map keeps track
  // of inserted stores that are not used.  If we see a subsequent store to the
  // same stack slot, the original store is deleted.
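  // For example (illustrative instructions):
  //   store %physreg  -> fi#3   ; spill inserted and recorded here
  //   ...                       ; fi#3 never read in between
  //   store %physreg2 -> fi#3   ; second store makes the first one dead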
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // ReloadedSplits - Splits must be reloaded once per MBB. This keeps track
  // of which split vregs have already been reloaded.
  SmallSet<unsigned, 8> ReloadedSplits;

  // Keep track of kill information.
  BitVector RegKills(MRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(MRI->getNumRegs(), NULL);
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDescriptor *TID = MI.getInstrDescriptor();

    // Insert spills here if asked to.
    std::vector<unsigned> SpillRegs = VRM.getSpillPtSpills(&MI);
    for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
      unsigned VirtReg = SpillRegs[i];
      const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
      unsigned Phys = VRM.getPhys(VirtReg);
      int StackSlot = VRM.getStackSlot(VirtReg);
      MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
      SpillRegToStackSlot(MBB, MII, i, Phys, StackSlot, RC,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
    }
    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, MRI);
    // Process all of the spilled uses and all non spilled reg references.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isRegister() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (MRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        MF.setPhysRegUsed(VirtReg);
        continue;
      }

      assert(MRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual or a physical register?");
      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        MF.setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);

        // If it's a split live interval, insert a reload for the first use
        // unless it's previously defined in the MBB.
        unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
        if (SplitReg) {
          if (ReloadedSplits.insert(VirtReg)) {
            bool HasUse = MO.isUse();
            // If it's a def, we don't need to reload the value unless it's
            // part of a two-address instruction.
            if (!HasUse) {
              for (unsigned j = i+1; j != e; ++j) {
                MachineOperand &MOJ = MI.getOperand(j);
                if (MOJ.isRegister() && MOJ.getReg() == VirtReg) {
                  HasUse = true;
                  break;
                }
              }
            }

            if (HasUse) {
              if (VRM.isReMaterialized(VirtReg)) {
                MRI->reMaterialize(MBB, &MI, Phys,
                                   VRM.getReMaterializedMI(VirtReg));
                ++NumReMats;
              } else {
                const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
                MRI->loadRegFromStackSlot(MBB, &MI, Phys,
                                          VRM.getStackSlot(VirtReg), RC);
                ++NumLoads;
              }
              // This invalidates Phys.
              Spills.ClobberPhysReg(Phys);
              UpdateKills(*prior(MII), RegKills, KillOps);
              DOUT << '\t' << *prior(MII);
            }
          }
        }

        unsigned RReg = SubIdx ? MRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        continue;
      }
      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
      if (!PhysReg && DoReMat) {
        // This use is rematerializable. But perhaps the value is available in
        // a register if the definition is not deleted. If so, check if we can
        // reuse the value.
        ReuseSlot = VRM.getStackSlot(VirtReg);
        if (ReuseSlot != VirtRegMap::NO_STACK_SLOT)
          PhysReg = Spills.getSpillSlotOrReMatPhysReg(ReuseSlot);
      }
      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }
      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        int ti = TID->getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << MRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << MRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // This was the last use and the spilled value is still available
            // for reuse. That means the spill was unnecessary!
            MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
              VRM.RemoveFromFoldedVirtMap(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
        }  // CanReuse
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded.  This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg.  This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
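        // For example, for a two-address "r1 = add r1, r2" whose reloaded
        // operand is currently being reused from r0, we copy r0 into the
        // designated register and let the add clobber the copy instead of r0
        // (instruction and register names here are illustrative).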
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                                     Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << MRI->getName(PhysReg) << " for vreg"
               << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
        MF.setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        MRI->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
          SubIdx ? MRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      }  // if (PhysReg)
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                                 Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      MF.setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        MRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
        ++NumReMats;
      } else {
        const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
        MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID->getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
    }

    DOUT << '\t' << MI;
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // If this is a split live interval, remember we have seen this so
      // we do not need to reload it for later uses.
      unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
      if (SplitReg)
        ReloadedSplits.insert(VirtReg);

      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);
              MRI->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              BackTracked = true;
            } else
              DOUT << "Removing now-noop copy: " << MI;

            VRM.RemoveFromFoldedVirtMap(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            VRM.RemoveFromFoldedVirtMap(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
              MRI->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
            MBB.insert(MII, NewStore);
            VRM.RemoveFromFoldedVirtMap(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            isDead = true;
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveFromFoldedVirtMap(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
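        // For example, if the spill was folded into a plain store such as
        // "MOV32mr fi#2, EAX" on x86, the value is still live in EAX, so the
        // slot contents are also available in that register (instruction and
        // registers here are illustrative).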
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(MRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");
            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the register available
            // in SrcReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
          }
        }
      }
    }
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!MRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst;
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          MBB.erase(&MI);
          Erased = true;
          VRM.RemoveFromFoldedVirtMap(&MI);
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegMap->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getInstrDescriptor()->findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, MRI);
          assert(SuperReg && MRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                                   Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      MF.setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? MRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, LastStore,
                            Spills, ReMatDefs, RegKills, KillOps, VRM);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst;
          if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            MBB.erase(&MI);
            Erased = true;
            VRM.RemoveFromFoldedVirtMap(&MI);
            UpdateKills(*LastStore, RegKills, KillOps);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    if (!Erased && !BackTracked)
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
    MII = NextMII;
  }
}
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}