//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
STATISTIC(NumSpills  , "Number of register spills");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
enum SpillerName { simple, local };

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));
//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
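  // Note: ImplicitDefed (like the other per-vreg maps here) is indexed by the
  // virtual register number offset by TargetRegisterInfo::FirstVirtualRegister,
  // so it only needs to be sized for the virtual registers created so far.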
void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
  if (LowSpillSlot == NO_STACK_SLOT)
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
  unsigned Idx = SS-LowSpillSlot;
  while (Idx >= SpillSlotToUsesMap.size())
    SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
  Virt2StackSlotMap[virtReg] = SS;

void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
         (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = SS;
int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;

void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;

int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
  std::map<const TargetRegisterClass*, int>::iterator I =
    EmergencySpillSlots.find(RC);
  if (I != EmergencySpillSlots.end())
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
  if (LowSpillSlot == NO_STACK_SLOT)
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
  EmergencySpillSlots[RC] = SS;
void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
  if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
    // If FI < LowSpillSlot, this stack reference was produced by
    // instruction selection and is not a spill
    if (FI >= LowSpillSlot) {
      assert(FI >= 0 && "Spill slot index should not be negative!");
      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
             && "Invalid spill slot");
      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);

  // add new memory reference
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    int FI = MO.getIndex();
    if (MF.getFrameInfo()->isFixedObjectIndex(FI))
    // This stack reference was produced by instruction selection and
    // is not a spill.
    if (FI < LowSpillSlot)
    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
           && "Invalid spill slot");
    SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
  MI2VirtMap.erase(MI);
  SpillPt2VirtMap.erase(MI);
  RestorePt2VirtMap.erase(MI);
  EmergencySpillMap.erase(MI);
void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();
  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";

void VirtRegMap::dump() const {

//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}
struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
  bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands). This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isReg() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned SubIdx = MO.getSubReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                    == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                DOUT << '\t' << *LoadMI;

                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);

            MF.getRegInfo().setPhysRegUsed(RReg);
            MI.getOperand(i).setReg(RReg);
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//

class AvailableSpills;

/// LocalSpiller - This spiller does a simple pass over the machine basic
/// block to attempt to keep spills in registers as much as possible for
/// blocks that have low register pressure (the vreg may be spilled due to
/// register pressure in other blocks).
class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
  DenseMap<MachineInstr*, unsigned> DistanceMap;
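  // DistanceMap records, for each instruction already visited in the current
  // basic block, its distance (instruction count) from the top of the block.
  // TransferDeadness uses it to locate the last prior use or def of a register
  // when an identity copy is removed.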
  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    DOUT << "\n**** Local spiller rewriting function '"
         << MF.getFunction()->getName() << "':\n";
    DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"

    for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
      RewriteMBB(*MBB, VRM);

    // Mark unused spill slots.
    MachineFrameInfo *MFI = MF.getFrameInfo();
    int SS = VRM.getLowSpillSlot();
    if (SS != VirtRegMap::NO_STACK_SLOT)
      for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
        if (!VRM.isSpillSlotUsed(SS)) {
          MFI->RemoveStackObject(SS);

    DOUT << "**** Post Machine Instrs ****\n";

  void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                        unsigned Reg, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps);
  bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills, BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
  bool CommuteToFoldReload(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           unsigned VirtReg, unsigned SrcReg, int SS,
                           std::vector<MachineOperand*> &KillOps,
                           const TargetRegisterInfo *TRI,
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           std::vector<MachineOperand*> &KillOps,
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
/// top down, keep track of which spill slots or remats are available in each
/// register.
///
/// Note that not all physregs are created equal here. In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register-allocated program is using that
/// we cannot CHANGE, but we can read if we like. We keep track of this on a
/// per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsOrReMatsAvailable entries. The predicate 'canClobberPhysReg()'
/// checks this bit and addAvailable sets it.
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
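  // Each mapped value packs two facts: the upper bits hold the physical
  // register that currently holds the slot's value, and the low bit is the
  // CanClobber flag (see canClobberPhysReg and addAvailable below), i.e.
  // entries are stored as (PhysReg << 1) | CanClobber.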
  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg. This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);

  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.

  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg. If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires. The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
      DOUT << "SS#" << SlotOrReMat << "\n";

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  PhysRegsAvailable.erase(I);
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {

/// InvalidateKill - A MI that defines the specified register is being deleted,
/// invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
    KillOps[Reg]->setIsKill(false);
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
      else if (!MO.isDead())

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
      Done = true;  // Stop after scanning all the operands of this MI.
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        const TargetRegisterInfo* TRI) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
    unsigned Reg = MO.getReg();

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);

    if (i < TID.getNumOperands() &&
        TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
      // Unless it's a two-address operand, this is the new kill.

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
    unsigned Reg = MO.getReg();
    // It also defines (or partially defines) aliases.
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
// ReusedOp - For each reused operand, we keep track of a bit of information, in
// case we need to rollback upon processing a new operand. See comments below.
  // The MachineInstr operand that reused an available value.

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
/// ReuseInfo - This maintains a collection of ReusedOp's for each operand that
/// is reused instead of reloaded.
class VISIBILITY_HIDDEN ReuseInfo {
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
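  // PhysRegsClobbered is indexed by physical register number: markClobbered()
  // sets a register's bit once an operand rewrite has clobbered it, and
  // isClobbered() is consulted before that register is trusted for a reuse.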
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  bool hasReuses() const {
    return !Reuses.empty();

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);

  /// GetRegForReload - We are about to emit a reload into PhysReg. If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           std::vector<MachineOperand*> &KillOps,
    const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()

    if (Reuses.empty()) return PhysReg;  // This is most often empty.
    for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
      ReusedOp &Op = Reuses[ro];
      // If we find some other reuse that was supposed to use this register
      // exactly for its reload, we can change this reload to use ITS reload
      // register. That is, unless its reload register has already been
      // considered and subsequently rejected because it has also been reused
      // by another operand.
      if (Op.PhysRegReused == PhysReg &&
          Rejected.count(Op.AssignedPhysReg) == 0) {
        // Yup, use the reload register that we didn't use before.
        unsigned NewReg = Op.AssignedPhysReg;
        Rejected.insert(PhysReg);
        return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                               RegKills, KillOps, VRM);
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register. If so, codegen the previous reload
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used. This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place. However, that
        // register could hold a reuse. Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI, VRM);
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;

        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);

        Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);

        UpdateKills(*MII, RegKills, KillOps, TRI);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";

    // Finally, PhysReg is now available, go ahead and use it.
  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// sees r1 is taken by t2, tries t2's reload register r0
  /// sees r0 is taken by t3, tries t3's reload register r1
  /// sees r1 is taken by t2, tries t2's reload register r0 ...
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           std::vector<MachineOperand*> &KillOps,
    SmallSet<unsigned, 8> Rejected;
    return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                           RegKills, KillOps, VRM);
/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl %eax, -32(%ebp)
///     movl -36(%ebp), %eax
///     orl  %eax, -32(%ebp)
///     orl  -36(%ebp), %eax
///     mov  %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     std::vector<MachineOperand*> &KillOps,
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator.
    // Increment the iterator first.
    if (VRM.isAssignedReg(UnfoldVR))
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
932 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
940 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
941 MachineOperand &MO = MI.getOperand(i);
942 if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
944 unsigned VirtReg = MO.getReg();
945 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
947 if (VRM.isAssignedReg(VirtReg)) {
948 unsigned PhysReg = VRM.getPhys(VirtReg);
949 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
951 } else if (VRM.isReMaterialized(VirtReg))
953 int SS = VRM.getStackSlot(VirtReg);
954 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
956 if (TRI->regsOverlap(PhysReg, UnfoldPR))
960 if (VRM.hasPhys(VirtReg)) {
961 PhysReg = VRM.getPhys(VirtReg);
962 if (!TRI->regsOverlap(PhysReg, UnfoldPR))
966 // Ok, we'll need to reload the value into a register which makes
967 // it impossible to perform the store unfolding optimization later.
968 // Let's see if it is possible to fold the load if the store is
969 // unfolded. This allows us to perform the store unfolding
971 SmallVector<MachineInstr*, 4> NewMIs;
972 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
973 assert(NewMIs.size() == 1);
974 MachineInstr *NewMI = NewMIs.back();
976 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
978 SmallVector<unsigned, 2> Ops;
980 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
982 VRM.addSpillSlotUse(SS, FoldedMI);
983 if (!VRM.hasPhys(UnfoldVR))
984 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
985 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
986 MII = MBB.insert(MII, FoldedMI);
987 InvalidateKills(MI, RegKills, KillOps);
988 VRM.RemoveMachineInstrFromMaps(&MI);
990 MF.DeleteMachineInstr(NewMI);
993 MF.DeleteMachineInstr(NewMI);
/// CommuteToFoldReload -
///     r1 = op r1, r2<kill>
/// If op is commutable and r2 is killed, then we can xform these to
///     r2 = op r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       unsigned VirtReg, unsigned SrcReg, int SS,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
                                       const TargetRegisterInfo *TRI,
  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))

  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  if (DefMII != MBB.begin() &&
      TID.isCommutable() &&
      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
    MachineInstr *ReloadMI = prior(DefMII);
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    MF.DeleteMachineInstr(CommutedMI);

    VRM.addSpillSlotUse(SS, FoldedMI);
    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
    MachineInstr *StoreMI = MII;
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(ReloadMI);
    MBB.erase(ReloadMI);
    InvalidateKills(*DefMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
/// findSuperReg - Find the super-register in the given register class whose
/// SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       int Idx, unsigned PhysReg, int StackSlot,
                                       const TargetRegisterClass *RC,
                                       bool isAvailable, MachineInstr *&LastStore,
                                       AvailableSpills &Spills,
                                       SmallSet<MachineInstr*, 4> &ReMatDefs,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
    DOUT << "Removed dead store:\t" << *LastStore;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now. Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
    if ((int)DI->second < LastUDDist)
    LastUDDist = DI->second;

  const TargetInstrDesc &TID = LastUDMI->getDesc();
  MachineOperand *LastUD = NULL;
  for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = LastUDMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
    if (!LastUD || (LastUD->isUse() && MO.isDef()))
    if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
  if (LastUD->isDef())
    LastUD->setIsDead();
    LastUD->setIsKill();
    KillOps[Reg] = LastUD;
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(TRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead. This map keeps track
  // of inserted stores that are not used. If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);
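  // Invariant maintained below: RegKills[Reg] is set exactly when some already
  // processed operand marked Reg as killed, and KillOps[Reg] points at that
  // operand so the kill flag can be transferred or cleared if Reg is reused.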
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
      NextMII = next(MII);

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue;  // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          int SS = VRM.getStackSlot(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SS, LoadMI);

        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        UpdateKills(*prior(MII), RegKills, KillOps, TRI);
        DOUT << '\t' << *prior(MII);
    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue;  // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      NextMII = next(MII);
    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // instruction.
        RegInfo->setPhysRegUsed(VirtReg);

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // before so scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
        VirtUseOps.push_back(i);
    // Process all of the spilled uses and all non spilled reg references.
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, &MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);

      // This virtual register is now known to be a spilled value.
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      //     MOV32_mr fi#1, EDI
      //     = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
      // This spilled operand might be part of a two-address operand. If this
      // is the case, then changing it will necessarily require changing the
      // def part of the instruction as well. However, in some cases, we
      // aren't allowed to modify the reused register. If none of these cases
      // apply, reuse it.
      bool CanReuse = true;
      int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
          MI.getOperand(ti).isReg() &&
          MI.getOperand(ti).getReg() == VirtReg) {
        // Okay, we have a two address operand. We can reuse this physreg as
        // long as we are allowed to clobber the value and there isn't an
        // earlier def that has already clobbered the physreg.
        CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
                   !ReusedOperands.isClobbered(PhysReg);

      // If this stack slot value is already available, reuse it!
      if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
        DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
        DOUT << "Reusing SS#" << ReuseSlot;
      DOUT << " from physreg "
           << TRI->getName(PhysReg) << " for vreg"
           << VirtReg << " instead of reloading into physreg "
           << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);

      // The only technical detail we have is that we don't know that
      // PhysReg won't be clobbered by a reloaded stack slot that occurs
      // later in the instruction. In particular, consider 'op V1, V2'.
      // If V1 is available in physreg R0, we would choose to reuse it
      // here, instead of reloading it into the register the allocator
      // indicated (say R1). However, V2 might have to be reloaded
      // later, and it might indicate that it needs to live in R0. When
      // this occurs, we need to have information available that
      // indicates it is safe to use R1 for the reload instead of R0.
      //
      // To further complicate matters, we might conflict with an alias,
      // or R0 and R1 might not be compatible with each other. In this
      // case, we actually insert a reload for V1 in R1, ensuring that
      // we can get at R0 or its alias.
      ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                              VRM.getPhys(VirtReg), VirtReg);

      // Only mark it clobbered if this is a use&def operand.
        ReusedOperands.markClobbered(PhysReg);

      if (MI.getOperand(i).isKill() &&
          ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
        // This was the last use and the spilled value is still available
        // for reuse. That means the spill was unnecessary!
        MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          MaybeDeadStores[ReuseSlot] = NULL;
      // Otherwise we have a situation where we have a two-address instruction
      // whose mod/ref operand needs to be reloaded. This reload is already
      // available in some register "PhysReg", but if we used PhysReg as the
      // operand to our 2-addr instruction, the instruction would modify
      // PhysReg. This isn't cool if something later uses PhysReg and expects
      // to get its initial value.
      //
      // To avoid this problem, and to avoid doing a load right after a store,
      // we emit a copy from PhysReg into the designated register for this
      // operand.
      unsigned DesignatedReg = VRM.getPhys(VirtReg);
      assert(DesignatedReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                          Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      // If the mapped designated register is actually the physreg we have
      // incoming, we don't need to insert a dead copy.
      if (DesignatedReg == PhysReg) {
        // If this stack slot value is already available, reuse it!
        if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
          DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          DOUT << "Reusing SS#" << ReuseSlot;
        DOUT << " from physreg " << TRI->getName(PhysReg)
             << " for vreg" << VirtReg
             << " instead of reloading into same physreg.\n";
        unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
        MI.getOperand(i).setReg(RReg);
        ReusedOperands.markClobbered(RReg);

      const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
      RegInfo->setPhysRegUsed(DesignatedReg);
      ReusedOperands.markClobbered(DesignatedReg);
      TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

      MachineInstr *CopyMI = prior(MII);
      UpdateKills(*CopyMI, RegKills, KillOps, TRI);

      // This invalidates DesignatedReg.
      Spills.ClobberPhysReg(DesignatedReg);

      Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
      MI.getOperand(i).setReg(RReg);
      DOUT << '\t' << *prior(MII);
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                    Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        MachineInstr *LoadMI = prior(MII);
        VRM.addSpillSlotUse(SSorRMId, LoadMI);

      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps, TRI);
      DOUT << '\t' << *prior(MII);
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;

      // MI2VirtMap can be updated, which invalidates the iterator.
      // Increment the iterator first.
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);

              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            goto ProcessNextInst;

        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
        SmallVector<MachineInstr*, 4> NewMIs;
            TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
          MBB.insert(MII, NewMIs[0]);
          InvalidateKills(MI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          --NextMII;  // backtrack to the unfolded instruction.
          goto ProcessNextInst;
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      bool isDead = !(MR & VirtRegMap::isRef);
      MachineInstr *NewStore = NULL;
      if (MR & VirtRegMap::isModRef) {
        unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
        SmallVector<MachineInstr*, 4> NewMIs;
        // We can reuse this physreg as long as we are allowed to clobber
        // the value and there isn't an earlier def that has already clobbered
            !TII->isStoreToStackSlot(&MI, SS)) {  // Not profitable!
          MachineOperand *KillOpnd =
            DeadStore->findRegisterUseOperand(PhysReg, true);
          // Note, if the store is storing a sub-register, it's possible the
          // super-register is needed below.
          if (KillOpnd && !KillOpnd->getSubReg() &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)){
            MBB.insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
            MBB.insert(MII, NewStore);
            VRM.addSpillSlotUse(SS, NewStore);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            --NextMII;  // backtrack to the unfolded instruction.

      if (isDead) {  // Previous store is dead.
        // If we get here, the store is dead, nuke it now.
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);

      MaybeDeadStores[SS] = NULL;
        // Treat this store as a spill merged into a copy. That makes the
        // stack slot value available.
        VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
        goto ProcessNextInst;
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              goto ProcessNextInst;

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*!clobber*/);
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super-register use is killed.
            assert(KillRegs[0] == Dst || TRI->isSubRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
        PhysReg = MI.getOperand(TiedOp).getReg();
        unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
        assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
               "Can't find corresponding super-register!");
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
      SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
      NextMII = next(MII);

      // Check to see if this is a noop copy. If so, eliminate the
      // instruction before considering the dest reg to be changed.
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          DOUT << "Removing now-noop copy: " << MI;
          InvalidateKills(MI, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(&MI);
          UpdateKills(*LastStore, RegKills, KillOps, TRI);
          goto ProcessNextInst;

    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}