//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the result.
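//
//     For instance (x86-like pseudo code; the opcode and register numbers
//     are illustrative):
//       %reg1025 = MOVSX32rr16 %reg1024    <-- 16 -> 32-bit sign extension
//       ...
//       ...      = use %reg1024            <-- use of the pre-extension value
//     can be rewritten so that the use reads the low sub-register of the
//     extension result instead:
//       ...      = use %reg1025:sub_16bit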
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Loads:
//
//     Loads that can be folded into a later instruction. A load is foldable
//     if it loads to virtual registers and the virtual register defined has
//     a single use.
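//
//     E.g. (pseudo code; the actual folding opportunities are target
//     specific):
//       %vreg1 = load <mem>
//       %vreg2 = add %vreg0, %vreg1
//     =>
//       %vreg2 = add %vreg0, <mem>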
//
// - Optimize Copies and Bitcast:
//
//     Rewrite copies and bitcasts to avoid cross register bank copies
//     when possible.
//
//     E.g., Consider the following example, where capital and lower
//     letters denote different register files:
//       b = copy A <-- cross-bank copy
//       C = copy b <-- cross-bank copy
//     =>
//       b = copy A <-- cross-bank copy
//       C = copy A <-- same-bank copy
//
//     E.g., for bitcast:
//       b = bitcast A <-- cross-bank copy
//       C = bitcast b <-- cross-bank copy
//     =>
//       b = bitcast A <-- cross-bank copy
//       C = copy A    <-- same-bank copy
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "peephole-opt"

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));
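
// Both options are ordinary cl::opt flags, so any tool that links this pass
// exposes them on its command line; e.g. "llc -disable-peephole" turns the
// pass off, and "llc -aggressive-ext-opt" enables the dominator-tree-based
// extension rewriting implemented in optimizeExtInstr below.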

STATISTIC(NumReuse,      "Number of extension results reused");
STATISTIC(NumCmps,       "Number of compares eliminated");
STATISTIC(NumImmFold,    "Number of move immediates folded");
STATISTIC(NumLoadFold,   "Number of loads folded");
STATISTIC(NumSelects,    "Number of selects optimized");
STATISTIC(NumCopiesBitcasts, "Number of copies/bitcasts optimized");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool optimizeSelect(MachineInstr *MI);
    bool optimizeCopyOrBitcast(MachineInstr *MI);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI,
                        SmallSet<unsigned, 16> &FoldAsLoadDefCandidates);
  };
} // end anonymous namespace

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                "Peephole Optimizations", false, false)

/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of
/// the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
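///
/// For instance (virtual register numbers and the sub-register index are
/// illustrative):
///   %reg1025 = <sext> %reg1024
///    ...
///   ...      = use %reg1024
/// is rewritten, by materializing a COPY of the result's sub-register, into:
///   %reg1025 = <sext> %reg1024
///    ...
///   %reg1026 = COPY %reg1025:4
///   ...      = use %reg1026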
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != nullptr;

  // The source has other uses. See if we can replace the other uses with use
  // of the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
    ReachedBBs.insert(UI.getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) {
    MachineInstr *UseMI = UseMO.getParent();
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range
      // of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
      if (UI.isPHI())
        PHIBBs.insert(UI.getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
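///
/// For instance, on a target with distinct integer and floating point banks
/// (class names purely illustrative), (GPR64, sub_32) and (GPR32, 0) would
/// share a register file, while (GPR32, 0) and (FPR32, 0) would not.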
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg)
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != nullptr;
  // At most one of the registers is a sub register; make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub register; check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != nullptr;
  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != nullptr;
}

/// \brief Get the index of the definition and source for \p Copy
/// instruction.
/// \pre Copy.isCopy() or Copy.isBitcast().
/// \return True if the Copy instruction has only one register source
/// and one register definition. Otherwise, \p DefIdx and \p SrcIdx
/// are invalid.
static bool getCopyOrBitcastDefUseIdx(const MachineInstr &Copy,
                                      unsigned &DefIdx, unsigned &SrcIdx) {
  assert((Copy.isCopy() || Copy.isBitcast()) && "Wrong operation type.");
  if (Copy.isCopy()) {
    // Copy instructions are supposed to be: Def = Src.
    if (Copy.getDesc().getNumOperands() != 2)
      return false;
    DefIdx = 0;
    SrcIdx = 1;
    assert(Copy.getOperand(DefIdx).isDef() && "Use comes before def!");
    return true;
  }
  // Bitcast case.
  // Bitcasts with more than one def are not supported.
  if (Copy.getDesc().getNumDefs() != 1)
    return false;
  // Initialize SrcIdx to an undefined operand.
  SrcIdx = Copy.getDesc().getNumOperands();
  for (unsigned OpIdx = 0, EndOpIdx = SrcIdx; OpIdx != EndOpIdx; ++OpIdx) {
    const MachineOperand &MO = Copy.getOperand(OpIdx);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef())
      DefIdx = OpIdx;
    else if (SrcIdx != EndOpIdx)
      // Multiple sources?
      return false;
    else
      SrcIdx = OpIdx;
  }
  return true;
}

/// \brief Optimize a copy or bitcast instruction to avoid cross
/// register bank copies. The optimization looks through a chain of
/// copies and tries to find a source that has a compatible register
/// class.
/// Two register classes are considered to be compatible if they share
/// the same register bank.
/// New copies issued by this optimization are register allocator
/// friendly. This optimization does not remove any copy as it may
/// overconstrain the register allocator, but replaces some copies when
/// possible.
/// \pre \p MI is a Copy (MI->isCopy() is true)
/// \return True, when \p MI has been optimized. In that case, \p MI has
/// been removed from its parent.
bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
  unsigned DefIdx, SrcIdx;
  if (!MI || !getCopyOrBitcastDefUseIdx(*MI, DefIdx, SrcIdx))
    return false;

  const MachineOperand &MODef = MI->getOperand(DefIdx);
  assert(MODef.isReg() && "Copies must be between registers.");
  unsigned Def = MODef.getReg();

  if (TargetRegisterInfo::isPhysicalRegister(Def))
    return false;

  const TargetRegisterClass *DefRC = MRI->getRegClass(Def);
  unsigned DefSubReg = MODef.getSubReg();

  unsigned Src;
  unsigned SrcSubReg;
  bool ShouldRewrite = false;
  MachineInstr *Copy = MI;
  const TargetRegisterInfo &TRI = *TM->getRegisterInfo();

  // Follow the chain of copies until we reach the top or find a
  // more suitable source.
  do {
    unsigned CopyDefIdx, CopySrcIdx;
    if (!getCopyOrBitcastDefUseIdx(*Copy, CopyDefIdx, CopySrcIdx))
      break;
    const MachineOperand &MO = Copy->getOperand(CopySrcIdx);
    assert(MO.isReg() && "Copies must be between registers.");
    Src = MO.getReg();

    if (TargetRegisterInfo::isPhysicalRegister(Src))
      break;

    const TargetRegisterClass *SrcRC = MRI->getRegClass(Src);
    SrcSubReg = MO.getSubReg();

    // If this source does not incur a cross register bank copy, use it.
    ShouldRewrite = shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC,
                                          SrcSubReg);

    // Follow the chain of copies: get the definition of Src.
    Copy = MRI->getVRegDef(Src);
  } while (!ShouldRewrite && Copy && (Copy->isCopy() || Copy->isBitcast()));

  // If we did not find a more suitable source, there is nothing to optimize.
  if (!ShouldRewrite || Src == MI->getOperand(SrcIdx).getReg())
    return false;

  // Rewrite the copy to avoid a cross register bank penalty.
  unsigned NewVR = TargetRegisterInfo::isPhysicalRegister(Def) ? Def :
    MRI->createVirtualRegister(DefRC);
  MachineInstr *NewCopy = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                  TII->get(TargetOpcode::COPY), NewVR)
    .addReg(Src, 0, SrcSubReg);
  NewCopy->getOperand(0).setSubReg(DefSubReg);

  MRI->replaceRegWith(Def, NewVR);
  MRI->clearKillFlags(NewVR);
  MI->eraseFromParent();
  ++NumCopiesBitcasts;
  return true;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers and the virtual
/// register defined has a single use.
bool PeepholeOptimizer::isLoadFoldable(
                              MachineInstr *MI,
                              SmallSet<unsigned, 16> &FoldAsLoadDefCandidates) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneNonDBGUse when inserting
  // loads. It should be checked when processing uses of the load, since
  // uses can be removed during peephole.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneNonDBGUse(Reg)) {
    FoldAsLoadDefCandidates.insert(Reg);
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}
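
// A minimal illustration of the folding (pseudo machine code; the opcodes
// and virtual register numbers are hypothetical):
//   %vreg1 = MOV32ri 42               <-- recorded by isMoveImmediate
//   %vreg2 = ADD32rr %vreg0, %vreg1
// becomes, when the target's FoldImmediate hook succeeds:
//   %vreg2 = ADD32ri %vreg0, 42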

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipOptnoneFunction(*MF.getFunction()))
    return false;

  DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
  DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');

  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : nullptr;

  bool Changed = false;

  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    SmallPtrSet<MachineInstr*, 8> LocalMIs;
    SmallSet<unsigned, 4> ImmDefRegs;
    DenseMap<unsigned, MachineInstr*> ImmDefMIs;
    SmallSet<unsigned, 16> FoldAsLoadDefCandidates;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // Skip debug values. They should not affect this peephole optimization.
      if (MI->isDebugValue())
        continue;

      // If there exists an instruction which belongs to the following
      // categories, we will discard the load candidates.
      if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefCandidates.clear();
        continue;
      }

      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefCandidates.clear();

      if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefCandidates) &&
          !FoldAsLoadDefCandidates.empty()) {
        const MCInstrDesc &MIDesc = MI->getDesc();
        for (unsigned i = MIDesc.getNumDefs(); i != MIDesc.getNumOperands();
             ++i) {
          const MachineOperand &MOp = MI->getOperand(i);
          if (!MOp.isReg())
            continue;
          unsigned FoldAsLoadDefReg = MOp.getReg();
          if (FoldAsLoadDefCandidates.count(FoldAsLoadDefReg)) {
            // We need to fold load after optimizeCmpInstr, since
            // optimizeCmpInstr can enable folding by converting SUB to CMP.
            // Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and
            // we need it for markUsesInDebugValueAsUndef().
            unsigned FoldedReg = FoldAsLoadDefReg;
            MachineInstr *DefMI = nullptr;
            MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                          FoldAsLoadDefReg,
                                                          DefMI);
            if (FoldMI) {
              // Update LocalMIs since we replaced MI with FoldMI and deleted
              // DefMI.
              DEBUG(dbgs() << "Replacing: " << *MI);
              DEBUG(dbgs() << "     With: " << *FoldMI);
              LocalMIs.erase(MI);
              LocalMIs.erase(DefMI);
              LocalMIs.insert(FoldMI);
              MI->eraseFromParent();
              DefMI->eraseFromParent();
              MRI->markUsesInDebugValueAsUndef(FoldedReg);
              FoldAsLoadDefCandidates.erase(FoldedReg);
              ++NumLoadFold;
              // MI is replaced with FoldMI.
              Changed = true;
              break;
            }
          }
        }
      }
    }
  }

  return Changed;
}