//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
STATISTIC(NumFSTMGened, "Number of fstm instructions generated");

namespace {
  struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
    const TargetInstrInfo *TII;
    const MRegisterInfo *MRI;
    ARMFunctionInfo *AFI;
    RegScavenger *RS;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM load / store optimization pass";
    }
    struct MemOpQueueEntry {
      int Offset;
      unsigned Position;
      MachineBasicBlock::iterator MBBI;
      bool Merged;
      MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
        : Offset(o), Position(p), MBBI(i), Merged(false) {}
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
    typedef MemOpQueue::iterator MemOpQueueIter;
    SmallVector<MachineBasicBlock::iterator, 4>
    MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                 int Opcode, unsigned Size, unsigned Scratch,
                 MemOpQueue &MemOps);

    void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  };
}
/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass() {
  return new ARMLoadStoreOpt();
}
static int getLoadStoreMultipleOpcode(int Opcode) {
  switch (Opcode) {
  case ARM::LDR:
    NumLDMGened++;
    return ARM::LDM;
  case ARM::STR:
    NumSTMGened++;
    return ARM::STM;
  case ARM::FLDS:
    NumFLDMGened++;
    return ARM::FLDMS;
  case ARM::FLDD:
    NumFLDMGened++;
    return ARM::FLDMD;
  case ARM::FSTS:
    NumFSTMGened++;
    return ARM::FSTMS;
  case ARM::FSTD:
    NumFSTMGened++;
    return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
/// mergeOps - Create and insert a LDM or STM with Base as the base register
/// and the registers in Regs as the register operands to be loaded / stored.
/// It returns true if the transformation is done.
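///
/// For example (illustrative; the pass may pick different registers), when
/// the starting offset is not zero a new base is materialized first:
///   ldr r0, [r4, #8]
///   ldr r1, [r4, #12]
///   ldr r2, [r4, #16]
/// =>
///   add r2, r4, #8
///   ldmia r2, {r0, r1, r2}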
static bool mergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                     int Offset, unsigned Base, bool BaseKill, int Opcode,
                     unsigned Scratch,
                     SmallVector<std::pair<unsigned, bool>, 8> &Regs,
                     const TargetInstrInfo *TII) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;

  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM4 && Offset == 4)
    Mode = ARM_AM::ib;
  else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
    Mode = ARM_AM::da;
  else if (isAM4 && Offset == -4 * (int)NumRegs)
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // If the starting offset isn't zero, insert a MI to materialize a new
    // base, but only if it is cost effective, i.e. we are merging more than
    // two loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase = 0;
    if (Opcode == ARM::LDR)
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1].first;
    else {
      // Use the scratch register as the new base.
      NewBase = Scratch;
      if (NewBase == 0)
        return false;
    }
    int BaseOpc = ARM::ADDri;
    if (Offset < 0) {
      BaseOpc = ARM::SUBri;
      Offset = -Offset;
    }
    int ImmedOffset = ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      return false; // Probably not worth it then.

    BuildMI(MBB, MBBI, TII->get(BaseOpc), NewBase)
      .addReg(Base, false, false, BaseKill).addImm(ImmedOffset);
    Base = NewBase;
    BaseKill = true; // New base is always killed right after its use.
  }

  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
        .addImm(ARM_AM::getAM4ModeImm(Mode))
    : BuildMI(MBB, MBBI, TII->get(Opcode)).addReg(Base, false, false, BaseKill)
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs));
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, isDef, false, Regs[i].second);

  return true;
}
/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions.
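///
/// For example (illustrative), a gap in the offset sequence splits the run:
///   ldr r0, [r4]
///   ldr r1, [r4, #4]
///   ldr r2, [r4, #12]
/// =>
///   ldmia r4, {r0, r1}
///   ldr r2, [r4, #12]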
SmallVector<MachineBasicBlock::iterator, 4>
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              unsigned Scratch, MemOpQueue &MemOps) {
  SmallVector<MachineBasicBlock::iterator, 4> Merges;
  SmallVector<std::pair<unsigned,bool>, 8> Regs;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  unsigned PReg = MemOps[SIndex].MBBI->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  bool isKill = MemOps[SIndex].MBBI->getOperand(0).isKill();
  Regs.push_back(std::make_pair(PReg, isKill));
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    isKill = MemOps[i].MBBI->getOperand(0).isKill();
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      Offset += Size;
      PRegNum = RegNum;
      Regs.push_back(std::make_pair(Reg, isKill));
    } else {
      // Can't merge this in. Try to merge the earlier ones first.
      if (mergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Scratch, Regs,
                   TII)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
        }
      }
      SmallVector<MachineBasicBlock::iterator, 4> Merges2 =
        MergeLDR_STR(MBB, i, Base, Opcode, Size, Scratch, MemOps);
      Merges.append(Merges2.begin(), Merges2.end());
      return Merges;
    }

    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  bool BaseKill = Loc->findRegisterUseOperand(Base, true) != -1;
  if (mergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Scratch, Regs,
               TII)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
    }
  }

  return Merges;
}
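
/// isMatchingDecrement - Return true if MI is a SUBri computing
/// Base = Base - Bytes.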
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes) {
  return (MI && MI->getOpcode() == ARM::SUBri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
}
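
/// isMatchingIncrement - Return true if MI is an ADDri computing
/// Base = Base + Bytes.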
static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes) {
  return (MI && MI->getOpcode() == ARM::ADDri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes);
}
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return 0;
  case ARM::LDR:
  case ARM::STR:
  case ARM::FLDS:
  case ARM::FSTS:
    return 4;
  case ARM::FLDD:
  case ARM::FSTD:
    return 8;
  case ARM::LDM:
  case ARM::STM:
    return (MI->getNumOperands() - 2) * 4;
  case ARM::FLDMS:
  case ARM::FSTMS:
  case ARM::FLDMD:
  case ARM::FSTMD:
    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
  }
}
/// mergeBaseUpdateLSMultiple - Fold preceding/trailing inc/dec of base
/// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;

  if (isAM4) {
    if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
      return false;

    // Can't use the updating AM4 sub-mode if the base register is also a dest
    // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
    for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) {
      if (MI->getOperand(i).getReg() == Base)
        return false;
    }

    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
        MBB.erase(PrevMBBI);
        return true;
      } else if (Mode == ARM_AM::ib &&
                 isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
        MBB.erase(PrevMBBI);
        return true;
      }
    }

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
          isMatchingIncrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
        MBB.erase(NextMBBI);
        return true;
      } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
                 isMatchingDecrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
        MBB.erase(NextMBBI);
        return true;
      }
    }
  } else {
    // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
    if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
      return false;

    ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
    unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
        MBB.erase(PrevMBBI);
        return true;
      }
    }

    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingIncrement(NextMBBI, Base, Bytes)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
        MBB.erase(NextMBBI);
        return true;
      }
    }
  }

  return false;
}
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_PRE;
  case ARM::STR: return ARM::STR_PRE;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_POST;
  case ARM::STR: return ARM::STR_POST;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
  default: abort();
  }
  return 0;
}
/// mergeBaseUpdateLoadStore - Fold preceding/trailing inc/dec of base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
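///
/// ldr rd, [rn]
/// rn := rn + 4
/// =>
/// ldr rd, [rn], #4
///
/// (Illustrative; the pre-indexed form is used instead when the base update
/// precedes the memory op.)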
static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const TargetInstrInfo *TII) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
      (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
    return false;

  bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(PrevMBBI);
  }

  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes)) {
      DoMerge = true;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    }
    if (DoMerge)
      MBB.erase(NextMBBI);
  }

  if (!DoMerge)
    return false;

  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
                        true, isDPR ? 2 : 1);
  if (isLd) {
    if (isAM2)
      // LDR_PRE, LDR_POST
      BuildMI(MBB, MBBI, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, true)
        .addReg(Base).addReg(0).addImm(Offset);
    else
      // FLDMS, FLDMD
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base, false, false, BaseKill)
        .addImm(Offset).addReg(MI->getOperand(0).getReg(), true);
  } else {
    MachineOperand &MO = MI->getOperand(0);
    if (isAM2)
      // STR_PRE, STR_POST
      BuildMI(MBB, MBBI, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), false, false, MO.isKill())
        .addReg(Base).addReg(0).addImm(Offset);
    else
      // FSTMS, FSTMD
      BuildMI(MBB, MBBI, TII->get(NewOpc)).addReg(Base)
        .addImm(Offset).addReg(MO.getReg(), false, false, MO.isKill());
  }
  MBB.erase(MBBI);

  return true;
}
/// isMemoryOp - Returns true if instruction is a memory operation that this
/// pass is capable of operating on.
static bool isMemoryOp(MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  switch (Opcode) {
  default: break;
  case ARM::LDR:
  case ARM::STR:
    return MI->getOperand(1).isRegister() && MI->getOperand(2).getReg() == 0;
  case ARM::FLDS:
  case ARM::FSTS:
    return MI->getOperand(1).isRegister();
  case ARM::FLDD:
  case ARM::FSTD:
    return MI->getOperand(1).isRegister();
  }
  return false;
}
/// AdvanceRS - Advance register scavenger to just before the earliest memory
/// op that is being merged.
void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
  unsigned Position = MemOps[0].Position;
  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
    if (MemOps[i].Position < Position) {
      Position = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  if (Loc != MBB.begin())
    RS->forward(prior(Loc));
}
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offset into LDM / STM ops.
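///
/// For example (illustrative):
///   ldr r0, [r4]
///   ldr r1, [r4, #4]
///   ldr r2, [r4, #8]
/// =>
///   ldmia r4, {r0, r1, r2}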
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  unsigned Position = 0;

  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    bool Advance = false;
    bool TryMerge = false;
    bool Clobber = false;

    bool isMemOp = isMemoryOp(MBBI);
    if (isMemOp) {
      int Opcode = MBBI->getOpcode();
      bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned OffIdx = MBBI->getNumOperands()-1;
      unsigned OffField = MBBI->getOperand(OffIdx).getImm();
      int Offset = isAM2
        ? ARM_AM::getAM2Offset(OffField) : ARM_AM::getAM5Offset(OffField) * 4;
      if (isAM2) {
        if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      } else {
        if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
          Offset = -Offset;
      }
      // Watch out for:
      //   r4 := ldr [r5]
      //   r5 := ldr [r5, #4]
      //   r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc = Opcode;
        CurrSize = Size;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        NumMemOps++;
        Advance = true;
      } else {
        if (Clobber) {
          TryMerge = true;
          Advance = true;
        }

        if (CurrOpc == Opcode && CurrBase == Base) {
          // Continue adding to the queue.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
            NumMemOps++;
            Advance = true;
          } else {
            for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
                 I != E; ++I) {
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
                NumMemOps++;
                Advance = true;
                break;
              } else if (Offset == I->Offset) {
                // Collision! This can't be merged!
                break;
              }
            }
          }
        }
      }
    }

    if (Advance) {
      ++Position;
      ++MBBI;
    } else
      TryMerge = true;

    if (TryMerge) {
      if (NumMemOps > 1) {
        // Try to find a free register to use as a new base in case it's
        // needed. First advance to the instruction just before the start of
        // the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register. Make sure it's a call clobbered register
        // or a spilled callee-saved register.
        unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
        if (Scratch == 0)
          Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
                                      AFI->getSpilledCSRegisters());
        // Process the load / store instructions.
        RS->forward(prior(MBBI));

        // Merge ops.
        SmallVector<MachineBasicBlock::iterator,4> MBBII =
          MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, Scratch, MemOps);

        // Try folding preceding/trailing base inc/dec into the generated
        // LDM/STM ops.
        for (unsigned i = 0, e = MBBII.size(); i < e; ++i)
          if (mergeBaseUpdateLSMultiple(MBB, MBBII[i]))
            ++NumMerges;
        NumMerges += MBBII.size();

        // Try folding preceding/trailing base inc/dec into those load/store
        // instructions that were not merged to form LDM/STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII))
              ++NumMerges;

        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      }

      CurrBase = 0;
      CurrOpc = -1;
      CurrSize = 0;
      MemOps.clear();
      NumMemOps = 0;

      // If iterator hasn't been advanced and this is not a memory op, skip it.
      // It can't start a new chain anyway.
      if (!Advance && !isMemOp && MBBI != E) {
        ++Position;
        ++MBBI;
      }
    }
  }

  return NumMerges > 0;
}
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
/// (bx lr) into the preceding stack restore so that it restores the value of
/// LR into PC directly:
///   ldmfd sp!, {r7, lr}
///   bx lr
/// =>
///   ldmfd sp!, {r7, pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
    MachineInstr *PrevMI = prior(MBBI);
    if (PrevMI->getOpcode() == ARM::LDM) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() == ARM::LR) {
        PrevMI->setInstrDescriptor(TII->get(ARM::LDM_RET));
        MO.setReg(ARM::PC);
        MBB.erase(MBBI);
        return true;
      }
    }
  }
  return false;
}
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = TM.getInstrInfo();
  MRI = TM.getRegisterInfo();
  RS = new RegScavenger();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    Modified |= MergeReturnIntoLDM(MBB);
  }

  delete RS;
  return Modified;
}