//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
// The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "arm-ldst-opt"
#include "ARMAddressingModes.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
/// ARMLoadStoreOpt - Post-register-allocation pass that combines
/// load / store instructions to form ldm / stm instructions.
struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
  ARMLoadStoreOpt() : MachineFunctionPass(&ID) {}
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  virtual bool runOnMachineFunction(MachineFunction &Fn);
  virtual const char *getPassName() const {
    return "ARM load / store optimization pass";
  struct MemOpQueueEntry {
    MachineBasicBlock::iterator MBBI;
    MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
      : Offset(o), Position(p), MBBI(i), Merged(false) {};
  typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
  typedef MemOpQueue::iterator MemOpQueueIter;
  bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                int Offset, unsigned Base, bool BaseKill, int Opcode,
                ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
                DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
  void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                    int Opcode, unsigned Size,
                    ARMCC::CondCodes Pred, unsigned PredReg,
                    unsigned Scratch, MemOpQueue &MemOps,
                    SmallVector<MachineBasicBlock::iterator, 4> &Merges);
  void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
  bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MBBI);
  bool MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const TargetInstrInfo *TII,
                                MachineBasicBlock::iterator &I);
  bool MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 MachineBasicBlock::iterator &I);
  bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
  bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
char ARMLoadStoreOpt::ID = 0;
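/// getLoadStoreMultipleOpcode - Map a single load / store opcode to the
/// matching load / store multiple opcode (e.g. LDR -> LDM, FLDD -> FLDMD).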
static int getLoadStoreMultipleOpcode(int Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
static bool isT2i32Load(unsigned Opc) {
  return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
static bool isi32Load(unsigned Opc) {
  return Opc == ARM::LDR || isT2i32Load(Opc);
static bool isT2i32Store(unsigned Opc) {
  return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
static bool isi32Store(unsigned Opc) {
  return Opc == ARM::STR || isT2i32Store(Opc);
/// MergeOps - Create and insert an LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          int Offset, unsigned Base, bool BaseKill,
                          int Opcode, ARMCC::CondCodes Pred,
                          unsigned PredReg, unsigned Scratch, DebugLoc dl,
                          SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = isi32Load(Opcode) || isi32Store(Opcode);
  if (isAM4 && Offset == 4) {
    // Thumb2 does not support ldmib / stmib.
  } else if (isAM4 && Offset == -4 * (int)NumRegs + 4) {
    // Thumb2 does not support ldmda / stmda.
  } else if (isAM4 && Offset == -4 * (int)NumRegs) {
  } else if (Offset != 0) {
    // If starting offset isn't zero, insert an instruction to materialize a new
    // base. But only do so if it is cost effective, i.e. merging more than two
    if (isi32Load(Opcode))
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1].first;
      // Use the scratch register as the new base.
    int BaseOpc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
      BaseOpc = isThumb2 ? ARM::t2SUBri : ARM::SUBri;
    int ImmedOffset = isThumb2
      ? ARM_AM::getT2SOImmVal(Offset) : ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      // FIXME: Try t2ADDri12 or t2SUBri12?
      return false; // Probably not worth it then.
    BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
      .addReg(Base, getKillRegState(BaseKill)).addImm(Offset)
      .addImm(Pred).addReg(PredReg).addReg(0);
    BaseKill = true;  // New base is always killed right after its use.
  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = isi32Load(Opcode) || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg)
    : BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
        .addImm(Pred).addReg(PredReg);
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
                     | getKillRegState(Regs[i].second));
/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions.
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              ARMCC::CondCodes Pred, unsigned PredReg,
                              unsigned Scratch, MemOpQueue &MemOps,
                              SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
  bool isAM4 = isi32Load(Opcode) || isi32Store(Opcode);
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  DebugLoc dl = Loc->getDebugLoc();
  unsigned PReg = Loc->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  bool isKill = Loc->getOperand(0).isKill();
  SmallVector<std::pair<unsigned,bool>, 8> Regs;
  Regs.push_back(std::make_pair(PReg, isKill));
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    isKill = MemOps[i].MBBI->getOperand(0).isKill();
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      Regs.push_back(std::make_pair(Reg, isKill));
      // Can't merge this in. Try to merge the earlier ones first.
      if (MergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, PredReg,
                   Scratch, dl, Regs)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
      MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
  bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
  if (MergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, PredReg,
               Scratch, dl, Regs)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
static ARMCC::CondCodes getInstrPredicate(MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
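/// isMatchingDecrement - Return true if MI is a SUBri that subtracts Bytes
/// from Base back into Base under the same predicate, i.e. a base decrement
/// that a load / store (multiple) could fold as a writeback. A non-zero
/// Limit caps the immediate that is considered foldable.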
static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, unsigned Limit,
                                       ARMCC::CondCodes Pred, unsigned PredReg){
  unsigned MyPredReg = 0;
  if (MI->getOpcode() != ARM::t2SUBri &&
      MI->getOpcode() != ARM::SUBri)
  // Make sure the offset fits in 8 bits.
  if (Bytes <= 0 || (Limit && Bytes >= Limit))
  return (MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          MI->getOperand(2).getImm() == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
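/// isMatchingIncrement - Same as above, but for an ADDri that adds Bytes to
/// Base, i.e. a candidate writeback increment.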
static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, unsigned Limit,
                                       ARMCC::CondCodes Pred, unsigned PredReg){
  unsigned MyPredReg = 0;
  if (MI->getOpcode() != ARM::t2ADDri &&
      MI->getOpcode() != ARM::ADDri)
  if (Bytes <= 0 || (Limit && Bytes >= Limit))
    // Make sure the offset fits in 8 bits.
  return (MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          MI->getOperand(2).getImm() == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
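/// getLSMultipleTransferSize - Return the number of bytes transferred by a
/// load / store (multiple) instruction; used to recognize matching base
/// increments and decrements.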
static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
    return (MI->getNumOperands() - 4) * 4;
    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;
/// MergeBaseUpdateLSMultiple - Fold preceding/trailing inc/dec of base
/// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// stmia rn!, <ra, rb, rc>
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// ldmdb rn!, <ra, rb, rc>
bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                                MachineBasicBlock::iterator MBBI,
                                                MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  int Opcode = MI->getOpcode();
  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::t2LDM ||
               Opcode == ARM::STM || Opcode == ARM::t2STM;
    if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))
    // Can't use the updating AM4 sub-mode if the base register is also a dest
    // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
    for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
      if (MI->getOperand(i).getReg() == Base)
    ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
      } else if (Mode == ARM_AM::ib &&
                 isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));
    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
          isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
      } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
                 isMatchingDecrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
    // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
    if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))
    ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
    unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
    if (MBBI != MBB.begin()) {
      MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingDecrement(PrevMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));
    if (MBBI != MBB.end()) {
      MachineBasicBlock::iterator NextMBBI = next(MBBI);
      if (Mode == ARM_AM::ia &&
          isMatchingIncrement(NextMBBI, Base, Bytes, 0, Pred, PredReg)) {
        MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));
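/// getPreIndexedLoadStoreOpcode - Return the pre-indexed (writeback before
/// access) form of a load / store opcode; FP loads / stores are mapped onto
/// the FLDM / FSTM multiple opcodes.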
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
  case ARM::LDR: return ARM::LDR_PRE;
  case ARM::STR: return ARM::STR_PRE;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
    return ARM::t2LDR_PRE;
    return ARM::t2STR_PRE;
  default: llvm_unreachable("Unhandled opcode!");
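/// getPostIndexedLoadStoreOpcode - Return the post-indexed (writeback after
/// access) form of a load / store opcode; FP loads / stores again map onto
/// FLDM / FSTM.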
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
  case ARM::LDR: return ARM::LDR_POST;
  case ARM::STR: return ARM::STR_POST;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;
    return ARM::t2LDR_POST;
    return ARM::t2STR_POST;
  default: llvm_unreachable("Unhandled opcode!");
/// MergeBaseUpdateLoadStore - Fold preceding/trailing inc/dec of base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               const TargetInstrInfo *TII,
                                               MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  DebugLoc dl = MI->getDebugLoc();
  bool isAM5 = Opcode == ARM::FLDD || Opcode == ARM::FLDS ||
               Opcode == ARM::FSTD || Opcode == ARM::FSTS;
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0)
  else if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
  else if (isT2i32Load(Opcode) || isT2i32Store(Opcode))
    if (MI->getOperand(2).getImm() != 0)
  bool isLd = isi32Load(Opcode) || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  // AM2 - 12 bits, thumb2 - 8 bits.
  unsigned Limit = isAM5 ? 0 : (isAM2 ? 0x1000 : 0x100);
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes, Limit, Pred, PredReg)) {
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
               isMatchingIncrement(PrevMBBI, Base, Bytes, Limit,Pred,PredReg)) {
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
        isMatchingDecrement(NextMBBI, Base, Bytes, Limit, Pred, PredReg)) {
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Limit,Pred,PredReg)) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM5
    ? ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
       ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
      .addReg(Base, getKillRegState(BaseKill))
      .addImm(Offset).addImm(Pred).addReg(PredReg)
      .addReg(MI->getOperand(0).getReg(), RegState::Define);
      // LDR_PRE, LDR_POST,
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
      // t2LDR_PRE, t2LDR_POST
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
        .addReg(Base, RegState::Define)
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
    MachineOperand &MO = MI->getOperand(0);
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc)).addReg(Base).addImm(Offset)
        .addImm(Pred).addReg(PredReg)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()));
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
      // t2STR_PRE, t2STR_POST
      BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
        .addReg(MO.getReg(), getKillRegState(MO.isKill()))
        .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
/// isMemoryOp - Returns true if instruction is a memory operation (that this
/// pass is capable of operating on).
static bool isMemoryOp(const MachineInstr *MI) {
  int Opcode = MI->getOpcode();
    return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
    return MI->getOperand(1).isReg();
    return MI->getOperand(1).isReg();
/// AdvanceRS - Advance register scavenger to just before the earliest memory
/// op that is being merged.
void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
  unsigned Position = MemOps[0].Position;
  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
    if (MemOps[i].Position < Position) {
      Position = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
  if (Loc != MBB.begin())
    RS->forward(prior(Loc));
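/// getMemoryOpOffset - Decode the signed immediate offset from a load / store
/// instruction's addressing-mode operand (AM2, AM3, AM5 or Thumb2 immediate).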
static int getMemoryOpOffset(const MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
  unsigned NumOperands = MI->getDesc().getNumOperands();
  unsigned OffField = MI->getOperand(NumOperands-3).getImm();
  if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
      Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
      Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8)
    ? ARM_AM::getAM2Offset(OffField)
    : (isAM3 ? ARM_AM::getAM3Offset(OffField)
             : ARM_AM::getAM5Offset(OffField) * 4);
    if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
    if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
    if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
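/// InsertLDR_STR - Emit a single LDR or STR with the given register, base and
/// AM2 immediate offset operands; helper used when an LDRD / STRD is split
/// into two instructions.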
static void InsertLDR_STR(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          int OffImm, bool isDef,
                          DebugLoc dl, unsigned NewOpc,
                          unsigned Reg, bool RegDeadKill,
                          unsigned BaseReg, bool BaseKill,
                          unsigned OffReg, bool OffKill,
                          ARMCC::CondCodes Pred, unsigned PredReg,
                          const TargetInstrInfo *TII) {
    Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
    Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);
    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
      .addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
      .addReg(BaseReg, getKillRegState(BaseKill))
      .addReg(OffReg, getKillRegState(OffKill))
      .addImm(Pred).addReg(PredReg);
    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
      .addReg(Reg, getKillRegState(RegDeadKill))
      .addReg(BaseReg, getKillRegState(BaseKill))
      .addReg(OffReg, getKillRegState(OffKill))
      .addImm(Pred).addReg(PredReg);
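/// FixInvalidRegPairOp - An LDRD / STRD needs an even / odd register pair.
/// If register allocation did not produce one, rewrite the instruction as an
/// LDM / STM (when the registers ascend and there is no offset) or as two
/// separate LDR / STR instructions.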
bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = &*MBBI;
  unsigned Opcode = MI->getOpcode();
  if (Opcode == ARM::LDRD || Opcode == ARM::STRD) {
    unsigned EvenReg = MI->getOperand(0).getReg();
    unsigned OddReg = MI->getOperand(1).getReg();
    unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
    unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
    if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
    bool isLd = Opcode == ARM::LDRD;
    bool EvenDeadKill = isLd ?
      MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
    bool OddDeadKill = isLd ?
      MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
    const MachineOperand &BaseOp = MI->getOperand(2);
    unsigned BaseReg = BaseOp.getReg();
    bool BaseKill = BaseOp.isKill();
    const MachineOperand &OffOp = MI->getOperand(3);
    unsigned OffReg = OffOp.getReg();
    bool OffKill = OffOp.isKill();
    int OffImm = getMemoryOpOffset(MI);
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
    if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
      // Ascending register numbers and no offset. It's safe to change it to a
      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDM : ARM::STM;
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
          .addReg(BaseReg, getKillRegState(BaseKill))
          .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
          .addImm(Pred).addReg(PredReg)
          .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
          .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
        BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
          .addReg(BaseReg, getKillRegState(BaseKill))
          .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
          .addImm(Pred).addReg(PredReg)
          .addReg(EvenReg, getKillRegState(EvenDeadKill))
          .addReg(OddReg, getKillRegState(OddDeadKill));
      // Split into two instructions.
      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDR : ARM::STR;
      DebugLoc dl = MBBI->getDebugLoc();
      // If this is a load and base register is killed, it may have been
      // re-defed by the load, make sure the first load does not clobber it.
          (BaseKill || OffKill) &&
          (TRI->regsOverlap(EvenReg, BaseReg) ||
           (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
        assert(!TRI->regsOverlap(OddReg, BaseReg) &&
               (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc, OddReg, OddDeadKill,
                      BaseReg, false, OffReg, false, Pred, PredReg, TII);
        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc, EvenReg, EvenDeadKill,
                      BaseReg, BaseKill, OffReg, OffKill, Pred, PredReg, TII);
        InsertLDR_STR(MBB, MBBI, OffImm, isLd, dl, NewOpc,
                      EvenReg, EvenDeadKill, BaseReg, false, OffReg, false,
        InsertLDR_STR(MBB, MBBI, OffImm+4, isLd, dl, NewOpc,
                      OddReg, OddDeadKill, BaseReg, BaseKill, OffReg, OffKill,
/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offset into LDM / STM ops.
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  unsigned CurrBase = 0;
  unsigned CurrSize = 0;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned CurrPredReg = 0;
  unsigned Position = 0;
  SmallVector<MachineBasicBlock::iterator,4> Merges;
  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
    if (FixInvalidRegPairOp(MBB, MBBI))
    bool Advance = false;
    bool TryMerge = false;
    bool Clobber = false;
    bool isMemOp = isMemoryOp(MBBI);
      int Opcode = MBBI->getOpcode();
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
      int Offset = getMemoryOpOffset(MBBI);
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (isi32Load(Opcode) && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrPredReg = PredReg;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
          // No need to match PredReg.
          // Continue adding to the queue.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
            for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
              if (Offset < I->Offset) {
                MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
              } else if (Offset == I->Offset) {
                // Collision! This can't be merged!
        // Try to find a free register to use as a new base in case it's needed.
        // First advance to the instruction just before the start of the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register. Make sure it's a call clobbered register or
        // a spilled callee-saved register.
        unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
          Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
                                      AFI->getSpilledCSRegisters());
        // Process the load / store instructions.
        RS->forward(prior(MBBI));
        MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
                     CurrPred, CurrPredReg, Scratch, MemOps, Merges);
        // Try folding preceding/trailing base inc/dec into the generated
        for (unsigned i = 0, e = Merges.size(); i < e; ++i)
          if (MergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
        NumMerges += Merges.size();
        // Try folding preceding/trailing base inc/dec into those loads / stores
        // that were not merged to form LDM/STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (MergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      } else if (NumMemOps == 1) {
        // Try folding preceding/trailing base inc/dec into the single
        if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
          RS->forward(prior(MBBI));
      CurrPred = ARMCC::AL;
    // If iterator hasn't been advanced and this is not a memory op, skip it.
    // It can't start a new chain anyway.
    if (!Advance && !isMemOp && MBBI != E) {
  return NumMerges > 0;
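/// OffsetCompare - Order load / store instructions by decreasing memory
/// offset; RescheduleOps relies on this reverse ordering when it scans the
/// sorted vector from the back.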
struct OffsetCompare {
  bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
    int LOffset = getMemoryOpOffset(LHS);
    int ROffset = getMemoryOpOffset(RHS);
    assert(LHS == RHS || LOffset != ROffset);
    return LOffset > ROffset;
/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
/// (bx lr) into the preceding stack restore so it directly restores the value
///   ldmfd sp!, {r7, lr}
///   ldmfd sp!, {r7, pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  if (MBBI != MBB.begin() &&
      (MBBI->getOpcode() == ARM::BX_RET || MBBI->getOpcode() == ARM::tBX_RET)) {
    MachineInstr *PrevMI = prior(MBBI);
    if (PrevMI->getOpcode() == ARM::LDM || PrevMI->getOpcode() == ARM::t2LDM) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() != ARM::LR)
      unsigned NewOpc = isThumb2 ? ARM::t2LDM_RET : ARM::LDM_RET;
      PrevMI->setDesc(TII->get(NewOpc));
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  RS = new RegScavenger();
  isThumb2 = AFI->isThumb2Function();
  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    Modified |= MergeReturnIntoLDM(MBB);
/// ARMPreAllocLoadStoreOpt - Pre-register-allocation pass that moves
/// loads / stores from consecutive locations closer together to make it
/// more likely they will be combined later.
struct VISIBILITY_HIDDEN ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
  ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}
  const TargetData *TD;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const ARMSubtarget *STI;
  MachineRegisterInfo *MRI;
  virtual bool runOnMachineFunction(MachineFunction &Fn);
  virtual const char *getPassName() const {
    return "ARM pre- register allocation load / store optimization pass";
  bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
                        unsigned &NewOpc, unsigned &EvenReg,
                        unsigned &OddReg, unsigned &BaseReg,
                        unsigned &OffReg, unsigned &Offset,
                        unsigned &PredReg, ARMCC::CondCodes &Pred);
  bool RescheduleOps(MachineBasicBlock *MBB,
                     SmallVector<MachineInstr*, 4> &Ops,
                     unsigned Base, bool isLd,
                     DenseMap<MachineInstr*, unsigned> &MI2LocMap);
  bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
char ARMPreAllocLoadStoreOpt::ID = 0;
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TD = Fn.getTarget().getTargetData();
  TII = Fn.getTarget().getInstrInfo();
  TRI = Fn.getTarget().getRegisterInfo();
  STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
  MRI = &Fn.getRegInfo();
  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
    Modified |= RescheduleLoadStoreInstrs(MFI);
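/// IsSafeAndProfitableToMove - Check whether the loads / stores in MemOps can
/// be moved next to each other across the instructions in [I, E): bail out on
/// calls, terminators, instructions with unmodeled side effects, intervening
/// stores (when moving loads), or a redefinition of the base register, and
/// give up if the move would add too much register pressure.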
static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
                                      MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator E,
                                      SmallPtrSet<MachineInstr*, 4> &MemOps,
                                      SmallSet<unsigned, 4> &MemRegs,
                                      const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
  // FIXME: This is overly conservative. We should make use of alias information
  SmallSet<unsigned, 4> AddedRegPressure;
    if (MemOps.count(&*I))
    const TargetInstrDesc &TID = I->getDesc();
    if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
    if (isLd && TID.mayStore())
      // It's not safe to move the first 'str' down.
      // str r4, [r0, #+4]
    for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
      MachineOperand &MO = I->getOperand(j);
      unsigned Reg = MO.getReg();
      if (MO.isDef() && TRI->regsOverlap(Reg, Base))
      if (Reg != Base && !MemRegs.count(Reg))
        AddedRegPressure.insert(Reg);
  // Estimate register pressure increase due to the transformation.
  if (MemRegs.size() <= 4)
    // Ok if we are moving small number of instructions.
  return AddedRegPressure.size() <= MemRegs.size() * 2;
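/// CanFormLdStDWord - Decide whether two LDR / STR instructions can be
/// combined into a single LDRD / STRD: the base address must be suitably
/// aligned for an i64 access, the offset must fit the AM3 immediate range,
/// and the two data registers must be distinct. On success the operands for
/// the paired instruction are returned through the reference arguments.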
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
                                          unsigned &NewOpc, unsigned &EvenReg,
                                          unsigned &OddReg, unsigned &BaseReg,
                                          unsigned &OffReg, unsigned &Offset,
                                          ARMCC::CondCodes &Pred) {
  // FIXME: FLDS / FSTS -> FLDD / FSTD
  unsigned Opcode = Op0->getOpcode();
  if (Opcode == ARM::LDR)
  else if (Opcode == ARM::STR)
  // Make sure the base address satisfies the i64 ld / st alignment requirement.
  if (!Op0->hasOneMemOperand() ||
      !Op0->memoperands_begin()->getValue() ||
      Op0->memoperands_begin()->isVolatile())
  unsigned Align = Op0->memoperands_begin()->getAlignment();
  unsigned ReqAlign = STI->hasV6Ops()
    ? TD->getPrefTypeAlignment(Type::Int64Ty) : 8; // Pre-v6 needs 8-byte align.
  if (Align < ReqAlign)
  // Then make sure the immediate offset fits.
  int OffImm = getMemoryOpOffset(Op0);
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
    AddSub = ARM_AM::sub;
  if (OffImm >= 256) // 8 bits
  Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
  EvenReg = Op0->getOperand(0).getReg();
  OddReg = Op1->getOperand(0).getReg();
  if (EvenReg == OddReg)
  BaseReg = Op0->getOperand(1).getReg();
  OffReg = Op0->getOperand(2).getReg();
  Pred = getInstrPredicate(Op0, PredReg);
  dl = Op0->getDebugLoc();
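/// RescheduleOps - Given all loads / stores (Ops) that use the same base
/// register, pick runs with consecutive, increasing offsets and move them
/// next to one another (loads up to the earliest, stores down to the latest)
/// so they can be merged later; pairs that qualify are turned into
/// LDRD / STRD directly and given register-allocation hints.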
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                            SmallVector<MachineInstr*, 4> &Ops,
                                            unsigned Base, bool isLd,
                                            DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
  bool RetVal = false;
  // Sort by offset (in reverse order).
  std::sort(Ops.begin(), Ops.end(), OffsetCompare());
  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = 0;
    MachineInstr *LastOp = 0;
    unsigned LastOpcode = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    for (int i = Ops.size() - 1; i >= 0; --i) {
      MachineInstr *Op = Ops[i];
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
      if (Loc >= LastLoc) {
      unsigned Opcode = Op->getOpcode();
      if (LastOpcode && Opcode != LastOpcode)
      int Offset = getMemoryOpOffset(Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
        if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
      LastOffset = Offset;
      LastOpcode = Opcode;
      if (++NumMove == 8) // FIXME: Tune
      SmallPtrSet<MachineInstr*, 4> MemOps;
      SmallSet<unsigned, 4> MemRegs;
      for (int i = NumMove-1; i >= 0; --i) {
        MemOps.insert(Ops[i]);
        MemRegs.insert(Ops[i]->getOperand(0).getReg());
      // Be conservative, if the instructions are too far apart, don't
      // move them. We want to limit the increase of register pressure.
      bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
        DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
                                           MemOps, MemRegs, TRI);
        for (unsigned i = 0; i != NumMove; ++i)
        // This is the new location for the loads / stores.
        MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
        while (InsertPos != MBB->end() && MemOps.count(InsertPos))
        // If we are moving a pair of loads / stores, see if it makes sense
        // to try to allocate a pair of registers that can form register pairs.
        MachineInstr *Op0 = Ops.back();
        MachineInstr *Op1 = Ops[Ops.size()-2];
        unsigned EvenReg = 0, OddReg = 0;
        unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
        ARMCC::CondCodes Pred = ARMCC::AL;
        unsigned NewOpc = 0;
        unsigned Offset = 0;
        if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
                                             EvenReg, OddReg, BaseReg, OffReg,
                                             Offset, PredReg, Pred)) {
          // Form the pair instruction.
            BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
              .addReg(EvenReg, RegState::Define)
              .addReg(OddReg, RegState::Define)
              .addReg(BaseReg).addReg(0).addImm(Offset)
              .addImm(Pred).addReg(PredReg);
            BuildMI(*MBB, InsertPos, dl, TII->get(NewOpc))
              .addReg(BaseReg).addReg(0).addImm(Offset)
              .addImm(Pred).addReg(PredReg);
          // Add register allocation hints to form register pairs.
          MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
          MRI->setRegAllocationHint(OddReg, ARMRI::RegPairOdd, EvenReg);
          for (unsigned i = 0; i != NumMove; ++i) {
            MachineInstr *Op = Ops.back();
            MBB->splice(InsertPos, MBB, Op);
        NumLdStMoved += NumMove;
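/// RescheduleLoadStoreInstrs - Scan the block and bucket unpredicated loads
/// and stores by base register, treating calls, terminators and repeated
/// base+offset combinations as barriers, then invoke RescheduleOps on each
/// bucket.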
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;
  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
  SmallVector<unsigned, 4> LdBases;
  SmallVector<unsigned, 4> StBases;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
    for (; MBBI != E; ++MBBI) {
      MachineInstr *MI = MBBI;
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.isCall() || TID.isTerminator()) {
        // Stop at barriers.
      MI2LocMap[MI] = Loc++;
      if (!isMemoryOp(MI))
      unsigned PredReg = 0;
      if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
      int Opcode = MI->getOpcode();
      bool isLd = Opcode == ARM::LDR ||
                  Opcode == ARM::FLDS || Opcode == ARM::FLDD;
      unsigned Base = MI->getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);
      bool StopHere = false;
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2LdsMap.find(Base);
        if (BI != Base2LdsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
            BI->second.push_back(MI);
          SmallVector<MachineInstr*, 4> MIs;
          Base2LdsMap[Base] = MIs;
          LdBases.push_back(Base);
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2StsMap.find(Base);
        if (BI != Base2StsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
            BI->second.push_back(MI);
          SmallVector<MachineInstr*, 4> MIs;
          Base2StsMap[Base] = MIs;
          StBases.push_back(Base);
        // Found a duplicate (a base+offset combination that's seen earlier).
    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
      Base2LdsMap.clear();
      Base2StsMap.clear();
/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
    return new ARMPreAllocLoadStoreOpt();
  return new ARMLoadStoreOpt();