//===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ----*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "arm-ldst-opt"
#include "ARM.h"
#include "ARMAddressingModes.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMRegisterInfo.h"
#include "llvm/DerivedTypes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLDMGened , "Number of ldm instructions generated");
STATISTIC(NumSTMGened , "Number of stm instructions generated");
STATISTIC(NumFLDMGened, "Number of fldm instructions generated");
STATISTIC(NumFSTMGened, "Number of fstm instructions generated");
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
/// ARMLoadStoreOpt - Post-register allocation pass that combines
/// load / store instructions to form ldm / stm instructions.
namespace {
  struct VISIBILITY_HIDDEN ARMLoadStoreOpt : public MachineFunctionPass {
    static char ID;
    ARMLoadStoreOpt() : MachineFunctionPass(&ID) {}

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ARMFunctionInfo *AFI;
    RegScavenger *RS;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM load / store optimization pass";
    }

  private:
    struct MemOpQueueEntry {
      int Offset;
      unsigned Position;
      MachineBasicBlock::iterator MBBI;
      bool Merged;
      MemOpQueueEntry(int o, int p, MachineBasicBlock::iterator i)
        : Offset(o), Position(p), MBBI(i), Merged(false) {}
    };
    typedef SmallVector<MemOpQueueEntry,8> MemOpQueue;
    typedef MemOpQueue::iterator MemOpQueueIter;
    bool MergeOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                  int Offset, unsigned Base, bool BaseKill, int Opcode,
                  ARMCC::CondCodes Pred, unsigned PredReg, unsigned Scratch,
                  DebugLoc dl, SmallVector<std::pair<unsigned, bool>, 8> &Regs);
    void MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex, unsigned Base,
                      int Opcode, unsigned Size,
                      ARMCC::CondCodes Pred, unsigned PredReg,
                      unsigned Scratch, MemOpQueue &MemOps,
                      SmallVector<MachineBasicBlock::iterator, 4> &Merges);

    void AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps);
    bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MBBI);
    bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
    bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  };
  char ARMLoadStoreOpt::ID = 0;
}
static int getLoadStoreMultipleOpcode(int Opcode) {

/// MergeOps - Create and insert a LDM or STM with Base as base register and
/// registers in Regs as the register operands that would be loaded / stored.
/// It returns true if the transformation is done.
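///
/// For example (illustrative), with Base = r0, Offset = 0 and Regs = {r4, r5,
/// r6}, an "ldmia r0, {r4, r5, r6}" is emitted. With a non-zero starting
/// offset such as 16, an "add r6, r0, #16" is emitted first (using the last
/// destination register for loads, or the scratch register for stores) and
/// the new register becomes the base.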
bool
ARMLoadStoreOpt::MergeOps(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI,
                          int Offset, unsigned Base, bool BaseKill,
                          int Opcode, ARMCC::CondCodes Pred,
                          unsigned PredReg, unsigned Scratch, DebugLoc dl,
                          SmallVector<std::pair<unsigned, bool>, 8> &Regs) {
  // Only a single register to load / store. Don't bother.
  unsigned NumRegs = Regs.size();
  if (NumRegs <= 1)
    return false;

  ARM_AM::AMSubMode Mode = ARM_AM::ia;
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if (isAM4 && Offset == 4)
    Mode = ARM_AM::ib;
  else if (isAM4 && Offset == -4 * (int)NumRegs + 4)
    Mode = ARM_AM::da;
  else if (isAM4 && Offset == -4 * (int)NumRegs)
    Mode = ARM_AM::db;
  else if (Offset != 0) {
    // If the starting offset isn't zero, insert an MI to materialize a new
    // base. But only do so if it is cost effective, i.e. merging more than
    // two loads / stores.
    if (NumRegs <= 2)
      return false;

    unsigned NewBase = 0;
    if (Opcode == ARM::LDR)
      // If it is a load, then just use one of the destination registers
      // as the new base.
      NewBase = Regs[NumRegs-1].first;
    else {
      // Use the scratch register as the new base.
      NewBase = Scratch;
      if (NewBase == 0)
        return false;
    }
    int BaseOpc = ARM::ADDri;
    if (Offset < 0) {
      BaseOpc = ARM::SUBri;
      Offset = -Offset;
    }
    int ImmedOffset = ARM_AM::getSOImmVal(Offset);
    if (ImmedOffset == -1)
      return false; // Probably not worth it then.

    BuildMI(MBB, MBBI, dl, TII->get(BaseOpc), NewBase)
      .addReg(Base, getKillRegState(BaseKill)).addImm(ImmedOffset)
      .addImm(Pred).addReg(PredReg).addReg(0);
    Base = NewBase;
    BaseKill = true;  // New base is always killed right after its use.
  }

  bool isDPR = Opcode == ARM::FLDD || Opcode == ARM::FSTD;
  bool isDef = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  Opcode = getLoadStoreMultipleOpcode(Opcode);
  MachineInstrBuilder MIB = (isAM4)
    ? BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM4ModeImm(Mode)).addImm(Pred).addReg(PredReg)
    : BuildMI(MBB, MBBI, dl, TII->get(Opcode))
        .addReg(Base, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM5Opc(Mode, false, isDPR ? NumRegs<<1 : NumRegs))
        .addImm(Pred).addReg(PredReg);
  for (unsigned i = 0; i != NumRegs; ++i)
    MIB = MIB.addReg(Regs[i].first, getDefRegState(isDef)
                     | getKillRegState(Regs[i].second));

  return true;
}

/// MergeLDR_STR - Merge a number of load / store instructions into one or more
/// load / store multiple instructions.
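///
/// For example (illustrative), given queued word loads off r1 at offsets 0, 4
/// and 8 followed by another run starting at offset 100, the first three are
/// merged into a single ldmia and the routine recurses on the remaining
/// entries.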
void
ARMLoadStoreOpt::MergeLDR_STR(MachineBasicBlock &MBB, unsigned SIndex,
                              unsigned Base, int Opcode, unsigned Size,
                              ARMCC::CondCodes Pred, unsigned PredReg,
                              unsigned Scratch, MemOpQueue &MemOps,
                              SmallVector<MachineBasicBlock::iterator, 4> &Merges) {
  bool isAM4 = Opcode == ARM::LDR || Opcode == ARM::STR;
  int Offset = MemOps[SIndex].Offset;
  int SOffset = Offset;
  unsigned Pos = MemOps[SIndex].Position;
  MachineBasicBlock::iterator Loc = MemOps[SIndex].MBBI;
  DebugLoc dl = Loc->getDebugLoc();
  unsigned PReg = Loc->getOperand(0).getReg();
  unsigned PRegNum = ARMRegisterInfo::getRegisterNumbering(PReg);
  bool isKill = Loc->getOperand(0).isKill();

  SmallVector<std::pair<unsigned,bool>, 8> Regs;
  Regs.push_back(std::make_pair(PReg, isKill));
  for (unsigned i = SIndex+1, e = MemOps.size(); i != e; ++i) {
    int NewOffset = MemOps[i].Offset;
    unsigned Reg = MemOps[i].MBBI->getOperand(0).getReg();
    unsigned RegNum = ARMRegisterInfo::getRegisterNumbering(Reg);
    isKill = MemOps[i].MBBI->getOperand(0).isKill();
    // AM4 - register numbers in ascending order.
    // AM5 - consecutive register numbers in ascending order.
    if (NewOffset == Offset + (int)Size &&
        ((isAM4 && RegNum > PRegNum) || RegNum == PRegNum+1)) {
      Offset += Size;
      Regs.push_back(std::make_pair(Reg, isKill));
      PRegNum = RegNum;
    } else {
      // Can't merge this in. Try to merge the earlier ones first.
      if (MergeOps(MBB, ++Loc, SOffset, Base, false, Opcode, Pred, PredReg,
                   Scratch, dl, Regs)) {
        Merges.push_back(prior(Loc));
        for (unsigned j = SIndex; j < i; ++j) {
          MBB.erase(MemOps[j].MBBI);
          MemOps[j].Merged = true;
        }
      }
      MergeLDR_STR(MBB, i, Base, Opcode, Size, Pred, PredReg, Scratch,
                   MemOps, Merges);
      return;
    }

    if (MemOps[i].Position > Pos) {
      Pos = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  bool BaseKill = Loc->findRegisterUseOperandIdx(Base, true) != -1;
  if (MergeOps(MBB, ++Loc, SOffset, Base, BaseKill, Opcode, Pred, PredReg,
               Scratch, dl, Regs)) {
    Merges.push_back(prior(Loc));
    for (unsigned i = SIndex, e = MemOps.size(); i != e; ++i) {
      MBB.erase(MemOps[i].MBBI);
      MemOps[i].Merged = true;
    }
  }
}

/// getInstrPredicate - If the instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
static ARMCC::CondCodes getInstrPredicate(MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}

static inline bool isMatchingDecrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, ARMCC::CondCodes Pred,
                                       unsigned PredReg) {
  unsigned MyPredReg = 0;
  return (MI && MI->getOpcode() == ARM::SUBri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}

static inline bool isMatchingIncrement(MachineInstr *MI, unsigned Base,
                                       unsigned Bytes, ARMCC::CondCodes Pred,
                                       unsigned PredReg) {
  unsigned MyPredReg = 0;
  return (MI && MI->getOpcode() == ARM::ADDri &&
          MI->getOperand(0).getReg() == Base &&
          MI->getOperand(1).getReg() == Base &&
          ARM_AM::getAM2Offset(MI->getOperand(2).getImm()) == Bytes &&
          getInstrPredicate(MI, MyPredReg) == Pred &&
          MyPredReg == PredReg);
}

static inline unsigned getLSMultipleTransferSize(MachineInstr *MI) {
  switch (MI->getOpcode()) {
    return (MI->getNumOperands() - 4) * 4;
    return ARM_AM::getAM5Offset(MI->getOperand(1).getImm()) * 4;

/// mergeBaseUpdateLSMultiple - Fold a preceding/trailing inc/dec of the base
/// register into the LDM/STM/FLDM{D|S}/FSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
static bool mergeBaseUpdateLSMultiple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      bool &Advance,
                                      MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(0).getReg();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  int Opcode = MI->getOpcode();
  bool isAM4 = Opcode == ARM::LDM || Opcode == ARM::STM;

  if (ARM_AM::getAM4WBFlag(MI->getOperand(1).getImm()))

  // Can't use the updating AM4 sub-mode if the base register is also a dest
  // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
  for (unsigned i = 3, e = MI->getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).getReg() == Base)

  ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MI->getOperand(1).getImm());
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (Mode == ARM_AM::ia &&
        isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::db, true));
    } else if (Mode == ARM_AM::ib &&
               isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(ARM_AM::da, true));

  if (MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if ((Mode == ARM_AM::ia || Mode == ARM_AM::ib) &&
        isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));
    } else if ((Mode == ARM_AM::da || Mode == ARM_AM::db) &&
               isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM4ModeImm(Mode, true));

  // FLDM{D|S}, FSTM{D|S} addressing mode 5 ops.
  if (ARM_AM::getAM5WBFlag(MI->getOperand(1).getImm()))

  ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MI->getOperand(1).getImm());
  unsigned Offset = ARM_AM::getAM5Offset(MI->getOperand(1).getImm());
  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (Mode == ARM_AM::ia &&
        isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::db, true, Offset));

  if (MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (Mode == ARM_AM::ia &&
        isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      MI->getOperand(1).setImm(ARM_AM::getAM5Opc(ARM_AM::ia, true, Offset));

static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_PRE;
  case ARM::STR: return ARM::STR_PRE;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;

static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc) {
  switch (Opc) {
  case ARM::LDR: return ARM::LDR_POST;
  case ARM::STR: return ARM::STR_POST;
  case ARM::FLDS: return ARM::FLDMS;
  case ARM::FLDD: return ARM::FLDMD;
  case ARM::FSTS: return ARM::FSTMS;
  case ARM::FSTD: return ARM::FSTMD;

/// mergeBaseUpdateLoadStore - Fold a preceding/trailing inc/dec of the base
/// register into the LDR/STR/FLD{D|S}/FST{D|S} op when possible:
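///
/// For example (illustrative):
///   ldr r1, [r0]
///   add r0, r0, #4
/// =>
///   ldr r1, [r0], #4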
static bool mergeBaseUpdateLoadStore(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const TargetInstrInfo *TII,
                                     bool &Advance,
                                     MachineBasicBlock::iterator &I) {
  MachineInstr *MI = MBBI;
  unsigned Base = MI->getOperand(1).getReg();
  bool BaseKill = MI->getOperand(1).isKill();
  unsigned Bytes = getLSMultipleTransferSize(MI);
  int Opcode = MI->getOpcode();
  DebugLoc dl = MI->getDebugLoc();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  if ((isAM2 && ARM_AM::getAM2Offset(MI->getOperand(3).getImm()) != 0) ||
      (!isAM2 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0))
    return false;

  bool isLd = Opcode == ARM::LDR || Opcode == ARM::FLDS || Opcode == ARM::FLDD;
  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (isLd && MI->getOperand(0).getReg() == Base)
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool DoMerge = false;
  ARM_AM::AddrOpc AddSub = ARM_AM::add;
  unsigned NewOpc = 0;

  if (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PrevMBBI = prior(MBBI);
    if (isMatchingDecrement(PrevMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    } else if (isAM2 && isMatchingIncrement(PrevMBBI, Base, Bytes,
                                            Pred, PredReg)) {
      DoMerge = true;
      NewOpc = getPreIndexedLoadStoreOpcode(Opcode);
    }
  }

  if (!DoMerge && MBBI != MBB.end()) {
    MachineBasicBlock::iterator NextMBBI = next(MBBI);
    if (isAM2 && isMatchingDecrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      AddSub = ARM_AM::sub;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    } else if (isMatchingIncrement(NextMBBI, Base, Bytes, Pred, PredReg)) {
      DoMerge = true;
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode);
    }
  }

  if (!DoMerge)
    return false;

  bool isDPR = NewOpc == ARM::FLDMD || NewOpc == ARM::FSTMD;
  unsigned Offset = isAM2 ? ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift)
    : ARM_AM::getAM5Opc((AddSub == ARM_AM::sub) ? ARM_AM::db : ARM_AM::ia,
                        true, isDPR ? 2 : 1);
  // LDR_PRE, LDR_POST;
  BuildMI(MBB, MBBI, dl, TII->get(NewOpc), MI->getOperand(0).getReg())
    .addReg(Base, RegState::Define)
    .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
  BuildMI(MBB, MBBI, dl, TII->get(NewOpc))
    .addReg(Base, getKillRegState(BaseKill))
    .addImm(Offset).addImm(Pred).addReg(PredReg)
    .addReg(MI->getOperand(0).getReg(), RegState::Define);

  MachineOperand &MO = MI->getOperand(0);
  // STR_PRE, STR_POST;
  BuildMI(MBB, MBBI, dl, TII->get(NewOpc), Base)
    .addReg(MO.getReg(), getKillRegState(MO.isKill()))
    .addReg(Base).addReg(0).addImm(Offset).addImm(Pred).addReg(PredReg);
  BuildMI(MBB, MBBI, dl, TII->get(NewOpc)).addReg(Base).addImm(Offset)
    .addImm(Pred).addReg(PredReg)
    .addReg(MO.getReg(), getKillRegState(MO.isKill()));

/// isMemoryOp - Returns true if the instruction is a memory operation that
/// this pass is capable of operating on.
static bool isMemoryOp(MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  switch (Opcode) {
  default: break;
  case ARM::LDR:
  case ARM::STR:
    return MI->getOperand(1).isReg() && MI->getOperand(2).getReg() == 0;
  case ARM::FLDS:
  case ARM::FSTS:
    return MI->getOperand(1).isReg();
  case ARM::FLDD:
  case ARM::FSTD:
    return MI->getOperand(1).isReg();
  }
  return false;
}

/// AdvanceRS - Advance register scavenger to just before the earliest memory
/// op that is being merged.
void ARMLoadStoreOpt::AdvanceRS(MachineBasicBlock &MBB, MemOpQueue &MemOps) {
  MachineBasicBlock::iterator Loc = MemOps[0].MBBI;
  unsigned Position = MemOps[0].Position;
  for (unsigned i = 1, e = MemOps.size(); i != e; ++i) {
    if (MemOps[i].Position < Position) {
      Position = MemOps[i].Position;
      Loc = MemOps[i].MBBI;
    }
  }

  if (Loc != MBB.begin())
    RS->forward(prior(Loc));
}
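
/// getMemoryOpOffset - Return the signed byte offset encoded in the load /
/// store's addressing-mode operand, e.g. -8 for "ldr r0, [r1, #-8]"
/// (illustrative). AM5 offsets are scaled from words back to bytes.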
static int getMemoryOpOffset(const MachineInstr *MI) {
  int Opcode = MI->getOpcode();
  bool isAM2 = Opcode == ARM::LDR || Opcode == ARM::STR;
  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
  unsigned NumOperands = MI->getDesc().getNumOperands();
  unsigned OffField = MI->getOperand(NumOperands-3).getImm();
  int Offset = isAM2
    ? ARM_AM::getAM2Offset(OffField)
    : (isAM3 ? ARM_AM::getAM3Offset(OffField)
             : ARM_AM::getAM5Offset(OffField) * 4);
  if (isAM2) {
    if (ARM_AM::getAM2Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  } else if (isAM3) {
    if (ARM_AM::getAM3Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  } else {
    if (ARM_AM::getAM5Op(OffField) == ARM_AM::sub)
      Offset = -Offset;
  }
  return Offset;
}
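
/// InsertLDR_STR - Emit a single LDR / STR with the given operands; used below
/// when an LDRD / STRD has to be split into two word-sized memory ops. The
/// immediate offset is re-encoded as an addrmode2 value first.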
static void InsertLDR_STR(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          int OffImm, bool isDef,
                          DebugLoc dl, unsigned NewOpc,
                          unsigned Reg, bool RegKill,
                          unsigned BaseReg, bool BaseKill,
                          unsigned OffReg, bool OffKill,
                          ARMCC::CondCodes Pred, unsigned PredReg,
                          const TargetInstrInfo *TII) {
  unsigned Offset;
  if (OffImm < 0)
    Offset = ARM_AM::getAM2Opc(ARM_AM::sub, -OffImm, ARM_AM::no_shift);
  else
    Offset = ARM_AM::getAM2Opc(ARM_AM::add, OffImm, ARM_AM::no_shift);

  if (isDef)
    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc), Reg)
      .addReg(BaseReg, getKillRegState(BaseKill))
      .addReg(OffReg, getKillRegState(OffKill))
      .addImm(Offset)
      .addImm(Pred).addReg(PredReg);
  else
    BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
      .addReg(Reg, getKillRegState(RegKill))
      .addReg(BaseReg, getKillRegState(BaseKill))
      .addReg(OffReg, getKillRegState(OffKill))
      .addImm(Offset)
      .addImm(Pred).addReg(PredReg);
}
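
/// FixInvalidRegPairOp - LDRD / STRD require an even / odd physical register
/// pair. If register allocation did not produce one, rewrite the instruction
/// as an ldm / stm (ascending registers, zero offset) or split it into two
/// plain LDR / STR ops. For example (illustrative):
///   ldrd r1, r2, [r0, #8]
/// =>
///   ldr  r1, [r0, #8]
///   ldr  r2, [r0, #12]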
bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = &*MBBI;
  unsigned Opcode = MI->getOpcode();
  if (Opcode == ARM::LDRD || Opcode == ARM::STRD) {
    unsigned EvenReg = MI->getOperand(0).getReg();
    unsigned OddReg = MI->getOperand(1).getReg();
    unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
    unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);
    if ((EvenRegNum & 1) == 0 && (EvenRegNum + 1) == OddRegNum)
      return false;

    bool isDef = Opcode == ARM::LDRD;
    bool EvenKill = isDef ? false : MI->getOperand(0).isKill();
    bool OddKill = isDef ? false : MI->getOperand(1).isKill();
    const MachineOperand &BaseOp = MI->getOperand(2);
    unsigned BaseReg = BaseOp.getReg();
    bool BaseKill = BaseOp.isKill();
    const MachineOperand &OffOp = MI->getOperand(3);
    unsigned OffReg = OffOp.getReg();
    bool OffKill = OffOp.isKill();
    int OffImm = getMemoryOpOffset(MI);
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);

    if (OddRegNum > EvenRegNum && OffReg == 0 && OffImm == 0) {
      // Ascending register numbers and no offset. It's safe to change it to a
      // ldm / stm.
      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDM : ARM::STM;
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
        .addReg(BaseReg, getKillRegState(BaseKill))
        .addImm(ARM_AM::getAM4ModeImm(ARM_AM::ia))
        .addImm(Pred).addReg(PredReg)
        .addReg(EvenReg, getDefRegState(isDef))
        .addReg(OddReg, getDefRegState(isDef));
    } else {
      // Split into two instructions.
      unsigned NewOpc = (Opcode == ARM::LDRD) ? ARM::LDR : ARM::STR;
      DebugLoc dl = MBBI->getDebugLoc();
      // If this is a load and the base register is killed, it may have been
      // re-defed by the load, make sure the first load does not clobber it.
      if (isLd &&
          (BaseKill || OffKill) &&
          (TRI->regsOverlap(EvenReg, BaseReg) ||
           (OffReg && TRI->regsOverlap(EvenReg, OffReg)))) {
        assert(!TRI->regsOverlap(OddReg, BaseReg) &&
               (!OffReg || !TRI->regsOverlap(OddReg, OffReg)));
        InsertLDR_STR(MBB, MBBI, OffImm+4, isDef, dl, NewOpc, OddReg, OddKill,
                      BaseReg, false, OffReg, false, Pred, PredReg, TII);
        InsertLDR_STR(MBB, MBBI, OffImm, isDef, dl, NewOpc, EvenReg, EvenKill,
                      BaseReg, BaseKill, OffReg, OffKill, Pred, PredReg, TII);
      } else {
        InsertLDR_STR(MBB, MBBI, OffImm, isDef, dl, NewOpc, EvenReg, EvenKill,
                      BaseReg, false, OffReg, false, Pred, PredReg, TII);
        InsertLDR_STR(MBB, MBBI, OffImm+4, isDef, dl, NewOpc, OddReg, OddKill,
                      BaseReg, BaseKill, OffReg, OffKill, Pred, PredReg, TII);
      }
    }

/// LoadStoreMultipleOpti - An optimization pass to turn multiple LDR / STR
/// ops of the same base and incrementing offsets into LDM / STM ops.
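///
/// For example (illustrative):
///   ldr r4, [r0]
///   ldr r5, [r0, #4]
///   ldr r6, [r0, #8]
/// =>
///   ldmia r0, {r4, r5, r6}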
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  unsigned NumMerges = 0;
  unsigned NumMemOps = 0;
  MemOpQueue MemOps;
  unsigned CurrBase = 0;
  int CurrOpc = -1;
  unsigned CurrSize = 0;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned CurrPredReg = 0;
  unsigned Position = 0;
  SmallVector<MachineBasicBlock::iterator,4> Merges;

  RS->enterBasicBlock(&MBB);
  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    if (FixInvalidRegPairOp(MBB, MBBI))
      continue;

    bool Advance = false;
    bool TryMerge = false;
    bool Clobber = false;

    bool isMemOp = isMemoryOp(MBBI);
    if (isMemOp) {
      int Opcode = MBBI->getOpcode();
      unsigned Size = getLSMultipleTransferSize(MBBI);
      unsigned Base = MBBI->getOperand(1).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = getInstrPredicate(MBBI, PredReg);
      int Offset = getMemoryOpOffset(MBBI);
      // Watch out for:
      // r4 := ldr [r5]
      // r5 := ldr [r5, #4]
      // r6 := ldr [r5, #8]
      //
      // The second ldr has effectively broken the chain even though it
      // looks like the later ldr(s) use the same base register. Try to
      // merge the ldr's so far, including this one. But don't try to
      // combine the following ldr(s).
      Clobber = (Opcode == ARM::LDR && Base == MBBI->getOperand(0).getReg());
      if (CurrBase == 0 && !Clobber) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc = Opcode;
        CurrSize = Size;
        CurrPred = Pred;
        CurrPredReg = PredReg;
        MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        NumMemOps++;
        Advance = true;
      if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
        // No need to match PredReg.
        // Continue adding to the queue.
        if (Offset > MemOps.back().Offset) {
          MemOps.push_back(MemOpQueueEntry(Offset, Position, MBBI));
        } else {
          for (MemOpQueueIter I = MemOps.begin(), E = MemOps.end();
               I != E; ++I) {
            if (Offset < I->Offset) {
              MemOps.insert(I, MemOpQueueEntry(Offset, Position, MBBI));
              break;
            } else if (Offset == I->Offset) {
              // Collision! This can't be merged!
        // Try to find a free register to use as a new base in case it's needed.
        // First advance to the instruction just before the start of the chain.
        AdvanceRS(MBB, MemOps);
        // Find a scratch register. Make sure it's a call clobbered register or
        // a spilled callee-saved register.
        unsigned Scratch = RS->FindUnusedReg(&ARM::GPRRegClass, true);
        if (Scratch == 0)
          Scratch = RS->FindUnusedReg(&ARM::GPRRegClass,
                                      AFI->getSpilledCSRegisters());
        // Process the load / store instructions.
        RS->forward(prior(MBBI));

        MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize,
                     CurrPred, CurrPredReg, Scratch, MemOps, Merges);

        // Try folding preceding/trailing base inc/dec into the generated
        // LDM/STM ops.
        for (unsigned i = 0, e = Merges.size(); i < e; ++i)
          if (mergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI))
            ++NumMerges;
        NumMerges += Merges.size();

        // Try folding preceding/trailing base inc/dec into those load/store
        // that were not merged to form LDM/STM ops.
        for (unsigned i = 0; i != NumMemOps; ++i)
          if (!MemOps[i].Merged)
            if (mergeBaseUpdateLoadStore(MBB, MemOps[i].MBBI, TII,Advance,MBBI))
              ++NumMerges;

        // RS may be pointing to an instruction that's deleted.
        RS->skipTo(prior(MBBI));
      } else if (NumMemOps == 1) {
        // Try folding preceding/trailing base inc/dec into the single
        // load / store.
        if (mergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) {
          ++NumMerges;
          RS->forward(prior(MBBI));
        }
      }

      CurrBase = 0;
      CurrPred = ARMCC::AL;
850 // If iterator hasn't been advanced and this is not a memory op, skip it.
851 // It can't start a new chain anyway.
852 if (!Advance && !isMemOp && MBBI != E) {
858 return NumMerges > 0;

namespace {
  struct OffsetCompare {
    bool operator()(const MachineInstr *LHS, const MachineInstr *RHS) const {
      int LOffset = getMemoryOpOffset(LHS);
      int ROffset = getMemoryOpOffset(RHS);
      assert(LHS == RHS || LOffset != ROffset);
      return LOffset > ROffset;
    }
  };
}

/// MergeReturnIntoLDM - If this is an exit BB, try merging the return op
/// (bx lr) into the preceding stack restore so it directly restores the value
/// of lr into pc.
///   ldmfd sp!, {r7, lr}
///   bx lr
/// =>
///   ldmfd sp!, {r7, pc}
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  if (MBB.empty()) return false;

  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  if (MBBI->getOpcode() == ARM::BX_RET && MBBI != MBB.begin()) {
    MachineInstr *PrevMI = prior(MBBI);
    if (PrevMI->getOpcode() == ARM::LDM) {
      MachineOperand &MO = PrevMI->getOperand(PrevMI->getNumOperands()-1);
      if (MO.getReg() == ARM::LR) {
        PrevMI->setDesc(TII->get(ARM::LDM_RET));
        MO.setReg(ARM::PC);
        MBB.erase(MBBI);
        return true;
      }
    }
  }
  return false;
}

bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  const TargetMachine &TM = Fn.getTarget();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  RS = new RegScavenger();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI) {
    MachineBasicBlock &MBB = *MFI;
    Modified |= LoadStoreMultipleOpti(MBB);
    Modified |= MergeReturnIntoLDM(MBB);
  }

  delete RS;
  return Modified;
}

/// ARMPreAllocLoadStoreOpt - Pre-register allocation pass that moves
/// loads / stores from consecutive locations closer together to make it more
/// likely they will be combined later.
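///
/// For example (illustrative), if "ldr r1, [r0]" and "ldr r2, [r0, #4]" are
/// separated by unrelated instructions, they are moved next to each other so
/// that the post-allocation pass above (or the LDRD / STRD formation below)
/// can combine them.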
namespace {
  struct VISIBILITY_HIDDEN ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
    static char ID;
    ARMPreAllocLoadStoreOpt() : MachineFunctionPass(&ID) {}

    const TargetData *TD;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    const ARMSubtarget *STI;
    MachineRegisterInfo *MRI;

    virtual bool runOnMachineFunction(MachineFunction &Fn);

    virtual const char *getPassName() const {
      return "ARM pre- register allocation load / store optimization pass";
    }

  private:
    bool SatisfyLdStDWordlignment(MachineInstr *MI);
    bool RescheduleOps(MachineBasicBlock *MBB,
                       SmallVector<MachineInstr*, 4> &Ops,
                       unsigned Base, bool isLd,
                       DenseMap<MachineInstr*, unsigned> &MI2LocMap);
    bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
  };
  char ARMPreAllocLoadStoreOpt::ID = 0;
}

bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  TD  = Fn.getTarget().getTargetData();
  TII = Fn.getTarget().getInstrInfo();
  TRI = Fn.getTarget().getRegisterInfo();
  STI = &Fn.getTarget().getSubtarget<ARMSubtarget>();
  MRI = &Fn.getRegInfo();

  bool Modified = false;
  for (MachineFunction::iterator MFI = Fn.begin(), E = Fn.end(); MFI != E;
       ++MFI)
    Modified |= RescheduleLoadStoreInstrs(MFI);

  return Modified;
}

static bool IsSafeToMove(bool isLd, unsigned Base,
                         MachineBasicBlock::iterator I,
                         MachineBasicBlock::iterator E,
                         SmallPtrSet<MachineInstr*, 4> MoveOps,
                         const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
  // FIXME: This is overly conservative. We should make use of alias information
  // if possible.
  while (++I != E) {
    const TargetInstrDesc &TID = I->getDesc();
    if (TID.isCall() || TID.isTerminator() || TID.hasUnmodeledSideEffects())
      return false;
    if (isLd && TID.mayStore())
      return false;

    // It's not safe to move the first 'str' down.
    if (TID.mayStore() && !MoveOps.count(&*I))
      return false;

    for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (MO.isReg() && MO.isDef() && TRI->regsOverlap(MO.getReg(), Base))
        return false;
    }
  }
  return true;
}
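
/// SatisfyLdStDWordlignment - Return true if the load / store's single memory
/// operand is non-volatile and aligned enough for a doubleword (LDRD / STRD)
/// access: the preferred i64 alignment on v6+, 8 bytes otherwise.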
bool ARMPreAllocLoadStoreOpt::SatisfyLdStDWordlignment(MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return false;

  unsigned Align = MI->memoperands_begin()->getAlignment();
  unsigned ReqAlign = STI->hasV6Ops()
    ? TD->getPrefTypeAlignment(Type::Int64Ty) : 8; // Pre-v6 needs 8-byte alignment.
  return Align >= ReqAlign;
}
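
/// RescheduleOps - Move loads / stores of the same base register next to each
/// other so they can later be combined. When exactly two word ops meet the
/// alignment check above and their offset fits, they are rewritten directly
/// into an LDRD / STRD, and register allocation hints are added so the two
/// destination registers end up as an even / odd pair.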
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                            SmallVector<MachineInstr*, 4> &Ops,
                                            unsigned Base, bool isLd,
                                            DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
  bool RetVal = false;

  // Sort by offset (in reverse order).
  std::sort(Ops.begin(), Ops.end(), OffsetCompare());

  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  // 2. Any gaps.
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = 0;
    MachineInstr *LastOp = 0;
    int LastOffset = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    for (int i = Ops.size() - 1; i >= 0; --i) {
      MachineInstr *Op = Ops[i];
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
        FirstLoc = Loc;
        FirstOp = Op;
      }
      if (Loc >= LastLoc) {
        LastLoc = Loc;
        LastOp = Op;
      }

      int Offset = getMemoryOpOffset(Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
      if (LastBytes) {
        if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
          break;
      }
      LastOffset = Offset;
      LastBytes = Bytes;
      if (++NumMove == 8) // FIXME: Tune this limit.
        break;
    }

    SmallPtrSet<MachineInstr*, 4> MoveOps;
    for (int i = NumMove-1; i >= 0; --i)
      MoveOps.insert(Ops[i]);

    // Be conservative, if the instructions are too far apart, don't
    // move them. We want to limit the increase of register pressure.
    bool DoMove = (LastLoc - FirstLoc) < NumMove*4;
    if (DoMove)
      DoMove = IsSafeToMove(isLd, Base, FirstOp, LastOp, MoveOps, TRI);
    if (!DoMove) {
      for (unsigned i = 0; i != NumMove; ++i)
        Ops.pop_back();
    } else {
      // This is the new location for the loads / stores.
      MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
      while (InsertPos != MBB->end() && MoveOps.count(InsertPos))
        ++InsertPos;

      // If we are moving a pair of loads / stores, see if it makes sense
      // to try to allocate a pair of registers that can form a register pair.
      unsigned PairOpcode = 0;
      unsigned Offset = 0;
      // Make sure the alignment requirement is met.
      if (NumMove == 2 && SatisfyLdStDWordlignment(Ops.back())) {
        int Opcode = Ops.back()->getOpcode();
        // FIXME: FLDS / FSTS -> FLDD / FSTD
        if (Opcode == ARM::LDR)
          PairOpcode = ARM::LDRD;
        else if (Opcode == ARM::STR)
          PairOpcode = ARM::STRD;
      }
      if (PairOpcode) {
        // Then make sure the immediate offset fits.
        int OffImm = getMemoryOpOffset(Ops.back());
        ARM_AM::AddrOpc AddSub = ARM_AM::add;
        if (OffImm < 0) {
          AddSub = ARM_AM::sub;
          OffImm = -OffImm;
        }
        if (OffImm >= 256) // 8 bits
          PairOpcode = 0;  // Fall back to LDR / STR.
        else
          Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
      }

      if (!PairOpcode) {
        for (unsigned i = 0; i != NumMove; ++i) {
          MachineInstr *Op = Ops.back();
          Ops.pop_back();
          MBB->splice(InsertPos, MBB, Op);
        }
      } else {
        // Form the pair instruction instead.
        unsigned EvenReg = 0, OddReg = 0;
        unsigned BaseReg = 0, OffReg = 0, PredReg = 0;
        ARMCC::CondCodes Pred = ARMCC::AL;
        DebugLoc dl;
        for (unsigned i = 0; i != NumMove; ++i) {
          MachineInstr *Op = Ops.back();
          Ops.pop_back();
          unsigned Reg = Op->getOperand(0).getReg();
          if (i == 0) {
            EvenReg = Reg;
            BaseReg = Op->getOperand(1).getReg();
            OffReg = Op->getOperand(2).getReg();
            Pred = getInstrPredicate(Op, PredReg);
            dl = Op->getDebugLoc();
          } else
            OddReg = Reg;
          MBB->erase(Op);
        }

        if (isLd)
          BuildMI(*MBB, InsertPos, dl, TII->get(PairOpcode))
            .addReg(EvenReg, RegState::Define)
            .addReg(OddReg, RegState::Define)
            .addReg(BaseReg).addReg(0).addImm(Offset)
            .addImm(Pred).addReg(PredReg);
        else
          BuildMI(*MBB, InsertPos, dl, TII->get(PairOpcode))
            .addReg(EvenReg)
            .addReg(OddReg)
            .addReg(BaseReg).addReg(0).addImm(Offset)
            .addImm(Pred).addReg(PredReg);

        // Add register allocation hints to form register pairs.
        MRI->setRegAllocationHint(EvenReg, ARMRI::RegPairEven, OddReg);
        MRI->setRegAllocationHint(OddReg, ARMRI::RegPairOdd, EvenReg);
      }

      NumLdStMoved += NumMove;
      RetVal = true;
    }
  }

  return RetVal;
}
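
/// RescheduleLoadStoreInstrs - Scan the basic block (stopping at call /
/// terminator barriers), bucket unpredicated loads and stores by base
/// register, and run RescheduleOps on every bucket with more than one entry.
/// A repeated base+offset combination ends the current scan so the chains
/// collected so far are processed first.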
bool
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;

  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2LdsMap;
  DenseMap<unsigned, SmallVector<MachineInstr*, 4> > Base2StsMap;
  SmallVector<unsigned, 4> LdBases;
  SmallVector<unsigned, 4> StBases;

  unsigned Loc = 0;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  while (MBBI != E) {
    for (; MBBI != E; ++MBBI) {
      MachineInstr *MI = MBBI;
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.isCall() || TID.isTerminator()) {
        // Stop at barriers.
        ++MBBI;
        break;
      }

      MI2LocMap[MI] = Loc++;
      if (!isMemoryOp(MI))
        continue;
      unsigned PredReg = 0;
      if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
        continue;

      int Opcode = MI->getOpcode();
      bool isLd = Opcode == ARM::LDR ||
        Opcode == ARM::FLDS || Opcode == ARM::FLDD;
      unsigned Base = MI->getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);

      bool StopHere = false;
      if (isLd) {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2LdsMap.find(Base);
        if (BI != Base2LdsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2LdsMap[Base] = MIs;
          LdBases.push_back(Base);
        }
      } else {
        DenseMap<unsigned, SmallVector<MachineInstr*, 4> >::iterator BI =
          Base2StsMap.find(Base);
        if (BI != Base2StsMap.end()) {
          for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
            if (Offset == getMemoryOpOffset(BI->second[i])) {
              StopHere = true;
              break;
            }
          }
          if (!StopHere)
            BI->second.push_back(MI);
        } else {
          SmallVector<MachineInstr*, 4> MIs;
          MIs.push_back(MI);
          Base2StsMap[Base] = MIs;
          StBases.push_back(Base);
        }
      }
      // Found a duplicate (a base+offset combination that's seen earlier). Backtrack.

    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVector<MachineInstr*, 4> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1)
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    }

    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVector<MachineInstr*, 4> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1)
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
    }

    Base2LdsMap.clear();
    Base2StsMap.clear();
    LdBases.clear();
    StBases.clear();
  }

  return RetVal;
}

/// createARMLoadStoreOptimizationPass - returns an instance of the load / store
/// optimization pass.
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
  if (PreAlloc)
    return new ARMPreAllocLoadStoreOpt();
  return new ARMLoadStoreOpt();
}