1 //===- ARMInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the ARM implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "ARMInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMGenInstrInfo.inc"
18 #include "ARMMachineFunctionInfo.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/LiveVariables.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineJumpTableInfo.h"
24 #include "llvm/Target/TargetAsmInfo.h"
25 #include "llvm/Support/CommandLine.h"
// Hidden command-line flag gating the 2-address -> 3-address conversion
// implemented in convertToThreeAddress() below.
28 static cl::opt<bool> EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
29 cl::desc("Enable ARM 2-addr to 3-addr conv"));
// Append the default (always-execute) ARM predicate operands to MIB:
// an ARMCC::AL condition immediate followed by reg 0 (no predicate register).
32 const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
33 return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
// NOTE(review): closing brace elided in this extraction.
// Append the default condition-code ('s'-bit) operand to MIB.
// NOTE(review): function body elided in this extraction — presumably adds
// reg 0 (no CPSR def); verify against the original file.
37 const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
// Construct ARM instruction info from the tablegen'd instruction table
// (ARMInsts, included from ARMGenInstrInfo.inc).
// NOTE(review): remainder of the initializer list / body elided in this
// extraction.
41 ARMInstrInfo::ARMInstrInfo(const ARMSubtarget &STI)
42 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
// Pointers on ARM live in the general-purpose register class.
46 const TargetRegisterClass *ARMInstrInfo::getPointerRegClass() const {
47 return &ARM::GPRRegClass;
50 /// Return true if the instruction is a register to register move and
51 /// leave the source and dest operands in the passed parameters.
53 bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
54 unsigned &SrcReg, unsigned &DstReg) const {
55 unsigned oc = MI.getOpcode();
// NOTE(review): the switch on 'oc' and its case labels are elided in this
// extraction; the two visible arms both follow the MOV convention
// operand 0 = destination, operand 1 = source.
61 SrcReg = MI.getOperand(1).getReg();
62 DstReg = MI.getOperand(0).getReg();
// Sanity-check the operand layout before trusting it for a MOV.
66 assert(MI.getDesc().getNumOperands() >= 2 &&
67 MI.getOperand(0).isRegister() &&
68 MI.getOperand(1).isRegister() &&
69 "Invalid ARM MOV instruction");
70 SrcReg = MI.getOperand(1).getReg();
71 DstReg = MI.getOperand(0).getReg();
// If MI is a direct load from a stack slot (frame-index base, no offset),
// set FrameIndex and return the destination register; otherwise the
// function falls through (return-0 path elided in this extraction).
76 unsigned ARMInstrInfo::isLoadFromStackSlot(MachineInstr *MI, int &FrameIndex) const{
77 switch (MI->getOpcode()) {
// NOTE(review): case labels elided. First pattern: an ARM load with a
// register+immediate addressing mode — require no offset register and a
// zero immediate so the access is exactly [FI].
80 if (MI->getOperand(1).isFrameIndex() &&
81 MI->getOperand(2).isRegister() &&
82 MI->getOperand(3).isImmediate() &&
83 MI->getOperand(2).getReg() == 0 &&
84 MI->getOperand(3).getImm() == 0) {
85 FrameIndex = MI->getOperand(1).getIndex();
86 return MI->getOperand(0).getReg();
// Second pattern: immediate-only addressing — require a zero offset.
91 if (MI->getOperand(1).isFrameIndex() &&
92 MI->getOperand(2).isImmediate() &&
93 MI->getOperand(2).getImm() == 0) {
94 FrameIndex = MI->getOperand(1).getIndex();
95 return MI->getOperand(0).getReg();
// Third pattern: same immediate-only shape for another opcode (label elided).
99 if (MI->getOperand(1).isFrameIndex() &&
100 MI->getOperand(2).isImmediate() &&
101 MI->getOperand(2).getImm() == 0) {
102 FrameIndex = MI->getOperand(1).getIndex();
103 return MI->getOperand(0).getReg();
// Mirror of isLoadFromStackSlot: if MI is a direct store to a stack slot,
// set FrameIndex and return the stored (source) register.
110 unsigned ARMInstrInfo::isStoreToStackSlot(MachineInstr *MI, int &FrameIndex) const {
111 switch (MI->getOpcode()) {
// NOTE(review): case labels elided. First pattern: register+immediate
// addressing — no offset register, zero immediate, i.e. exactly [FI].
114 if (MI->getOperand(1).isFrameIndex() &&
115 MI->getOperand(2).isRegister() &&
116 MI->getOperand(3).isImmediate() &&
117 MI->getOperand(2).getReg() == 0 &&
118 MI->getOperand(3).getImm() == 0) {
119 FrameIndex = MI->getOperand(1).getIndex();
120 return MI->getOperand(0).getReg();
// Second pattern: immediate-only addressing with zero offset.
125 if (MI->getOperand(1).isFrameIndex() &&
126 MI->getOperand(2).isImmediate() &&
127 MI->getOperand(2).getImm() == 0) {
128 FrameIndex = MI->getOperand(1).getIndex();
129 return MI->getOperand(0).getReg();
// Third pattern: same shape for another opcode (label elided).
133 if (MI->getOperand(1).isFrameIndex() &&
134 MI->getOperand(2).isImmediate() &&
135 MI->getOperand(2).getImm() == 0) {
136 FrameIndex = MI->getOperand(1).getIndex();
137 return MI->getOperand(0).getReg();
// Re-emit the defining instruction 'Orig' at insertion point I, defining
// DestReg instead of its original destination.
144 void ARMInstrInfo::reMaterialize(MachineBasicBlock &MBB,
145 MachineBasicBlock::iterator I,
147 const MachineInstr *Orig) const {
// MOVi2pieces cannot simply be cloned; rematerialize it as a constant-pool
// load via the register info helper.
148 if (Orig->getOpcode() == ARM::MOVi2pieces) {
149 RI.emitLoadConstPool(MBB, I, DestReg, Orig->getOperand(1).getImm(),
150 Orig->getOperand(2).getImm(),
151 Orig->getOperand(3).getReg(), this, false);
// Default path: clone the instruction and retarget its def to DestReg.
// NOTE(review): the MBB.insert of the clone is elided in this extraction.
155 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
156 MI->getOperand(0).setReg(DestReg);
// Map a pre-/post-indexed load/store opcode to its un-indexed equivalent.
// NOTE(review): the switch header and nearly all case bodies are elided in
// this extraction; only two post-indexed load labels are visible.
160 static unsigned getUnindexedOpcode(unsigned Opc) {
173 case ARM::LDRSH_POST:
176 case ARM::LDRSB_POST:
// Convert a pre-/post-indexed load or store into an un-indexed memory op
// plus a separate ADD/SUB that performs the base-register writeback,
// preserving LiveVariables kill/dead info. Returns NULL when the
// transformation is not profitable or not possible.
192 ARMInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
193 MachineBasicBlock::iterator &MBBI,
194 LiveVariables *LV) const {
198 MachineInstr *MI = MBBI;
199 MachineFunction &MF = *MI->getParent()->getParent();
200 unsigned TSFlags = MI->getDesc().TSFlags;
// Only indexed (pre/post) memory ops are candidates.
202 switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
203 default: return NULL;
204 case ARMII::IndexModePre:
207 case ARMII::IndexModePost:
211 // Try splitting an indexed load / store into an un-indexed one plus an add/sub
213 unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
217 MachineInstr *UpdateMI = NULL;
218 MachineInstr *MemMI = NULL;
219 unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
220 const TargetInstrDesc &TID = MI->getDesc();
221 unsigned NumOps = TID.getNumOperands();
222 bool isLoad = !TID.mayStore();
// Operand layout: loads define (value, writeback) then base; stores define
// only the writeback in operand 0. The offset/imm/pred trail the operand list.
223 const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
224 const MachineOperand &Base = MI->getOperand(2);
225 const MachineOperand &Offset = MI->getOperand(NumOps-3);
226 unsigned WBReg = WB.getReg();
227 unsigned BaseReg = Base.getReg();
228 unsigned OffReg = Offset.getReg();
229 unsigned OffImm = MI->getOperand(NumOps-2).getImm();
230 ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
233 assert(false && "Unknown indexed op!");
// AddrMode2: 12-bit immediate or shifted-register offset.
235 case ARMII::AddrMode2: {
236 bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
237 unsigned Amt = ARM_AM::getAM2Offset(OffImm);
239 int SOImmVal = ARM_AM::getSOImmVal(Amt);
241 // Can't encode it in a so_imm operand. This transformation will
242 // add more than 1 instruction. Abandon!
// Immediate offset: fold into an ADDri/SUBri writeback.
244 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
245 .addReg(BaseReg).addImm(SOImmVal)
246 .addImm(Pred).addReg(0).addReg(0);
247 } else if (Amt != 0) {
// Shifted-register offset: use ADDrs/SUBrs with the encoded shift.
248 ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
249 unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
250 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
251 .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
252 .addImm(Pred).addReg(0).addReg(0);
// Plain register offset: ADDrr/SUBrr.
254 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
255 .addReg(BaseReg).addReg(OffReg)
256 .addImm(Pred).addReg(0).addReg(0);
// AddrMode3: 8-bit immediate or register offset.
259 case ARMII::AddrMode3 : {
260 bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
261 unsigned Amt = ARM_AM::getAM3Offset(OffImm);
263 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
264 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
265 .addReg(BaseReg).addImm(Amt)
266 .addImm(Pred).addReg(0).addReg(0);
268 UpdateMI = BuildMI(MF, get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
269 .addReg(BaseReg).addReg(OffReg)
270 .addImm(Pred).addReg(0).addReg(0);
275 std::vector<MachineInstr*> NewMIs;
// Pre-index: writeback happens first, memory op addresses through WBReg.
278 MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
279 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
281 MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
282 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
// NewMIs is filled in reverse program order (kill-scan below relies on it).
283 NewMIs.push_back(MemMI);
284 NewMIs.push_back(UpdateMI);
// Post-index: memory op uses the old base, writeback comes after.
287 MemMI = BuildMI(MF, get(MemOpc), MI->getOperand(0).getReg())
288 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
290 MemMI = BuildMI(MF, get(MemOpc)).addReg(MI->getOperand(1).getReg())
291 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
293 UpdateMI->getOperand(0).setIsDead();
294 NewMIs.push_back(UpdateMI);
295 NewMIs.push_back(MemMI);
298 // Transfer LiveVariables states, kill / dead info.
299 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
300 MachineOperand &MO = MI->getOperand(i);
301 if (MO.isRegister() && MO.getReg() &&
302 TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
303 unsigned Reg = MO.getReg();
306 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
// A def of WBReg now lives on the update instruction; all else on the mem op.
308 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
310 LV->addVirtualRegisterDead(Reg, NewMI);
312 if (MO.isUse() && MO.isKill()) {
313 for (unsigned j = 0; j < 2; ++j) {
314 // Look at the two new MI's in reverse order.
315 MachineInstr *NewMI = NewMIs[j];
316 if (!NewMI->readsRegister(Reg))
318 LV->addVirtualRegisterKilled(Reg, NewMI);
319 if (VI.removeKill(MI))
320 VI.Kills.push_back(NewMI);
// Insert in program order (NewMIs was built reversed).
328 MFI->insert(MBBI, NewMIs[1]);
329 MFI->insert(MBBI, NewMIs[0]);
// Analyze the terminators of MBB. On success returns false and fills in
// TBB/FBB/Cond per the TargetInstrInfo contract; returns true when the
// branch structure cannot be understood.
334 bool ARMInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
335 MachineBasicBlock *&FBB,
336 SmallVectorImpl<MachineOperand> &Cond) const {
337 // If the block has no terminators, it just falls into the block after it.
338 MachineBasicBlock::iterator I = MBB.end();
339 if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
342 // Get the last instruction in the block.
343 MachineInstr *LastInst = I;
345 // If there is only one terminator instruction, process it.
346 unsigned LastOpc = LastInst->getOpcode();
347 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
// Unconditional branch: single successor, no condition.
348 if (LastOpc == ARM::B || LastOpc == ARM::tB) {
349 TBB = LastInst->getOperand(0).getMBB();
352 if (LastOpc == ARM::Bcc || LastOpc == ARM::tBcc) {
353 // Block ends with fall-through condbranch.
354 TBB = LastInst->getOperand(0).getMBB();
// Cond = [condition code imm, predicate register].
355 Cond.push_back(LastInst->getOperand(1));
356 Cond.push_back(LastInst->getOperand(2));
359 return true; // Can't handle indirect branch.
362 // Get the instruction before it if it is a terminator.
363 MachineInstr *SecondLastInst = I;
365 // If there are three terminators, we don't know what sort of block this is.
366 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
369 // If the block ends with ARM::B/ARM::tB and a ARM::Bcc/ARM::tBcc, handle it.
370 unsigned SecondLastOpc = SecondLastInst->getOpcode();
371 if ((SecondLastOpc == ARM::Bcc && LastOpc == ARM::B) ||
372 (SecondLastOpc == ARM::tBcc && LastOpc == ARM::tB)) {
373 TBB = SecondLastInst->getOperand(0).getMBB();
374 Cond.push_back(SecondLastInst->getOperand(1));
375 Cond.push_back(SecondLastInst->getOperand(2));
376 FBB = LastInst->getOperand(0).getMBB();
380 // If the block ends with two unconditional branches, handle it. The second
381 // one is not executed, so remove it.
382 if ((SecondLastOpc == ARM::B || SecondLastOpc==ARM::tB) &&
383 (LastOpc == ARM::B || LastOpc == ARM::tB)) {
384 TBB = SecondLastInst->getOperand(0).getMBB();
386 I->eraseFromParent();
390 // Likewise if it ends with a branch table followed by an unconditional branch.
391 // The branch folder can create these, and we must get rid of them for
392 // correctness of Thumb constant islands.
393 if ((SecondLastOpc == ARM::BR_JTr || SecondLastOpc==ARM::BR_JTm ||
394 SecondLastOpc == ARM::BR_JTadd || SecondLastOpc==ARM::tBR_JTr) &&
395 (LastOpc == ARM::B || LastOpc == ARM::tB)) {
397 I->eraseFromParent();
401 // Otherwise, can't handle this.
// Remove up to two branch instructions (conditional and/or unconditional)
// from the end of MBB; the elided return statements report how many were
// removed (0, 1, or 2 per the TargetInstrInfo contract).
406 unsigned ARMInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
407 MachineFunction &MF = *MBB.getParent();
408 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Thumb functions use the tB/tBcc opcodes instead of B/Bcc.
409 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
410 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;
412 MachineBasicBlock::iterator I = MBB.end();
413 if (I == MBB.begin()) return 0;
415 if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
418 // Remove the branch.
419 I->eraseFromParent();
// Check for a second (conditional) branch preceding the one just removed.
423 if (I == MBB.begin()) return 1;
425 if (I->getOpcode() != BccOpc)
428 // Remove the branch.
429 I->eraseFromParent();
// Insert branch instruction(s) at the end of MBB: an unconditional branch,
// a fall-through conditional branch, or a two-way conditional branch,
// depending on Cond/FBB. Cond is [condcode imm, predicate reg] as produced
// by AnalyzeBranch.
433 unsigned ARMInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
434 MachineBasicBlock *FBB,
435 const SmallVectorImpl<MachineOperand> &Cond) const {
436 MachineFunction &MF = *MBB.getParent();
437 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Pick Thumb or ARM branch opcodes to match the current function.
438 int BOpc = AFI->isThumbFunction() ? ARM::tB : ARM::B;
439 int BccOpc = AFI->isThumbFunction() ? ARM::tBcc : ARM::Bcc;
441 // Shouldn't be a fall through.
442 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
443 assert((Cond.size() == 2 || Cond.size() == 0) &&
444 "ARM branch conditions have two components!");
// One-branch cases (FBB == 0): unconditional, or conditional fall-through.
447 if (Cond.empty()) // Unconditional branch?
448 BuildMI(&MBB, get(BOpc)).addMBB(TBB);
450 BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
451 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
455 // Two-way conditional branch.
456 BuildMI(&MBB, get(BccOpc)).addMBB(TBB)
457 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
458 BuildMI(&MBB, get(BOpc)).addMBB(FBB);
// Emit a register-to-register copy at insertion point I, choosing the
// opcode by register class: tMOVr/MOVr for GPR (Thumb vs ARM), FCPYS for
// single-precision FP, FCPYD for double-precision FP.
462 void ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
463 MachineBasicBlock::iterator I,
464 unsigned DestReg, unsigned SrcReg,
465 const TargetRegisterClass *DestRC,
466 const TargetRegisterClass *SrcRC) const {
// Cross-class copies are not implemented.
467 if (DestRC != SrcRC) {
468 cerr << "Not yet supported!";
472 if (DestRC == ARM::GPRRegisterClass) {
473 MachineFunction &MF = *MBB.getParent();
474 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
475 if (AFI->isThumbFunction())
476 BuildMI(MBB, I, get(ARM::tMOVr), DestReg).addReg(SrcReg);
// ARM MOVr carries predicate and cc-out operands; add the defaults.
478 AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, get(ARM::MOVr), DestReg)
480 } else if (DestRC == ARM::SPRRegisterClass)
481 AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYS), DestReg)
483 else if (DestRC == ARM::DPRRegisterClass)
484 AddDefaultPred(BuildMI(MBB, I, get(ARM::FCPYD), DestReg)
// Append MO to MIB, dispatching on the operand kind (register, immediate,
// or frame index). Asserts on any other operand kind.
490 static const MachineInstrBuilder &ARMInstrAddOperand(MachineInstrBuilder &MIB,
491 MachineOperand &MO) {
493 MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit());
494 else if (MO.isImmediate())
495 MIB = MIB.addImm(MO.getImm());
496 else if (MO.isFrameIndex())
497 MIB = MIB.addFrameIndex(MO.getIndex());
499 assert(0 && "Unknown operand for ARMInstrAddOperand!");
// Spill SrcReg to stack slot FI: tSpill (Thumb) or predicated STR for GPRs,
// FSTD for double-precision, FSTS for single-precision registers.
505 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
506 unsigned SrcReg, bool isKill, int FI,
507 const TargetRegisterClass *RC) const {
508 if (RC == ARM::GPRRegisterClass) {
509 MachineFunction &MF = *MBB.getParent();
510 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
511 if (AFI->isThumbFunction())
512 BuildMI(MBB, I, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
513 .addFrameIndex(FI).addImm(0);
// ARM STR takes an extra offset register (0 = none) plus predicate ops.
515 AddDefaultPred(BuildMI(MBB, I, get(ARM::STR))
516 .addReg(SrcReg, false, false, isKill)
517 .addFrameIndex(FI).addReg(0).addImm(0));
518 } else if (RC == ARM::DPRRegisterClass) {
519 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTD))
520 .addReg(SrcReg, false, false, isKill)
521 .addFrameIndex(FI).addImm(0));
523 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
524 AddDefaultPred(BuildMI(MBB, I, get(ARM::FSTS))
525 .addReg(SrcReg, false, false, isKill)
526 .addFrameIndex(FI).addImm(0));
// Build (but do not insert) a store of SrcReg to the address described by
// the Addr operand list, appending the new instruction(s) to NewMIs.
530 void ARMInstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
532 SmallVectorImpl<MachineOperand> &Addr,
533 const TargetRegisterClass *RC,
534 SmallVectorImpl<MachineInstr*> &NewMIs) const {
536 if (RC == ARM::GPRRegisterClass) {
537 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Thumb path builds the store immediately and returns early.
538 if (AFI->isThumbFunction()) {
// tSpill only works with a frame-index base; otherwise use tSTR.
539 Opc = Addr[0].isFrameIndex() ? ARM::tSpill : ARM::tSTR;
540 MachineInstrBuilder MIB =
541 BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
542 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
543 MIB = ARMInstrAddOperand(MIB, Addr[i]);
544 NewMIs.push_back(MIB);
// NOTE(review): Opc selections for the ARM GPR / DPR / SPR paths are
// elided in this extraction.
548 } else if (RC == ARM::DPRRegisterClass) {
551 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Common (non-Thumb) path: build the store and append address operands.
555 MachineInstrBuilder MIB =
556 BuildMI(MF, get(Opc)).addReg(SrcReg, false, false, isKill);
557 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
558 MIB = ARMInstrAddOperand(MIB, Addr[i]);
560 NewMIs.push_back(MIB);
// Reload DestReg from stack slot FI: tRestore (Thumb) or predicated LDR
// for GPRs, FLDD for double-precision, FLDS for single-precision registers.
565 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
566 unsigned DestReg, int FI,
567 const TargetRegisterClass *RC) const {
568 if (RC == ARM::GPRRegisterClass) {
569 MachineFunction &MF = *MBB.getParent();
570 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
571 if (AFI->isThumbFunction())
572 BuildMI(MBB, I, get(ARM::tRestore), DestReg)
573 .addFrameIndex(FI).addImm(0);
// ARM LDR takes an extra offset register (0 = none) plus predicate ops.
575 AddDefaultPred(BuildMI(MBB, I, get(ARM::LDR), DestReg)
576 .addFrameIndex(FI).addReg(0).addImm(0));
577 } else if (RC == ARM::DPRRegisterClass) {
578 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDD), DestReg)
579 .addFrameIndex(FI).addImm(0));
581 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
582 AddDefaultPred(BuildMI(MBB, I, get(ARM::FLDS), DestReg)
583 .addFrameIndex(FI).addImm(0));
// Build (but do not insert) a load of DestReg from the address described by
// the Addr operand list, appending the new instruction(s) to NewMIs.
587 void ARMInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
588 SmallVectorImpl<MachineOperand> &Addr,
589 const TargetRegisterClass *RC,
590 SmallVectorImpl<MachineInstr*> &NewMIs) const {
592 if (RC == ARM::GPRRegisterClass) {
593 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
// Thumb path builds the load immediately and returns early.
594 if (AFI->isThumbFunction()) {
// tRestore only works with a frame-index base; otherwise use tLDR.
595 Opc = Addr[0].isFrameIndex() ? ARM::tRestore : ARM::tLDR;
596 MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
597 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
598 MIB = ARMInstrAddOperand(MIB, Addr[i]);
599 NewMIs.push_back(MIB);
// NOTE(review): Opc selections for the ARM GPR / DPR / SPR paths are
// elided in this extraction.
603 } else if (RC == ARM::DPRRegisterClass) {
606 assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
// Common (non-Thumb) path: build the load and append address operands.
610 MachineInstrBuilder MIB = BuildMI(MF, get(Opc), DestReg);
611 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
612 MIB = ARMInstrAddOperand(MIB, Addr[i]);
614 NewMIs.push_back(MIB);
// Thumb-only: spill the callee-saved registers with a single tPUSH.
// Non-Thumb functions (or an empty CSI list) decline, letting the generic
// spill code handle them.
618 bool ARMInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
619 MachineBasicBlock::iterator MI,
620 const std::vector<CalleeSavedInfo> &CSI) const {
621 MachineFunction &MF = *MBB.getParent();
622 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
623 if (!AFI->isThumbFunction() || CSI.empty())
626 MachineInstrBuilder MIB = BuildMI(MBB, MI, get(ARM::tPUSH));
// Iterate CSI in reverse so registers are pushed in the required order.
627 for (unsigned i = CSI.size(); i != 0; --i) {
628 unsigned Reg = CSI[i-1].getReg();
629 // Add the callee-saved register as live-in. It's killed at the spill.
631 MIB.addReg(Reg, false/*isDef*/,false/*isImp*/,true/*isKill*/);
// Thumb-only: restore the callee-saved registers with a single tPOP
// (or tPOP_RET when LR is restored in a non-vararg function, folding the
// return into the pop). Declines for non-Thumb or empty CSI.
636 bool ARMInstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
637 MachineBasicBlock::iterator MI,
638 const std::vector<CalleeSavedInfo> &CSI) const {
639 MachineFunction &MF = *MBB.getParent();
640 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
641 if (!AFI->isThumbFunction() || CSI.empty())
644 bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
645 MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP));
646 MBB.insert(MI, PopMI);
647 for (unsigned i = CSI.size(); i != 0; --i) {
648 unsigned Reg = CSI[i-1].getReg();
649 if (Reg == ARM::LR) {
650 // Special epilogue for vararg functions. See emitEpilogue
// Upgrade the pop to pop-and-return since LR is being restored to PC.
654 PopMI->setDesc(get(ARM::tPOP_RET));
// Each restored register becomes a def operand of the pop.
657 PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
// Fold a register move into a load from / store to a stack slot: operand
// index 0 (the def) folds to a store of the source, any other index folds
// to a load into the destination. Handles MOVr (ARM), tMOVr (Thumb),
// FCPYS and FCPYD moves; returns NULL when folding is not possible.
662 MachineInstr *ARMInstrInfo::foldMemoryOperand(MachineFunction &MF,
664 SmallVectorImpl<unsigned> &Ops,
// Only single-operand folds are supported.
666 if (Ops.size() != 1) return NULL;
668 unsigned OpNum = Ops[0];
669 unsigned Opc = MI->getOpcode();
670 MachineInstr *NewMI = NULL;
// --- ARM MOVr (case label elided in this extraction) ---
674 if (MI->getOperand(4).getReg() == ARM::CPSR)
675 // If it is updating CPSR, then it cannot be folded.
// Preserve the move's predicate on the new memory instruction.
677 unsigned Pred = MI->getOperand(2).getImm();
678 unsigned PredReg = MI->getOperand(3).getReg();
679 if (OpNum == 0) { // move -> store
680 unsigned SrcReg = MI->getOperand(1).getReg();
681 bool isKill = MI->getOperand(1).isKill();
682 NewMI = BuildMI(MF, get(ARM::STR)).addReg(SrcReg, false, false, isKill)
683 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
684 } else { // move -> load
685 unsigned DstReg = MI->getOperand(0).getReg();
686 bool isDead = MI->getOperand(0).isDead();
687 NewMI = BuildMI(MF, get(ARM::LDR)).addReg(DstReg, true, false, false, isDead)
688 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
// --- Thumb tMOVr (case label elided) ---
693 if (OpNum == 0) { // move -> store
694 unsigned SrcReg = MI->getOperand(1).getReg();
695 bool isKill = MI->getOperand(1).isKill();
696 if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
697 // tSpill cannot take a high register operand.
699 NewMI = BuildMI(MF, get(ARM::tSpill)).addReg(SrcReg, false, false, isKill)
700 .addFrameIndex(FI).addImm(0);
701 } else { // move -> load
702 unsigned DstReg = MI->getOperand(0).getReg();
703 if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
704 // tRestore cannot target a high register operand.
706 bool isDead = MI->getOperand(0).isDead();
707 NewMI = BuildMI(MF, get(ARM::tRestore))
708 .addReg(DstReg, true, false, false, isDead)
709 .addFrameIndex(FI).addImm(0);
// --- FCPYS: fold to FSTS / FLDS (case label elided) ---
714 unsigned Pred = MI->getOperand(2).getImm();
715 unsigned PredReg = MI->getOperand(3).getReg();
716 if (OpNum == 0) { // move -> store
717 unsigned SrcReg = MI->getOperand(1).getReg();
718 NewMI = BuildMI(MF, get(ARM::FSTS)).addReg(SrcReg).addFrameIndex(FI)
719 .addImm(0).addImm(Pred).addReg(PredReg);
720 } else { // move -> load
721 unsigned DstReg = MI->getOperand(0).getReg();
722 NewMI = BuildMI(MF, get(ARM::FLDS), DstReg).addFrameIndex(FI)
723 .addImm(0).addImm(Pred).addReg(PredReg);
// --- FCPYD: fold to FSTD / FLDD (case label elided) ---
728 unsigned Pred = MI->getOperand(2).getImm();
729 unsigned PredReg = MI->getOperand(3).getReg();
730 if (OpNum == 0) { // move -> store
731 unsigned SrcReg = MI->getOperand(1).getReg();
732 bool isKill = MI->getOperand(1).isKill();
733 NewMI = BuildMI(MF, get(ARM::FSTD)).addReg(SrcReg, false, false, isKill)
734 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
735 } else { // move -> load
736 unsigned DstReg = MI->getOperand(0).getReg();
737 bool isDead = MI->getOperand(0).isDead();
738 NewMI = BuildMI(MF, get(ARM::FLDD)).addReg(DstReg, true, false, false, isDead)
739 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
// Predicate companion to foldMemoryOperand: report whether the fold would
// succeed, using the same CPSR and Thumb high-register restrictions.
748 bool ARMInstrInfo::canFoldMemoryOperand(MachineInstr *MI,
749 SmallVectorImpl<unsigned> &Ops) const {
750 if (Ops.size() != 1) return false;
752 unsigned OpNum = Ops[0];
753 unsigned Opc = MI->getOpcode();
// ARM MOVr path (case label elided in this extraction).
757 // If it is updating CPSR, then it cannot be folded.
758 return MI->getOperand(4).getReg() != ARM::CPSR;
// Thumb tMOVr path (case label elided).
760 if (OpNum == 0) { // move -> store
761 unsigned SrcReg = MI->getOperand(1).getReg();
762 if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))
763 // tSpill cannot take a high register operand.
765 } else { // move -> load
766 unsigned DstReg = MI->getOperand(0).getReg();
767 if (RI.isPhysicalRegister(DstReg) && !RI.isLowRegister(DstReg))
768 // tRestore cannot target a high register operand.
// Return true if MBB's last instruction unconditionally leaves the block
// (return, unconditional branch, or jumptable branch), i.e. control never
// falls through to the next block.
781 bool ARMInstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
782 if (MBB.empty()) return false;
784 switch (MBB.back().getOpcode()) {
785 case ARM::BX_RET: // Return.
788 case ARM::tBX_RET_vararg:
791 case ARM::tB: // Uncond branch.
793 case ARM::BR_JTr: // Jumptable branch.
794 case ARM::BR_JTm: // Jumptable branch through mem.
795 case ARM::BR_JTadd: // Jumptable branch add to pc.
797 default: return false;
// Invert the branch condition in place by replacing the condition-code
// immediate (Cond[0]) with its opposite.
802 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
803 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
804 Cond[0].setImm(ARMCC::getOppositeCondition(CC));
// An instruction is predicated iff it has a predicate operand whose
// condition code is something other than AL (always).
808 bool ARMInstrInfo::isPredicated(const MachineInstr *MI) const {
809 int PIdx = MI->findFirstPredOperandIdx();
810 return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
// Apply predicate Pred ([condcode imm, predicate reg]) to MI. Unconditional
// branches are rewritten to their conditional form; other instructions have
// their existing predicate operands overwritten.
813 bool ARMInstrInfo::PredicateInstruction(MachineInstr *MI,
814 const SmallVectorImpl<MachineOperand> &Pred) const {
815 unsigned Opc = MI->getOpcode();
816 if (Opc == ARM::B || Opc == ARM::tB) {
// B/tB carry no predicate operands, so switch to Bcc/tBcc and append them.
817 MI->setDesc(get(Opc == ARM::B ? ARM::Bcc : ARM::tBcc));
818 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
819 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
823 int PIdx = MI->findFirstPredOperandIdx();
// Predicate operands come in pairs: condition imm at PIdx, reg at PIdx+1.
825 MachineOperand &PMO = MI->getOperand(PIdx);
826 PMO.setImm(Pred[0].getImm());
827 MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
// Return true if predicate Pred1 subsumes Pred2, i.e. any instruction
// executed under Pred2 would also execute under Pred1 (e.g. HS subsumes HI).
834 ARMInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
835 const SmallVectorImpl<MachineOperand> &Pred2) const{
836 if (Pred1.size() > 2 || Pred2.size() > 2)
839 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
840 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
// NOTE(review): the switch on CC1 and its case labels are elided in this
// extraction; only the per-case return expressions are visible.
850 return CC2 == ARMCC::HI;
852 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
854 return CC2 == ARMCC::GT;
856 return CC2 == ARMCC::LT;
// Scan MI for operands that define the predicate register (CPSR); matching
// operands are reported via Pred (push_back elided in this extraction).
860 bool ARMInstrInfo::DefinesPredicate(MachineInstr *MI,
861 std::vector<MachineOperand> &Pred) const {
862 const TargetInstrDesc &TID = MI->getDesc();
// Fast reject: without implicit defs or an optional def the instruction
// cannot touch CPSR.
863 if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
867 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
868 const MachineOperand &MO = MI->getOperand(i);
869 if (MO.isRegister() && MO.getReg() == ARM::CPSR) {
879 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
880 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
881 unsigned JTI) DISABLE_INLINE;
// Return the number of destination blocks in jump table JTI.
882 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
884 return JT[JTI].MBBs.size();
887 /// GetInstSize - Return the size of the specified MachineInstr.
889 unsigned ARMInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
890 const MachineBasicBlock &MBB = *MI->getParent();
891 const MachineFunction *MF = MBB.getParent();
892 const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();
894 // Basic size info comes from the TSFlags field.
895 const TargetInstrDesc &TID = MI->getDesc();
896 unsigned TSFlags = TID.TSFlags;
898 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
900 // If this machine instr is an inline asm, measure it.
901 if (MI->getOpcode() == ARM::INLINEASM)
902 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
905 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
907 assert(0 && "Unknown or unset size field for instr!");
909 case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
910 case ARMII::Size4Bytes: return 4; // Arm instruction.
911 case ARMII::Size2Bytes: return 2; // Thumb instruction.
912 case ARMII::SizeSpecial: {
913 switch (MI->getOpcode()) {
914 case ARM::CONSTPOOL_ENTRY:
915 // If this machine instr is a constant pool entry, its size is recorded as
// operand #2 (the size operand).
917 return MI->getOperand(2).getImm();
922 // These are jumptable branches, i.e. a branch followed by an inlined
923 // jumptable. The size is 4 + 4 * number of entries.
// The jump-table-index operand sits before the trailing predicate
// operands (if any).
924 unsigned NumOps = TID.getNumOperands();
925 MachineOperand JTOP =
926 MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
927 unsigned JTI = JTOP.getIndex();
928 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
929 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
930 assert(JTI < JT.size());
931 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
932 // aligned. The assembler / linker may add 2 byte padding just before
933 // the JT entries. The size does not include this padding; the
934 // constant islands pass does separate bookkeeping for it.
935 // FIXME: If we know the size of the function is less than (1 << 16) *2
936 // bytes, we can use 16-bit entries instead. Then there won't be an
// Branch itself: 2 bytes for Thumb tBR_JTr, else 4.
938 return getNumJTEntries(JT, JTI) * 4 +
939 (MI->getOpcode()==ARM::tBR_JTr ? 2 : 4);
942 // Otherwise, pseudo-instruction sizes are zero.
947 return 0; // Not reached