//===- ARMBaseInstrInfo.cpp - ARM Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "ARMBaseInstrInfo.h"
#include "ARMAddressingModes.h"
#include "ARMGenInstrInfo.inc"
#include "ARMMachineFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget &STI)
  : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)) {
}
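/// convertToThreeAddress - Split a pre/post-indexed load or store into an
/// un-indexed memory access plus a separate add/sub that performs the base
/// register writeback. Returns the replacement instruction, or NULL if the
/// conversion is not possible.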
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  if (!EnableARM3Addr)
    return NULL;
  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  unsigned TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }
  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;
  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned NumOps = TID.getNumOperands();
  bool isLoad = !TID.mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default:
    assert(false && "Unknown indexed op!");
    return NULL;
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrs) :
                             getOpcode(ARMII::ADDrs)), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBri) :
                             getOpcode(ARMII::ADDri)), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? getOpcode(ARMII::SUBrr) :
                             getOpcode(ARMII::ADDrr)), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }
  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }
  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.getReg() &&
          TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}
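// Branch analysis.
/// AnalyzeBranch - Analyze the terminators of MBB. Returns false (success)
/// with TBB/FBB/Cond filled in when the branch structure is understood, and
/// true when the block ends in something this code cannot reason about.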
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastOpc == getOpcode(ARMII::B)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (LastOpc == getOpcode(ARMII::Bcc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }
  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with an ARMII::Bcc followed by an ARMII::B, handle it.
  unsigned SecondLastOpc = SecondLastInst->getOpcode();
  if ((SecondLastOpc == getOpcode(ARMII::Bcc)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if ((SecondLastOpc == getOpcode(ARMII::B)) &&
      (LastOpc == getOpcode(ARMII::B))) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }
  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them
  // for correctness of Thumb constant islands.
  if ((SecondLastOpc == ARM::BR_JTr ||
       SecondLastOpc == ARM::BR_JTm ||
       SecondLastOpc == ARM::BR_JTadd ||
       SecondLastOpc == ARM::tBR_JTr ||
       SecondLastOpc == ARM::t2BR_JT) &&
      (LastOpc == getOpcode(ARMII::B))) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
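/// RemoveBranch - Remove the branch instructions at the end of MBB and
/// return how many were deleted (0, 1, or 2).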
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != BccOpc)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
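/// InsertBranch - Insert an unconditional (B), conditional (Bcc), or two-way
/// conditional branch at the end of MBB and return the number of
/// instructions inserted.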
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond) const {
  // FIXME this should probably have a DebugLoc argument
  DebugLoc dl = DebugLoc::getUnknownLoc();
  int BOpc = getOpcode(ARMII::B);
  int BccOpc = getOpcode(ARMII::Bcc);

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
    else
      BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
  return 2;
}
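/// ReverseBranchCondition - Invert the ARM condition code in Cond so the
/// branch tests the opposite condition. Returns false to indicate success.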
bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::B)) {
    MI->setDesc(get(getOpcode(ARMII::Bcc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
    return false;

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.getReg() == ARM::CPSR) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) DISABLE_INLINE;
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  return JT[JTI].MBBs.size();
}
/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const TargetAsmInfo *TAI = MF->getTarget().getTargetAsmInfo();

  // Basic size info comes from the TSFlags field.
  const TargetInstrDesc &TID = MI->getDesc();
  unsigned TSFlags = TID.TSFlags;

  switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
  default: {
    // If this machine instr is an inline asm, measure it.
    if (MI->getOpcode() == ARM::INLINEASM)
      return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
    if (MI->isLabel())
      return 0;
    switch (MI->getOpcode()) {
    default:
      llvm_unreachable("Unknown or unset size field for instr!");
    case TargetInstrInfo::IMPLICIT_DEF:
    case TargetInstrInfo::DECLARE:
    case TargetInstrInfo::DBG_LABEL:
    case TargetInstrInfo::EH_LABEL:
      return 0;
    }
    break;
  }
  case ARMII::Size8Bytes: return 8;          // ARM instruction x 2.
  case ARMII::Size4Bytes: return 4;          // ARM / Thumb2 instruction.
  case ARMII::Size2Bytes: return 2;          // Thumb1 instruction.
  case ARMII::SizeSpecial: {
    bool IsThumb1JT = false;
    switch (MI->getOpcode()) {
    case ARM::CONSTPOOL_ENTRY:
      // If this machine instr is a constant pool entry, its size is recorded
      // as operand #2.
      return MI->getOperand(2).getImm();
    case ARM::Int_eh_sjlj_setjmp:
      return 12;
    case ARM::tBR_JTr:
      IsThumb1JT = true;
      // Fallthrough
    case ARM::BR_JTr:
    case ARM::BR_JTm:
    case ARM::BR_JTadd:
    case ARM::t2BR_JT: {
      // These are jumptable branches, i.e. a branch followed by an inlined
      // jumptable. The size is 4 + 4 * number of entries.
      unsigned NumOps = TID.getNumOperands();
      MachineOperand JTOP =
        MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
      unsigned JTI = JTOP.getIndex();
      const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
      const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      assert(JTI < JT.size());
      // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
      // aligned. The assembler / linker may add 2 byte padding just before
      // the JT entries. The size does not include this padding; the
      // constant islands pass does separate bookkeeping for it.
      // FIXME: If we know the size of the function is less than (1 << 16) * 2
      // bytes, we can use 16-bit entries instead. Then there won't be an
      // alignment issue.
      return getNumJTEntries(JT, JTI) * 4 + (IsThumb1JT ? 2 : 4);
    }
    default:
      // Otherwise, pseudo-instruction sizes are zero.
      return 0;
    }
  }
  }
  return 0; // Not reached
}
/// Return true if the instruction is a register to register move and
/// leave the source and dest operands in the passed parameters.
///
bool
ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
                              unsigned &SrcReg, unsigned &DstReg,
                              unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
  SrcSubIdx = DstSubIdx = 0; // No sub-registers.

  switch (MI.getOpcode()) {
  default: break;
  case ARM::FCPYS:
  case ARM::FCPYD:
  case ARM::VMOVD:
  case ARM::VMOVQ: {
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  case ARM::MOVr:
  case ARM::tMOVr:
  case ARM::tMOVgpr2tgpr:
  case ARM::tMOVtgpr2gpr:
  case ARM::tMOVgpr2gpr:
  case ARM::t2MOVr: {
    assert(MI.getDesc().getNumOperands() >= 2 &&
           MI.getOperand(0).isReg() &&
           MI.getOperand(1).isReg() &&
           "Invalid ARM MOV instruction");
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    return true;
  }
  }

  return false;
}
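/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the register number of the destination and
/// set FrameIndex to the loaded stack slot. Otherwise return 0.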
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDR:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2LDRi12:
  case ARM::tRestore:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FLDD:
  case ARM::FLDS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
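/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the register number of the source and set
/// FrameIndex to the destination stack slot. Otherwise return 0.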
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STR:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::t2STRi12:
  case ARM::tSpill:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::FSTD:
  case ARM::FSTS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
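/// copyRegToReg - Emit a register-to-register copy of the form appropriate
/// for the register class (MOVr, FCPYS, FCPYD, or VMOVQ). Returns false if
/// the register classes differ or are not handled, true otherwise.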
bool
ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I,
                               unsigned DestReg, unsigned SrcReg,
                               const TargetRegisterClass *DestRC,
                               const TargetRegisterClass *SrcRC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (DestRC != SrcRC) {
    // Not yet supported!
    return false;
  }

  if (DestRC == ARM::GPRRegisterClass)
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::MOVr)),
                                        DestReg).addReg(SrcReg)));
  else if (DestRC == ARM::SPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::DPRRegisterClass)
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYD), DestReg)
                   .addReg(SrcReg));
  else if (DestRC == ARM::QPRRegisterClass)
    BuildMI(MBB, I, DL, get(ARM::VMOVQ), DestReg).addReg(SrcReg);
  else
    return false;

  return true;
}
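/// storeRegToStackSlot - Store SrcReg to stack slot FI, choosing the store
/// opcode (STR, FSTD, or FSTS) that matches the register class RC.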
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::STRrr)))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0));
  }
}
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC) const {
  DebugLoc DL = DebugLoc::getUnknownLoc();
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (RC == ARM::GPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(getOpcode(ARMII::LDRrr)), DestReg)
                   .addFrameIndex(FI).addReg(0).addImm(0));
  } else if (RC == ARM::DPRRegisterClass) {
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
                   .addFrameIndex(FI).addImm(0));
  } else {
    assert(RC == ARM::SPRRegisterClass && "Unknown regclass!");
    AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDS), DestReg)
                   .addFrameIndex(FI).addImm(0));
  }
}
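/// foldMemoryOperandImpl - Attempt to fold a register copy (MOVr, FCPYS, or
/// FCPYD) whose folded operand is a spill slot into a single load or store of
/// frame index FI. Returns the new instruction, or NULL if nothing is folded.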
MachineInstr *ARMBaseInstrInfo::
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                      const SmallVectorImpl<unsigned> &Ops, int FI) const {
  if (Ops.size() != 1) return NULL;

  unsigned OpNum = Ops[0];
  unsigned Opc = MI->getOpcode();
  MachineInstr *NewMI = NULL;
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    if (MI->getOperand(4).getReg() != ARM::CPSR) {
      unsigned Pred = MI->getOperand(2).getImm();
      unsigned PredReg = MI->getOperand(3).getReg();
      if (OpNum == 0) { // move -> store
        unsigned SrcReg = MI->getOperand(1).getReg();
        bool isKill = MI->getOperand(1).isKill();
        bool isUndef = MI->getOperand(1).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::STRrr)))
          .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      } else { // move -> load
        unsigned DstReg = MI->getOperand(0).getReg();
        bool isDead = MI->getOperand(0).isDead();
        bool isUndef = MI->getOperand(0).isUndef();
        NewMI = BuildMI(MF, MI->getDebugLoc(), get(getOpcode(ARMII::LDRrr)))
          .addReg(DstReg,
                  RegState::Define |
                  getDeadRegState(isDead) |
                  getUndefRegState(isUndef))
          .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
      }
    }
  }
  else if (Opc == ARM::FCPYS) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTS))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI)
        .addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDS))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }
  else if (Opc == ARM::FCPYD) {
    unsigned Pred = MI->getOperand(2).getImm();
    unsigned PredReg = MI->getOperand(3).getReg();
    if (OpNum == 0) { // move -> store
      unsigned SrcReg = MI->getOperand(1).getReg();
      bool isKill = MI->getOperand(1).isKill();
      bool isUndef = MI->getOperand(1).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FSTD))
        .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    } else { // move -> load
      unsigned DstReg = MI->getOperand(0).getReg();
      bool isDead = MI->getOperand(0).isDead();
      bool isUndef = MI->getOperand(0).isUndef();
      NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::FLDD))
        .addReg(DstReg,
                RegState::Define |
                getDeadRegState(isDead) |
                getUndefRegState(isUndef))
        .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
    }
  }

  return NewMI;
}
MachineInstr *
ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr* MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr* LoadMI) const {
  return 0;
}
bool
ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops) const {
  if (Ops.size() != 1) return false;

  unsigned Opc = MI->getOpcode();
  if (Opc == getOpcode(ARMII::MOVr)) {
    // If it is updating CPSR, then it cannot be folded.
    return MI->getOperand(4).getReg() != ARM::CPSR;
  } else if (Opc == ARM::FCPYS || Opc == ARM::FCPYD) {
    return true;
  } else if (Opc == ARM::VMOVD || Opc == ARM::VMOVQ) {
    return false; // FIXME
  }

  return false;
}