1 //===- ARMBaseInstrInfo.cpp - ARM Instruction Information -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Base ARM implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "ARMBaseInstrInfo.h"
16 #include "ARMAddressingModes.h"
17 #include "ARMConstantPoolValue.h"
18 #include "ARMGenInstrInfo.inc"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMRegisterInfo.h"
21 #include "llvm/Constants.h"
22 #include "llvm/Function.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/CodeGen/LiveVariables.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineInstrBuilder.h"
29 #include "llvm/CodeGen/MachineJumpTableInfo.h"
30 #include "llvm/CodeGen/MachineMemOperand.h"
31 #include "llvm/CodeGen/PseudoSourceValue.h"
32 #include "llvm/MC/MCAsmInfo.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/ErrorHandling.h"
// Command-line switches controlling optional ARM codegen behavior.
// NOTE(review): the embedded line numbers jump (…35→39, 40→43), so the
// `static cl::opt<bool>` declarator lines for both options are missing
// from this listing — confirm against the original file.
// Gates the 2-address -> 3-address conversion in convertToThreeAddress().
39 EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
40 cl::desc("Enable ARM 2-addr to 3-addr conv"));
// Allows NEON instructions to be treated as predicable (see isPredicable()).
43 PredicateNEON("predicate-neon", cl::Hidden,
44 cl::desc("Allow NEON instructions to be predicated"));
// Constructor: hands the generated ARM instruction table to the common
// TargetInstrInfoImpl base.
// NOTE(review): listing truncated — the rest of the initializer list and the
// constructor body (embedded lines 48+) are missing here.
46 ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
47 : TargetInstrInfoImpl(ARMInsts, array_lengthof(ARMInsts)),
// convertToThreeAddress - Split a pre/post-indexed ARM load/store into an
// un-indexed memory instruction plus a separate ADD/SUB that performs the
// base-register writeback, then transfer LiveVariables kill/dead info from
// the old instruction to the two new ones and insert them before MBBI.
// NOTE(review): this listing is truncated — the embedded line numbers skip
// values throughout (e.g. 55→60, 92→95, 190→198), so guard conditions,
// `break`s, `return`s and closing braces are missing; read alongside the
// original file.
52 ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
53 MachineBasicBlock::iterator &MBBI,
54 LiveVariables *LV) const {
55 // FIXME: Thumb2 support.
60 MachineInstr *MI = MBBI;
61 MachineFunction &MF = *MI->getParent()->getParent();
62 unsigned TSFlags = MI->getDesc().TSFlags;
// Only indexed (pre/post) loads/stores are candidates for splitting.
64 switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
66 case ARMII::IndexModePre:
69 case ARMII::IndexModePost:
73 // Try splitting an indexed load/store to an un-indexed one plus an add/sub
75 unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
79 MachineInstr *UpdateMI = NULL;
80 MachineInstr *MemMI = NULL;
81 unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
82 const TargetInstrDesc &TID = MI->getDesc();
83 unsigned NumOps = TID.getNumOperands();
84 bool isLoad = !TID.mayStore();
// Operand layout: loads carry the writeback reg in operand 1, stores in
// operand 0; base is operand 2; offset/imm/pred are the trailing operands.
85 const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
86 const MachineOperand &Base = MI->getOperand(2);
87 const MachineOperand &Offset = MI->getOperand(NumOps-3);
88 unsigned WBReg = WB.getReg();
89 unsigned BaseReg = Base.getReg();
90 unsigned OffReg = Offset.getReg();
91 unsigned OffImm = MI->getOperand(NumOps-2).getImm();
92 ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
95 assert(false && "Unknown indexed op!");
// Build the writeback ADD/SUB according to the addressing mode.
97 case ARMII::AddrMode2: {
98 bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
99 unsigned Amt = ARM_AM::getAM2Offset(OffImm);
101 if (ARM_AM::getSOImmVal(Amt) == -1)
102 // Can't encode it in a so_imm operand. This transformation will
103 // add more than 1 instruction. Abandon!
105 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
106 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
107 .addReg(BaseReg).addImm(Amt)
108 .addImm(Pred).addReg(0).addReg(0);
109 } else if (Amt != 0) {
// Register offset with a shift: use the shifted-register ADD/SUB form.
110 ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
111 unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
112 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
113 get(isSub ? ARM::SUBrs : ARM::ADDrs), WBReg)
114 .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
115 .addImm(Pred).addReg(0).addReg(0);
117 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
118 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
119 .addReg(BaseReg).addReg(OffReg)
120 .addImm(Pred).addReg(0).addReg(0);
123 case ARMII::AddrMode3 : {
124 bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
125 unsigned Amt = ARM_AM::getAM3Offset(OffImm);
127 // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
128 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
129 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
130 .addReg(BaseReg).addImm(Amt)
131 .addImm(Pred).addReg(0).addReg(0);
133 UpdateMI = BuildMI(MF, MI->getDebugLoc(),
134 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
135 .addReg(BaseReg).addReg(OffReg)
136 .addImm(Pred).addReg(0).addReg(0);
// Emit the un-indexed memory op.  For pre-index the mem op uses the updated
// (writeback) register as its address; for post-index it uses the old base,
// with UpdateMI placed before the mem op and its def marked dead if unused.
141 std::vector<MachineInstr*> NewMIs;
144 MemMI = BuildMI(MF, MI->getDebugLoc(),
145 get(MemOpc), MI->getOperand(0).getReg())
146 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
148 MemMI = BuildMI(MF, MI->getDebugLoc(),
149 get(MemOpc)).addReg(MI->getOperand(1).getReg())
150 .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
151 NewMIs.push_back(MemMI);
152 NewMIs.push_back(UpdateMI);
155 MemMI = BuildMI(MF, MI->getDebugLoc(),
156 get(MemOpc), MI->getOperand(0).getReg())
157 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
159 MemMI = BuildMI(MF, MI->getDebugLoc(),
160 get(MemOpc)).addReg(MI->getOperand(1).getReg())
161 .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
163 UpdateMI->getOperand(0).setIsDead();
164 NewMIs.push_back(UpdateMI);
165 NewMIs.push_back(MemMI);
168 // Transfer LiveVariables states, kill / dead info.
170 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
171 MachineOperand &MO = MI->getOperand(i);
172 if (MO.isReg() && MO.getReg() &&
173 TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
174 unsigned Reg = MO.getReg();
176 LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
// A def of the writeback reg now lives on UpdateMI; other defs on MemMI.
178 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
180 LV->addVirtualRegisterDead(Reg, NewMI);
182 if (MO.isUse() && MO.isKill()) {
183 for (unsigned j = 0; j < 2; ++j) {
184 // Look at the two new MI's in reverse order.
185 MachineInstr *NewMI = NewMIs[j];
186 if (!NewMI->readsRegister(Reg))
188 LV->addVirtualRegisterKilled(Reg, NewMI);
189 if (VI.removeKill(MI))
190 VI.Kills.push_back(NewMI);
// NewMIs was filled back-to-front, so insert [1] then [0] to get program order.
198 MFI->insert(MBBI, NewMIs[1]);
199 MFI->insert(MBBI, NewMIs[0]);
// AnalyzeBranch - Classify the terminators of MBB: fill in TBB/FBB and the
// branch condition (2 operands: cond code + CPSR reg) for the patterns the
// branch folder understands; return true for anything unanalyzable (e.g.
// indirect branches).  May delete dead trailing branches when allowed.
// NOTE(review): listing truncated — embedded line numbers skip (211→214,
// 221→224, 254→257, …), so the early `return false`s and closing braces are
// missing from this view.
205 ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
206 MachineBasicBlock *&FBB,
207 SmallVectorImpl<MachineOperand> &Cond,
208 bool AllowModify) const {
209 // If the block has no terminators, it just falls into the block after it.
210 MachineBasicBlock::iterator I = MBB.end();
211 if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
214 // Get the last instruction in the block.
215 MachineInstr *LastInst = I;
217 // If there is only one terminator instruction, process it.
218 unsigned LastOpc = LastInst->getOpcode();
219 if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
220 if (isUncondBranchOpcode(LastOpc)) {
221 TBB = LastInst->getOperand(0).getMBB();
224 if (isCondBranchOpcode(LastOpc)) {
225 // Block ends with fall-through condbranch.
226 TBB = LastInst->getOperand(0).getMBB();
// Condition is (cond-code imm, CPSR reg) — operands 1 and 2 of the Bcc.
227 Cond.push_back(LastInst->getOperand(1));
228 Cond.push_back(LastInst->getOperand(2));
231 return true; // Can't handle indirect branch.
234 // Get the instruction before it if it is a terminator.
235 MachineInstr *SecondLastInst = I;
237 // If there are three terminators, we don't know what sort of block this is.
238 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
241 // If the block ends with a B and a Bcc, handle it.
242 unsigned SecondLastOpc = SecondLastInst->getOpcode();
243 if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
244 TBB = SecondLastInst->getOperand(0).getMBB();
245 Cond.push_back(SecondLastInst->getOperand(1));
246 Cond.push_back(SecondLastInst->getOperand(2));
247 FBB = LastInst->getOperand(0).getMBB();
251 // If the block ends with two unconditional branches, handle it. The second
252 // one is not executed, so remove it.
253 if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
254 TBB = SecondLastInst->getOperand(0).getMBB();
257 I->eraseFromParent();
261 // ...likewise if it ends with a branch table followed by an unconditional
262 // branch. The branch folder can create these, and we must get rid of them for
263 // correctness of Thumb constant islands.
264 if ((isJumpTableBranchOpcode(SecondLastOpc) ||
265 isIndirectBranchOpcode(SecondLastOpc)) &&
266 isUncondBranchOpcode(LastOpc)) {
269 I->eraseFromParent();
273 // Otherwise, can't handle this.
// RemoveBranch - Erase up to two trailing branches (an unconditional B and/or
// a conditional Bcc) from the end of MBB; returns the number removed (0-2).
// NOTE(review): listing truncated — the `--I`, the `return 0/1/2` statements
// and closing braces (embedded lines 281, 284-285, 288-290, 294-295, 298-299)
// are missing from this view.
278 unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
279 MachineBasicBlock::iterator I = MBB.end();
280 if (I == MBB.begin()) return 0;
282 if (!isUncondBranchOpcode(I->getOpcode()) &&
283 !isCondBranchOpcode(I->getOpcode()))
286 // Remove the branch.
287 I->eraseFromParent();
291 if (I == MBB.begin()) return 1;
// A second (conditional) branch may precede the one just removed.
293 if (!isCondBranchOpcode(I->getOpcode()))
296 // Remove the branch.
297 I->eraseFromParent();
// InsertBranch - Emit the branch instructions described by TBB/FBB/Cond at
// the end of MBB, selecting ARM / Thumb1 / Thumb2 opcodes from the function's
// mode.  Cond is empty (unconditional) or the 2-operand (cc, CPSR) pair
// produced by AnalyzeBranch.
// NOTE(review): listing truncated — the one-way-branch `return` and the
// surrounding if/else braces (embedded lines 318-327, 332-333) are missing
// from this view.
302 ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
303 MachineBasicBlock *FBB,
304 const SmallVectorImpl<MachineOperand> &Cond) const {
305 // FIXME this should probably have a DebugLoc argument
306 DebugLoc dl = DebugLoc::getUnknownLoc();
308 ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
// Pick the right opcode family for ARM vs Thumb1 vs Thumb2.
309 int BOpc = !AFI->isThumbFunction()
310 ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
311 int BccOpc = !AFI->isThumbFunction()
312 ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
314 // Shouldn't be a fall through.
315 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
316 assert((Cond.size() == 2 || Cond.size() == 0) &&
317 "ARM branch conditions have two components!");
320 if (Cond.empty()) // Unconditional branch?
321 BuildMI(&MBB, dl, get(BOpc)).addMBB(TBB);
323 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
324 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
328 // Two-way conditional branch.
329 BuildMI(&MBB, dl, get(BccOpc)).addMBB(TBB)
330 .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
331 BuildMI(&MBB, dl, get(BOpc)).addMBB(FBB);
// ReverseBranchCondition - Invert the ARM condition code stored in Cond[0]
// in place (e.g. EQ <-> NE).
// NOTE(review): listing truncated — the trailing `return false;` and closing
// brace (embedded lines 339-340) are missing from this view.
335 bool ARMBaseInstrInfo::
336 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
337 ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
338 Cond[0].setImm(ARMCC::getOppositeCondition(CC));
// PredicateInstruction - Apply predicate Pred (cc imm + CPSR reg pair) to MI:
// an unconditional branch is rewritten to its conditional form; any other
// predicable instruction has its existing predicate operands overwritten.
// NOTE(review): listing truncated — `return true/false` statements and braces
// (embedded lines 350-352, 354, 358-362) are missing from this view.
342 bool ARMBaseInstrInfo::
343 PredicateInstruction(MachineInstr *MI,
344 const SmallVectorImpl<MachineOperand> &Pred) const {
345 unsigned Opc = MI->getOpcode();
346 if (isUncondBranchOpcode(Opc)) {
// Swap B -> Bcc (per sub-target) and append the predicate operands.
347 MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
348 MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
349 MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
353 int PIdx = MI->findFirstPredOperandIdx();
355 MachineOperand &PMO = MI->getOperand(PIdx);
356 PMO.setImm(Pred[0].getImm());
357 MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
// SubsumesPredicate - Return true if condition Pred1 logically implies Pred2
// (e.g. HS subsumes HI), so an instruction predicated on Pred2 can live under
// Pred1.
// NOTE(review): listing truncated — the equality fast-path, the switch header
// and its case labels (embedded lines 367-368, 371-379, 381, 383, 385, 387-388)
// are missing, leaving the returns below without their guarding cases.
363 bool ARMBaseInstrInfo::
364 SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
365 const SmallVectorImpl<MachineOperand> &Pred2) const {
366 if (Pred1.size() > 2 || Pred2.size() > 2)
369 ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
370 ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
380 return CC2 == ARMCC::HI;
382 return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
384 return CC2 == ARMCC::GT;
386 return CC2 == ARMCC::LT;
// DefinesPredicate - Scan MI's operands for a CPSR register operand and, when
// found, report it via Pred; used by if-conversion to spot flag-setting
// instructions.
// NOTE(review): listing truncated — the `return false;`, the push into Pred,
// the `Found` flag and the closing braces (embedded lines 395-397, 401-408)
// are missing from this view.
390 bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
391 std::vector<MachineOperand> &Pred) const {
392 // FIXME: This confuses implicit_def with optional CPSR def.
393 const TargetInstrDesc &TID = MI->getDesc();
394 if (!TID.getImplicitDefs() && !TID.hasOptionalDef())
398 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
399 const MachineOperand &MO = MI->getOperand(i);
400 if (MO.isReg() && MO.getReg() == ARM::CPSR) {
409 /// isPredicable - Return true if the specified instruction can be predicated.
410 /// By default, this returns true for every instruction with a
411 /// PredicateOperand.
// NEON instructions are only predicable under the -predicate-neon flag and
// only in Thumb2 functions (IT blocks); everything else defers to the TID bit.
// NOTE(review): listing truncated — `return false;`, the final `return true;`
// and the closing brace (embedded lines 415-416, 422-423) are missing here.
412 bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
413 const TargetInstrDesc &TID = MI->getDesc();
414 if (!TID.isPredicable())
417 if ((TID.TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
418 ARMFunctionInfo *AFI =
419 MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
420 return PredicateNEON && AFI->isThumb2Function();
425 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
// Returns the number of machine basic blocks in jump table JTI; kept
// out-of-line (DISABLE_INLINE) per the workaround above.
// NOTE(review): listing truncated — the second declaration's `unsigned JTI)`
// parameter line and the closing brace (embedded lines 429, 431) are missing.
426 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
427 unsigned JTI) DISABLE_INLINE;
428 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
430 return JT[JTI].MBBs.size();
433 /// GetInstSize - Return the size of the specified MachineInstr.
// Decodes the size from the TSFlags Size field; special-cases inline asm,
// pseudo labels, constant-pool entries, SJLJ setjmp expansions and
// jump-table branches (whose size depends on the number of JT entries).
// NOTE(review): listing truncated — case labels, `return` statements and
// braces are missing at each embedded-number gap (446, 450-453, 459-462,
// 467, 470, 473, 475-482, 501, 507-508, 510-511, 513-516, 518).
435 unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
436 const MachineBasicBlock &MBB = *MI->getParent();
437 const MachineFunction *MF = MBB.getParent();
438 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
440 // Basic size info comes from the TSFlags field.
441 const TargetInstrDesc &TID = MI->getDesc();
442 unsigned TSFlags = TID.TSFlags;
444 unsigned Opc = MI->getOpcode();
445 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
447 // If this machine instr is an inline asm, measure it.
448 if (MI->getOpcode() == ARM::INLINEASM)
449 return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
454 llvm_unreachable("Unknown or unset size field for instr!");
// Pseudo-instructions that emit no machine code.
455 case TargetInstrInfo::IMPLICIT_DEF:
456 case TargetInstrInfo::KILL:
457 case TargetInstrInfo::DBG_LABEL:
458 case TargetInstrInfo::EH_LABEL:
463 case ARMII::Size8Bytes: return 8; // ARM instruction x 2.
464 case ARMII::Size4Bytes: return 4; // ARM / Thumb2 instruction.
465 case ARMII::Size2Bytes: return 2; // Thumb1 instruction.
466 case ARMII::SizeSpecial: {
468 case ARM::CONSTPOOL_ENTRY:
469 // If this machine instr is a constant pool entry, its size is recorded as
471 return MI->getOperand(2).getImm();
472 case ARM::Int_eh_sjlj_setjmp:
474 case ARM::t2Int_eh_sjlj_setjmp:
483 // These are jumptable branches, i.e. a branch followed by an inlined
484 // jumptable. The size is 4 + 4 * number of entries. For TBB, each
485 // entry is one byte; TBH two byte each.
486 unsigned EntrySize = (Opc == ARM::t2TBB)
487 ? 1 : ((Opc == ARM::t2TBH) ? 2 : 4);
488 unsigned NumOps = TID.getNumOperands();
// The JT index operand sits before the (optional) predicate operands.
489 MachineOperand JTOP =
490 MI->getOperand(NumOps - (TID.isPredicable() ? 3 : 2));
491 unsigned JTI = JTOP.getIndex();
492 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
493 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
494 assert(JTI < JT.size());
495 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
496 // 4 aligned. The assembler / linker may add 2 byte padding just before
497 // the JT entries. The size does not include this padding; the
498 // constant islands pass does separate bookkeeping for it.
499 // FIXME: If we know the size of the function is less than (1 << 16) *2
500 // bytes, we can use 16-bit entries instead. Then there won't be an
502 unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
503 unsigned NumEntries = getNumJTEntries(JT, JTI);
504 if (Opc == ARM::t2TBB && (NumEntries & 1))
505 // Make sure the instruction that follows TBB is 2-byte aligned.
506 // FIXME: Constant island pass should insert an "ALIGN" instruction
509 return NumEntries * EntrySize + InstSize;
512 // Otherwise, pseudo-instruction sizes are zero.
517 return 0; // Not reached
520 /// Return true if the instruction is a register to register move and
521 /// leave the source and dest operands in the passed parameters.
// Recognizes the ARM/Thumb/VFP MOV opcodes; never reports sub-registers.
// NOTE(review): listing truncated — the `default: return false`, several
// VFP/Thumb case labels, `return true;` statements and the closing brace
// (embedded lines 528, 530-534, 537-540, 544, 551-556) are missing here.
524 ARMBaseInstrInfo::isMoveInstr(const MachineInstr &MI,
525 unsigned &SrcReg, unsigned &DstReg,
526 unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
527 SrcSubIdx = DstSubIdx = 0; // No sub-registers.
529 switch (MI.getOpcode()) {
535 SrcReg = MI.getOperand(1).getReg();
536 DstReg = MI.getOperand(0).getReg();
541 case ARM::tMOVgpr2tgpr:
542 case ARM::tMOVtgpr2gpr:
543 case ARM::tMOVgpr2gpr:
545 assert(MI.getDesc().getNumOperands() >= 2 &&
546 MI.getOperand(0).isReg() &&
547 MI.getOperand(1).isReg() &&
548 "Invalid ARM MOV instruction");
549 SrcReg = MI.getOperand(1).getReg();
550 DstReg = MI.getOperand(0).getReg();
// isLoadFromStackSlot - If MI is a direct load from a stack slot (frame-index
// base, zero offset), set FrameIndex and return the destination register;
// otherwise return 0.
// NOTE(review): listing truncated — `default:`/`break;` lines, the ARM::LDR /
// VFP case labels and the final `return 0;` (embedded lines 562-563, 572-575,
// 581-584, 590-594) are missing from this view.
559 ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
560 int &FrameIndex) const {
561 switch (MI->getOpcode()) {
564 case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
// Register-offset form: only a match when the index reg is 0 and shift is 0.
565 if (MI->getOperand(1).isFI() &&
566 MI->getOperand(2).isReg() &&
567 MI->getOperand(3).isImm() &&
568 MI->getOperand(2).getReg() == 0 &&
569 MI->getOperand(3).getImm() == 0) {
570 FrameIndex = MI->getOperand(1).getIndex();
571 return MI->getOperand(0).getReg();
// Immediate-offset forms: frame index base plus zero immediate.
576 if (MI->getOperand(1).isFI() &&
577 MI->getOperand(2).isImm() &&
578 MI->getOperand(2).getImm() == 0) {
579 FrameIndex = MI->getOperand(1).getIndex();
580 return MI->getOperand(0).getReg();
585 if (MI->getOperand(1).isFI() &&
586 MI->getOperand(2).isImm() &&
587 MI->getOperand(2).getImm() == 0) {
588 FrameIndex = MI->getOperand(1).getIndex();
589 return MI->getOperand(0).getReg();
// isStoreToStackSlot - Mirror of isLoadFromStackSlot for stores: if MI stores
// a register directly to a stack slot, set FrameIndex and return the source
// register; otherwise return 0.
// NOTE(review): listing truncated — `default:`/`break;` lines, the ARM::STR /
// VFP case labels and the final `return 0;` (embedded lines 601-602, 611-614,
// 620-623, 629-633) are missing from this view.
598 ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
599 int &FrameIndex) const {
600 switch (MI->getOpcode()) {
603 case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
// Register-offset form: only a match when the index reg is 0 and shift is 0.
604 if (MI->getOperand(1).isFI() &&
605 MI->getOperand(2).isReg() &&
606 MI->getOperand(3).isImm() &&
607 MI->getOperand(2).getReg() == 0 &&
608 MI->getOperand(3).getImm() == 0) {
609 FrameIndex = MI->getOperand(1).getIndex();
610 return MI->getOperand(0).getReg();
// Immediate-offset forms: frame index base plus zero immediate.
615 if (MI->getOperand(1).isFI() &&
616 MI->getOperand(2).isImm() &&
617 MI->getOperand(2).getImm() == 0) {
618 FrameIndex = MI->getOperand(1).getIndex();
619 return MI->getOperand(0).getReg();
624 if (MI->getOperand(1).isFI() &&
625 MI->getOperand(2).isImm() &&
626 MI->getOperand(2).getImm() == 0) {
627 FrameIndex = MI->getOperand(1).getIndex();
628 return MI->getOperand(0).getReg();
// copyRegToReg - Emit a register-to-register copy before I, choosing MOVr /
// VMOVS / VMOVD / VMOVDneon / VMOVQ from the destination (and source)
// register class.  Cross-class copies are only allowed between same-sized
// 8- or 16-byte FP/NEON classes.
// NOTE(review): listing truncated — `return false;` on the size-mismatch
// paths, `.addReg(SrcReg)` continuations, the trailing else/`return true;`
// and braces (embedded lines 647-648, 652-654, 660, 663, 676-681) are missing.
637 ARMBaseInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
638 MachineBasicBlock::iterator I,
639 unsigned DestReg, unsigned SrcReg,
640 const TargetRegisterClass *DestRC,
641 const TargetRegisterClass *SrcRC) const {
642 DebugLoc DL = DebugLoc::getUnknownLoc();
643 if (I != MBB.end()) DL = I->getDebugLoc();
645 if (DestRC != SrcRC) {
646 if (DestRC->getSize() != SrcRC->getSize())
649 // Allow DPR / DPR_VFP2 / DPR_8 cross-class copies.
650 // Allow QPR / QPR_VFP2 / QPR_8 cross-class copies.
651 if (DestRC->getSize() != 8 && DestRC->getSize() != 16)
655 if (DestRC == ARM::GPRRegisterClass) {
// GPR moves need both a predicate and the optional CPSR ('s' bit) operand.
656 AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr),
657 DestReg).addReg(SrcReg)));
658 } else if (DestRC == ARM::SPRRegisterClass) {
659 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVS), DestReg)
661 } else if (DestRC == ARM::DPRRegisterClass) {
662 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVD), DestReg)
664 } else if (DestRC == ARM::DPR_VFP2RegisterClass ||
665 DestRC == ARM::DPR_8RegisterClass ||
666 SrcRC == ARM::DPR_VFP2RegisterClass ||
667 SrcRC == ARM::DPR_8RegisterClass) {
668 // Always use neon reg-reg move if source or dest is NEON-only regclass.
669 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVDneon),
670 DestReg).addReg(SrcReg));
671 } else if (DestRC == ARM::QPRRegisterClass ||
672 DestRC == ARM::QPR_VFP2RegisterClass ||
673 DestRC == ARM::QPR_8RegisterClass) {
674 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VMOVQ),
675 DestReg).addReg(SrcReg));
// storeRegToStackSlot - Spill SrcReg to frame index FI, selecting STR / VSTRD
// / VSTRS / VSTRQ (or the 128-bit-aligned VST1q64 when the stack is being
// realigned) from the register class, and attaching a MachineMemOperand
// describing the store.
// NOTE(review): listing truncated — the MMO alignment argument, the
// `if (Align >= 16 ...)` guard text and closing braces (embedded lines
// 697-698, 717, 721, 723, 727-729) are partially missing from this view.
683 void ARMBaseInstrInfo::
684 storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
685 unsigned SrcReg, bool isKill, int FI,
686 const TargetRegisterClass *RC) const {
687 DebugLoc DL = DebugLoc::getUnknownLoc();
688 if (I != MBB.end()) DL = I->getDebugLoc();
689 MachineFunction &MF = *MBB.getParent();
690 MachineFrameInfo &MFI = *MF.getFrameInfo();
691 unsigned Align = MFI.getObjectAlignment(FI);
// Describe the spill so later passes know it touches the fixed stack slot.
693 MachineMemOperand *MMO =
694 MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
695 MachineMemOperand::MOStore, 0,
696 MFI.getObjectSize(FI),
699 if (RC == ARM::GPRRegisterClass) {
700 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
701 .addReg(SrcReg, getKillRegState(isKill))
702 .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
703 } else if (RC == ARM::DPRRegisterClass ||
704 RC == ARM::DPR_VFP2RegisterClass ||
705 RC == ARM::DPR_8RegisterClass) {
706 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
707 .addReg(SrcReg, getKillRegState(isKill))
708 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
709 } else if (RC == ARM::SPRRegisterClass) {
710 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
711 .addReg(SrcReg, getKillRegState(isKill))
712 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
714 assert((RC == ARM::QPRRegisterClass ||
715 RC == ARM::QPR_VFP2RegisterClass) && "Unknown regclass!");
716 // FIXME: Neon instructions should support predicates
// Realigned frames can use the alignment-required NEON store.
718 && (getRegisterInfo().needsStackRealignment(MF))) {
719 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
720 .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
722 .addReg(SrcReg, getKillRegState(isKill)));
724 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRQ)).
725 addReg(SrcReg, getKillRegState(isKill))
726 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
// loadRegFromStackSlot - Reload DestReg from frame index FI; mirror of
// storeRegToStackSlot using LDR / VLDRD / VLDRS / VLDRQ (or VLD1q64 when the
// stack is being realigned), with a MachineMemOperand for the load.
// NOTE(review): listing truncated — the MMO alignment argument, the
// `if (Align >= 16 ...)` guard text and closing braces (embedded lines
// 745-746, 763, 768, 771-773) are partially missing from this view.
731 void ARMBaseInstrInfo::
732 loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
733 unsigned DestReg, int FI,
734 const TargetRegisterClass *RC) const {
735 DebugLoc DL = DebugLoc::getUnknownLoc();
736 if (I != MBB.end()) DL = I->getDebugLoc();
737 MachineFunction &MF = *MBB.getParent();
738 MachineFrameInfo &MFI = *MF.getFrameInfo();
739 unsigned Align = MFI.getObjectAlignment(FI);
741 MachineMemOperand *MMO =
742 MF.getMachineMemOperand(PseudoSourceValue::getFixedStack(FI),
743 MachineMemOperand::MOLoad, 0,
744 MFI.getObjectSize(FI),
747 if (RC == ARM::GPRRegisterClass) {
748 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
749 .addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO));
750 } else if (RC == ARM::DPRRegisterClass ||
751 RC == ARM::DPR_VFP2RegisterClass ||
752 RC == ARM::DPR_8RegisterClass) {
753 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
754 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
755 } else if (RC == ARM::SPRRegisterClass) {
756 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
757 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
759 assert((RC == ARM::QPRRegisterClass ||
760 RC == ARM::QPR_VFP2RegisterClass ||
761 RC == ARM::QPR_8RegisterClass) && "Unknown regclass!");
762 // FIXME: Neon instructions should support predicates
// Realigned frames can use the alignment-required NEON load.
764 && (getRegisterInfo().needsStackRealignment(MF))) {
765 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
766 .addFrameIndex(FI).addImm(0).addImm(0).addImm(128)
767 .addMemOperand(MMO));
769 AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRQ), DestReg)
770 .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
// foldMemoryOperandImpl - Fold a reg-reg move (MOVr/t2MOVr, the Thumb GPR
// moves, VMOVS, VMOVD) referenced by Ops[0] into a direct load or store of
// frame index FI: folding the def operand yields a load, folding the use
// operand yields a store.  Returns the new instruction or NULL if the move
// cannot be folded (e.g. it updates CPSR).
// NOTE(review): listing truncated — the `.addReg(SrcReg, ...)` /
// `.addReg(DstReg, ...)` lines that open several BuildMI chains, subreg
// arguments, `return NULL`s, closing braces and the final `return NewMI;`
// (embedded gaps at 786, 796/798, 802/804, 813-814, 820-821, 835/837,
// 845-846/849, 862-863, 871-872/875, 877-878, 888/890, 898-899/902, 904-909)
// are missing from this view.
775 MachineInstr *ARMBaseInstrInfo::
776 foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
777 const SmallVectorImpl<unsigned> &Ops, int FI) const {
778 if (Ops.size() != 1) return NULL;
780 unsigned OpNum = Ops[0];
781 unsigned Opc = MI->getOpcode();
782 MachineInstr *NewMI = NULL;
783 if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
784 // If it is updating CPSR, then it cannot be folded.
785 if (MI->getOperand(4).getReg() == ARM::CPSR && !MI->getOperand(4).isDead())
// Preserve the original move's predicate on the folded memory op.
787 unsigned Pred = MI->getOperand(2).getImm();
788 unsigned PredReg = MI->getOperand(3).getReg();
789 if (OpNum == 0) { // move -> store
790 unsigned SrcReg = MI->getOperand(1).getReg();
791 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
792 bool isKill = MI->getOperand(1).isKill();
793 bool isUndef = MI->getOperand(1).isUndef();
794 if (Opc == ARM::MOVr)
795 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::STR))
797 getKillRegState(isKill) | getUndefRegState(isUndef),
799 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
801 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
803 getKillRegState(isKill) | getUndefRegState(isUndef),
805 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
806 } else { // move -> load
807 unsigned DstReg = MI->getOperand(0).getReg();
808 unsigned DstSubReg = MI->getOperand(0).getSubReg();
809 bool isDead = MI->getOperand(0).isDead();
810 bool isUndef = MI->getOperand(0).isUndef();
811 if (Opc == ARM::MOVr)
812 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::LDR))
815 getDeadRegState(isDead) |
816 getUndefRegState(isUndef), DstSubReg)
817 .addFrameIndex(FI).addReg(0).addImm(0).addImm(Pred).addReg(PredReg);
819 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
822 getDeadRegState(isDead) |
823 getUndefRegState(isUndef), DstSubReg)
824 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
// Thumb1 GPR moves are unpredicated: fold with an AL predicate.
826 } else if (Opc == ARM::tMOVgpr2gpr ||
827 Opc == ARM::tMOVtgpr2gpr ||
828 Opc == ARM::tMOVgpr2tgpr) {
829 if (OpNum == 0) { // move -> store
830 unsigned SrcReg = MI->getOperand(1).getReg();
831 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
832 bool isKill = MI->getOperand(1).isKill();
833 bool isUndef = MI->getOperand(1).isUndef();
834 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2STRi12))
836 getKillRegState(isKill) | getUndefRegState(isUndef),
838 .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
839 } else { // move -> load
840 unsigned DstReg = MI->getOperand(0).getReg();
841 unsigned DstSubReg = MI->getOperand(0).getSubReg();
842 bool isDead = MI->getOperand(0).isDead();
843 bool isUndef = MI->getOperand(0).isUndef();
844 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::t2LDRi12))
847 getDeadRegState(isDead) |
848 getUndefRegState(isUndef),
850 .addFrameIndex(FI).addImm(0).addImm(ARMCC::AL).addReg(0);
852 } else if (Opc == ARM::VMOVS) {
853 unsigned Pred = MI->getOperand(2).getImm();
854 unsigned PredReg = MI->getOperand(3).getReg();
855 if (OpNum == 0) { // move -> store
856 unsigned SrcReg = MI->getOperand(1).getReg();
857 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
858 bool isKill = MI->getOperand(1).isKill();
859 bool isUndef = MI->getOperand(1).isUndef();
860 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRS))
861 .addReg(SrcReg, getKillRegState(isKill) | getUndefRegState(isUndef),
864 .addImm(0).addImm(Pred).addReg(PredReg);
865 } else { // move -> load
866 unsigned DstReg = MI->getOperand(0).getReg();
867 unsigned DstSubReg = MI->getOperand(0).getSubReg();
868 bool isDead = MI->getOperand(0).isDead();
869 bool isUndef = MI->getOperand(0).isUndef();
870 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRS))
873 getDeadRegState(isDead) |
874 getUndefRegState(isUndef),
876 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
879 else if (Opc == ARM::VMOVD) {
880 unsigned Pred = MI->getOperand(2).getImm();
881 unsigned PredReg = MI->getOperand(3).getReg();
882 if (OpNum == 0) { // move -> store
883 unsigned SrcReg = MI->getOperand(1).getReg();
884 unsigned SrcSubReg = MI->getOperand(1).getSubReg();
885 bool isKill = MI->getOperand(1).isKill();
886 bool isUndef = MI->getOperand(1).isUndef();
887 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VSTRD))
889 getKillRegState(isKill) | getUndefRegState(isUndef),
891 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
892 } else { // move -> load
893 unsigned DstReg = MI->getOperand(0).getReg();
894 unsigned DstSubReg = MI->getOperand(0).getSubReg();
895 bool isDead = MI->getOperand(0).isDead();
896 bool isUndef = MI->getOperand(0).isUndef();
897 NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::VLDRD))
900 getDeadRegState(isDead) |
901 getUndefRegState(isUndef),
903 .addFrameIndex(FI).addImm(0).addImm(Pred).addReg(PredReg);
// foldMemoryOperandImpl (LoadMI overload) - Folding another load instruction
// into MI.
// NOTE(review): listing truncated — the return type line, the MachineInstr*
// parameter line and the entire body (embedded lines 910, 912, 915+) are
// missing from this view; behavior cannot be stated from what is visible.
911 ARMBaseInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
913 const SmallVectorImpl<unsigned> &Ops,
914 MachineInstr* LoadMI) const {
// canFoldMemoryOperand - Cheap predicate mirroring foldMemoryOperandImpl:
// reg-reg moves (unless they update CPSR) and VFP moves are foldable; NEON
// moves are not (yet).
// NOTE(review): listing truncated — the `return true;` bodies of the Thumb
// and VFP branches and the final `return false;}` (embedded lines 932, 934,
// 937-940) are missing from this view.
920 ARMBaseInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
921 const SmallVectorImpl<unsigned> &Ops) const {
922 if (Ops.size() != 1) return false;
924 unsigned Opc = MI->getOpcode();
925 if (Opc == ARM::MOVr || Opc == ARM::t2MOVr) {
926 // If it is updating CPSR, then it cannot be folded.
927 return MI->getOperand(4).getReg() != ARM::CPSR ||
928 MI->getOperand(4).isDead();
929 } else if (Opc == ARM::tMOVgpr2gpr ||
930 Opc == ARM::tMOVtgpr2gpr ||
931 Opc == ARM::tMOVgpr2tgpr) {
933 } else if (Opc == ARM::VMOVS || Opc == ARM::VMOVD) {
935 } else if (Opc == ARM::VMOVDneon || Opc == ARM::VMOVQ) {
936 return false; // FIXME
// reMaterialize - Re-emit Orig before I defining DestReg(:SubIdx).  Plain
// instructions are cloned; PIC constant-pool loads (tLDRpci_pic /
// t2LDRpci_pic) must instead get a fresh constant-pool entry with a new
// pc-label id, since the pc-relative label cannot be shared.
// NOTE(review): listing truncated — the SubIdx reset, the default-case
// clone/insert/return path, case labels, the LoadOptions argument of the
// GlobalValue CPV, `break`s and closing braces (embedded lines 949, 952-954,
// 956-957, 960-962, 978, 985, 989, 992-995, 998) are missing from this view.
942 void ARMBaseInstrInfo::
943 reMaterialize(MachineBasicBlock &MBB,
944 MachineBasicBlock::iterator I,
945 unsigned DestReg, unsigned SubIdx,
946 const MachineInstr *Orig,
947 const TargetRegisterInfo *TRI) const {
948 DebugLoc dl = Orig->getDebugLoc();
// For a physical register, resolve the sub-register up front.
950 if (SubIdx && TargetRegisterInfo::isPhysicalRegister(DestReg)) {
951 DestReg = TRI->getSubReg(DestReg, SubIdx);
955 unsigned Opcode = Orig->getOpcode();
958 MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
959 MI->getOperand(0).setReg(DestReg);
963 case ARM::tLDRpci_pic:
964 case ARM::t2LDRpci_pic: {
965 MachineFunction &MF = *MBB.getParent();
966 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
967 MachineConstantPool *MCP = MF.getConstantPool();
968 unsigned CPI = Orig->getOperand(1).getIndex();
969 const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
970 assert(MCPE.isMachineConstantPoolEntry() &&
971 "Expecting a machine constantpool entry!");
972 ARMConstantPoolValue *ACPV =
973 static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);
// Each rematerialized copy needs its own pc-label id and CP entry.
974 unsigned PCLabelId = AFI->createConstPoolEntryUId();
975 ARMConstantPoolValue *NewCPV = 0;
976 if (ACPV->isGlobalValue())
977 NewCPV = new ARMConstantPoolValue(ACPV->getGV(), PCLabelId,
979 else if (ACPV->isExtSymbol())
980 NewCPV = new ARMConstantPoolValue(MF.getFunction()->getContext(),
981 ACPV->getSymbol(), PCLabelId, 4);
982 else if (ACPV->isBlockAddress())
983 NewCPV = new ARMConstantPoolValue(ACPV->getBlockAddress(), PCLabelId,
984 ARMCP::CPBlockAddress, 4);
986 llvm_unreachable("Unexpected ARM constantpool value type!!");
987 CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
988 MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
990 .addConstantPoolIndex(CPI).addImm(PCLabelId);
// Carry over the memory operands so aliasing info is preserved.
991 (*MIB).setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
996 MachineInstr *NewMI = prior(I);
997 NewMI->getOperand(0).setSubReg(SubIdx);
// isIdentical - Like the base-class operand comparison, but treats two
// pc-relative constant-pool loads as identical when their ARM constant-pool
// values hold the same underlying value (the CP indices and pc-labels may
// differ).  Falls back to TargetInstrInfoImpl::isIdentical otherwise.
// NOTE(review): listing truncated — `return false;` statements, the
// plain-Constant comparison branch and closing braces (embedded lines 1009,
// 1011-1012, 1016-1017, 1029-1030, 1032) are missing from this view.
1000 bool ARMBaseInstrInfo::isIdentical(const MachineInstr *MI0,
1001 const MachineInstr *MI1,
1002 const MachineRegisterInfo *MRI) const {
1003 int Opcode = MI0->getOpcode();
1004 if (Opcode == ARM::t2LDRpci ||
1005 Opcode == ARM::t2LDRpci_pic ||
1006 Opcode == ARM::tLDRpci ||
1007 Opcode == ARM::tLDRpci_pic) {
1008 if (MI1->getOpcode() != Opcode)
1010 if (MI0->getNumOperands() != MI1->getNumOperands())
// Operand 1 is the constant-pool index operand on these loads.
1013 const MachineOperand &MO0 = MI0->getOperand(1);
1014 const MachineOperand &MO1 = MI1->getOperand(1);
1015 if (MO0.getOffset() != MO1.getOffset())
1018 const MachineFunction *MF = MI0->getParent()->getParent();
1019 const MachineConstantPool *MCP = MF->getConstantPool();
1020 int CPI0 = MO0.getIndex();
1021 int CPI1 = MO1.getIndex();
1022 const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
1023 const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
1024 ARMConstantPoolValue *ACPV0 =
1025 static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
1026 ARMConstantPoolValue *ACPV1 =
1027 static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
// Compare the pool values themselves, not the (possibly different) indices.
1028 return ACPV0->hasSameValue(ACPV1);
1031 return TargetInstrInfoImpl::isIdentical(MI0, MI1, MRI);
// TailDuplicationLimit - Raise the tail-duplication threshold by 2 for
// blocks ending in an indirect branch when the CPU has a branch target
// buffer, since duplicating those branches improves predictability.
// NOTE(review): listing truncated — the closing brace (embedded line 1042)
// is missing from this view.
1034 unsigned ARMBaseInstrInfo::TailDuplicationLimit(const MachineBasicBlock &MBB,
1035 unsigned DefaultLimit) const {
1036 // If the target processor can predict indirect branches, it is highly
1037 // desirable to duplicate them, since it can often make them predictable.
1038 if (!MBB.empty() && isIndirectBranchOpcode(MBB.back().getOpcode()) &&
1039 getSubtarget().hasBranchTargetBuffer())
1040 return DefaultLimit + 2;
1041 return DefaultLimit;
1044 /// getInstrPredicate - If instruction is predicated, returns its predicate
1045 /// condition, otherwise returns AL. It also returns the condition code
1046 /// register by reference.
// NOTE(review): listing truncated — the return type line, the PIdx == -1
// early-out (PredReg = 0; return ARMCC::AL) and the closing brace (embedded
// lines 1047, 1050-1054, 1057) are missing from this view.
1048 llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
1049 int PIdx = MI->findFirstPredOperandIdx();
1055 PredReg = MI->getOperand(PIdx+1).getReg();
1056 return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
1060 int llvm::getMatchingCondBranchOpcode(int Opc) {
1063 else if (Opc == ARM::tB)
1065 else if (Opc == ARM::t2B)
1068 llvm_unreachable("Unknown unconditional branch opcode!");
1073 void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
1074 MachineBasicBlock::iterator &MBBI, DebugLoc dl,
1075 unsigned DestReg, unsigned BaseReg, int NumBytes,
1076 ARMCC::CondCodes Pred, unsigned PredReg,
1077 const ARMBaseInstrInfo &TII) {
1078 bool isSub = NumBytes < 0;
1079 if (isSub) NumBytes = -NumBytes;
1082 unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
1083 unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
1084 assert(ThisVal && "Didn't extract field correctly");
1086 // We will handle these bits from offset, clear them.
1087 NumBytes &= ~ThisVal;
1089 assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
1091 // Build the new ADD / SUB.
1092 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
1093 BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
1094 .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
1095 .addImm((unsigned)Pred).addReg(PredReg).addReg(0);
1100 bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
1101 unsigned FrameReg, int &Offset,
1102 const ARMBaseInstrInfo &TII) {
1103 unsigned Opcode = MI.getOpcode();
1104 const TargetInstrDesc &Desc = MI.getDesc();
1105 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1108 // Memory operands in inline assembly always use AddrMode2.
1109 if (Opcode == ARM::INLINEASM)
1110 AddrMode = ARMII::AddrMode2;
1112 if (Opcode == ARM::ADDri) {
1113 Offset += MI.getOperand(FrameRegIdx+1).getImm();
1115 // Turn it into a move.
1116 MI.setDesc(TII.get(ARM::MOVr));
1117 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1118 MI.RemoveOperand(FrameRegIdx+1);
1121 } else if (Offset < 0) {
1124 MI.setDesc(TII.get(ARM::SUBri));
1127 // Common case: small offset, fits into instruction.
1128 if (ARM_AM::getSOImmVal(Offset) != -1) {
1129 // Replace the FrameIndex with sp / fp
1130 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1131 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
1136 // Otherwise, pull as much of the immedidate into this ADDri/SUBri
1138 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
1139 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
1141 // We will handle these bits from offset, clear them.
1142 Offset &= ~ThisImmVal;
1144 // Get the properly encoded SOImmVal field.
1145 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
1146 "Bit extraction didn't work?");
1147 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
1149 unsigned ImmIdx = 0;
1151 unsigned NumBits = 0;
1154 case ARMII::AddrMode2: {
1155 ImmIdx = FrameRegIdx+2;
1156 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
1157 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1162 case ARMII::AddrMode3: {
1163 ImmIdx = FrameRegIdx+2;
1164 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
1165 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1170 case ARMII::AddrMode4:
1171 case ARMII::AddrMode6:
1172 // Can't fold any offset even if it's zero.
1174 case ARMII::AddrMode5: {
1175 ImmIdx = FrameRegIdx+1;
1176 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
1177 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1184 llvm_unreachable("Unsupported addressing mode!");
1188 Offset += InstrOffs * Scale;
1189 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
1195 // Attempt to fold address comp. if opcode has offset bits
1197 // Common case: small offset, fits into instruction.
1198 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
1199 int ImmedOffset = Offset / Scale;
1200 unsigned Mask = (1 << NumBits) - 1;
1201 if ((unsigned)Offset <= Mask * Scale) {
1202 // Replace the FrameIndex with sp
1203 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1205 ImmedOffset |= 1 << NumBits;
1206 ImmOp.ChangeToImmediate(ImmedOffset);
1211 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
1212 ImmedOffset = ImmedOffset & Mask;
1214 ImmedOffset |= 1 << NumBits;
1215 ImmOp.ChangeToImmediate(ImmedOffset);
1216 Offset &= ~(Mask*Scale);
1220 Offset = (isSub) ? -Offset : Offset;