//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

14 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMConstantPoolValue.h"
18 #include "ARMHazardRecognizer.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "MCTargetDesc/ARMAddressingModes.h"
21 #include "llvm/Constants.h"
22 #include "llvm/Function.h"
23 #include "llvm/GlobalValue.h"
24 #include "llvm/CodeGen/LiveVariables.h"
25 #include "llvm/CodeGen/MachineConstantPool.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineJumpTableInfo.h"
29 #include "llvm/CodeGen/MachineMemOperand.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/SelectionDAGNodes.h"
32 #include "llvm/MC/MCAsmInfo.h"
33 #include "llvm/Support/BranchProbability.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/ADT/STLExtras.h"
39 #define GET_INSTRINFO_CTOR
40 #include "ARMGenInstrInfo.inc"
using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;     // MLA / MLS opcode
  uint16_t MulOpc;     // Expanded multiplication opcode
  uint16_t AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,           MulOpc,           AddSubOpc,       NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,        ARM::VMULS,       ARM::VADDS,      false,  false },
  { ARM::VMLSS,        ARM::VMULS,       ARM::VSUBS,      false,  false },
  { ARM::VMLAD,        ARM::VMULD,       ARM::VADDD,      false,  false },
  { ARM::VMLSD,        ARM::VMULD,       ARM::VSUBD,      false,  false },
  { ARM::VNMLAS,       ARM::VNMULS,      ARM::VSUBS,      true,   false },
  { ARM::VNMLSS,       ARM::VMULS,       ARM::VSUBS,      true,   false },
  { ARM::VNMLAD,       ARM::VNMULD,      ARM::VSUBD,      true,   false },
  { ARM::VNMLSD,       ARM::VMULD,       ARM::VSUBD,      true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,       ARM::VMULfd,      ARM::VADDfd,     false,  false },
  { ARM::VMLSfd,       ARM::VMULfd,      ARM::VSUBfd,     false,  false },
  { ARM::VMLAfq,       ARM::VMULfq,      ARM::VADDfq,     false,  false },
  { ARM::VMLSfq,       ARM::VMULfq,      ARM::VSUBfq,     false,  false },
  { ARM::VMLAslfd,     ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
  { ARM::VMLSslfd,     ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
  { ARM::VMLAslfq,     ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
  { ARM::VMLSslfq,     ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
};

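// Illustrative note (not part of the original source): MLxEntryMap, built in
// the constructor below, maps an MLx opcode to its row in ARM_MLxTable so the
// scheduler can break a multiply-accumulate into its two-instruction form when
// an MLx hazard is detected, e.g. (assuming VFP scalar registers):
//
//   vmla.f32 s0, s1, s2   =>   vmul.f32 s3, s1, s2
//                              vadd.f32 s0, s0, s3
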
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling.
// TargetInstrInfoImpl currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}

MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

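// Illustrative example (not part of the original source): for a pre-indexed
// load such as
//   ldr r0, [r1, #4]!
// the code above produces an UpdateMI/MemMI pair roughly equivalent to
//   add r1, r1, #4
//   ldr r0, [r1]
// provided the #4 offset can be encoded as a so_imm operand.
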
// Branch analysis.
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(1));
      Cond.push_back(LastInst->getOperand(2));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator left is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(1));
    Cond.push_back(SecondLastInst->getOperand(2));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with a branch table followed by an unconditional
  // branch. The branch folder can create these, and we must get rid of them for
  // correctness of Thumb constant islands.
  if ((isJumpTableBranchOpcode(SecondLastOpc) ||
       isIndirectBranchOpcode(SecondLastOpc)) &&
      isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

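// Illustrative summary (not part of the original source) of the terminator
// shapes AnalyzeBranch reports:
//   <fallthrough>              -> TBB == FBB == 0, Cond empty
//   b   LBB0_1                 -> TBB set, Cond empty
//   bne LBB0_2                 -> TBB set, Cond = {ARMCC::NE, CPSR}
//   bne LBB0_2; b LBB0_3       -> TBB and FBB set, Cond = {ARMCC::NE, CPSR}
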
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}

bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

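// Example (a sketch, not part of the original source): "addne r0, r0, #1"
// carries predicate operands {ARMCC::NE, CPSR}, so isPredicated returns true;
// a plain "add r0, r0, #1" carries {ARMCC::AL, noreg} and returns false.
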
bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
    MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}

bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}

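// Example (a sketch, not part of the original source): HS (unsigned >=)
// subsumes HI (unsigned >), since HI implies HS. An instruction predicated on
// HS therefore also executes on every path where HI holds:
//   cmp   r0, r1
//   addhs r2, r2, #1    @ also covers the strictly-higher (HI) case
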
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
    ARMFunctionInfo *AFI =
      MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
    return AFI->isThumb2Function();
  }
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  if (MI->isLabel())
    return 0;
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::PROLOG_LABEL:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::DBG_VALUE:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each
    // entry is one byte; for TBH, two bytes each.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  default:
    // Otherwise, pseudo-instruction sizes are zero.
    return 0;
  }
}

unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}

void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc  = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                  .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc  = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 2;
  else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq, BeginIdx = ARM::qsub_0, SubRegs = 4;
  // Fall back to VMOVD.
  else if (ARM::DPairRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2;
  else if (ARM::DTripleRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3;
  else if (ARM::DQuadRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4;

  else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 2, Spacing = 2;
  else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 3, Spacing = 2;
  else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD, BeginIdx = ARM::dsub_0, SubRegs = 4, Spacing = 2;

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs-1)*Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i*Spacing);
    unsigned Src = TRI->getSubReg(SrcReg,  BeginIdx + i*Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst)
      .addReg(Src);
    // VORR takes two source operands.
    if (Opc == ARM::VORRq)
      Mov.addReg(Src);
    Mov = AddDefaultPred(Mov);
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}

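// Worked example (not part of the original source): when copying Q0_Q1 into
// Q1_Q2, the forward order "Q1 = Q0; Q2 = Q1" would read a clobbered Q1.
// Because the first destination sub-register (Q1) overlaps SrcReg, the loop
// above runs backward instead: "Q2 = Q1; Q1 = Q0".
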
static const
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
                             unsigned Reg, unsigned SubIdx, unsigned State,
                             const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
      AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}

unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}

void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(FI),
      MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI),
      Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));

    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::DPairRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 24:
    if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI)
                         .addMemOperand(MMO));
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
        ARM::DQuadRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
        if (TargetRegisterInfo::isPhysicalRegister(DestReg))
          MIB.addReg(DestReg, RegState::ImplicitDefine);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
      if (TargetRegisterInfo::isPhysicalRegister(DestReg))
        MIB.addReg(DestReg, RegState::ImplicitDefine);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}

unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs: // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening:    " << *MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MachineInstrBuilder(MI));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}

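// Illustrative before/after (not part of the original source), assuming
// -widen-vmovs and even S-registers:
//   vmov.f32 s0, s2       =>   vmov.f64 d0, d1
// with an undef read of d1 plus an implicit use of s2 so the machine verifier
// and register scavenger still see a well-defined value for s2.
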
MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here from non-Thumb functions?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded value, e.g. a constantpool of a global address,
      // is the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
    return false;  // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI->isCall() && MI->definesRegister(ARM::SP))
    return true;

  return false;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}

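// Worked example (not part of the original source): for a 4-cycle block
// reached with probability 1/2 and a misprediction penalty of 10 cycles,
// UnpredCost = 4*1/2 + 1 + 10/10 = 4, so predication pays off only while
// NumCycles + ExtraPredCycles <= 4.
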
bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    const BranchProbability &Probability) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned TUnpredCost = Probability.getNumerator() * TCycles;
  TUnpredCost /= Probability.getDenominator();

  uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
  unsigned FUnpredCost = Comp * FCycles;
  FUnpredCost /= Probability.getDenominator();

  unsigned UnpredCost = TUnpredCost + FUnpredCost;
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}

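// Typical use (a sketch, not part of the original source):
//   unsigned PredReg = 0;
//   ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
//   if (Pred == ARMCC::AL) { /* MI executes unconditionally */ }
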
int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
}

/// commuteInstruction - Handle commutable instructions.
MachineInstr *
ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case ARM::MOVCCr:
  case ARM::t2MOVCCr: {
    // MOVCC can be commuted by inverting the condition.
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
    // MOVCC AL can't be inverted. Shouldn't happen.
    if (CC == ARMCC::AL || PredReg != ARM::CPSR)
      return NULL;
    MI = TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
    if (!MI)
      return NULL;
    // After swapping the MOVCC operands, also invert the condition.
    MI->getOperand(MI->findFirstPredOperandIdx())
      .setImm(ARMCC::getOppositeCondition(CC));
    return MI;
  }
  }
  return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}

/// Identify instructions that can be folded into a MOVCC instruction, and
/// return the corresponding opcode for the predicated pseudo-instruction.
static unsigned canFoldIntoMOVCC(unsigned Reg, MachineInstr *&MI,
                                 const MachineRegisterInfo &MRI) {
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return 0;
  if (!MRI.hasOneNonDBGUse(Reg))
    return 0;
  MI = MRI.getVRegDef(Reg);
  if (!MI)
    return 0;
  // Check if MI has any non-dead defs or physreg uses. This also detects
  // predicated instructions which will be reading CPSR.
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Reject frame index operands, PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return 0;
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      return 0;
    if (MO.isDef() && !MO.isDead())
      return 0;
  }
  switch (MI->getOpcode()) {
  default: return 0;
  case ARM::ANDri: return ARM::ANDCCri;
  case ARM::ANDrr: return ARM::ANDCCrr;
  case ARM::ANDrsi: return ARM::ANDCCrsi;
  case ARM::ANDrsr: return ARM::ANDCCrsr;
  case ARM::t2ANDri: return ARM::t2ANDCCri;
  case ARM::t2ANDrr: return ARM::t2ANDCCrr;
  case ARM::t2ANDrs: return ARM::t2ANDCCrs;
  case ARM::EORri: return ARM::EORCCri;
  case ARM::EORrr: return ARM::EORCCrr;
  case ARM::EORrsi: return ARM::EORCCrsi;
  case ARM::EORrsr: return ARM::EORCCrsr;
  case ARM::t2EORri: return ARM::t2EORCCri;
  case ARM::t2EORrr: return ARM::t2EORCCrr;
  case ARM::t2EORrs: return ARM::t2EORCCrs;
  case ARM::ORRri: return ARM::ORRCCri;
  case ARM::ORRrr: return ARM::ORRCCrr;
  case ARM::ORRrsi: return ARM::ORRCCrsi;
  case ARM::ORRrsr: return ARM::ORRCCrsr;
  case ARM::t2ORRri: return ARM::t2ORRCCri;
  case ARM::t2ORRrr: return ARM::t2ORRCCrr;
  case ARM::t2ORRrs: return ARM::t2ORRCCrs;

  // ARM ADD/SUB
  case ARM::ADDri: return ARM::ADDCCri;
  case ARM::ADDrr: return ARM::ADDCCrr;
  case ARM::ADDrsi: return ARM::ADDCCrsi;
  case ARM::ADDrsr: return ARM::ADDCCrsr;
  case ARM::SUBri: return ARM::SUBCCri;
  case ARM::SUBrr: return ARM::SUBCCrr;
  case ARM::SUBrsi: return ARM::SUBCCrsi;
  case ARM::SUBrsr: return ARM::SUBCCrsr;

  // Thumb2 ADD/SUB
  case ARM::t2ADDri: return ARM::t2ADDCCri;
  case ARM::t2ADDri12: return ARM::t2ADDCCri12;
  case ARM::t2ADDrr: return ARM::t2ADDCCrr;
  case ARM::t2ADDrs: return ARM::t2ADDCCrs;
  case ARM::t2SUBri: return ARM::t2SUBCCri;
  case ARM::t2SUBri12: return ARM::t2SUBCCri12;
  case ARM::t2SUBrr: return ARM::t2SUBCCrr;
  case ARM::t2SUBrs: return ARM::t2SUBCCrs;
  }
}

bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  // MOVCC operands:
  // 0: Def.
  // 1: True use.
  // 2: False use.
  // 3: Condition code.
  // 4: CPSR use.
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI->getOperand(3));
  Cond.push_back(MI->getOperand(4));
  // We can always fold a def.
  Optimizable = true;
  return false;
}

MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
                                               bool PreferFalse) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = 0;
  unsigned Opc = canFoldIntoMOVCC(MI->getOperand(2).getReg(), DefMI, MRI);
  bool Invert = !Opc;
  if (!Opc)
    Opc = canFoldIntoMOVCC(MI->getOperand(1).getReg(), DefMI, MRI);
  if (!Opc)
    return 0;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                      get(Opc), MI->getOperand(0).getReg())
    .addOperand(MI->getOperand(Invert ? 2 : 1));

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.addOperand(DefMI->getOperand(i));

  unsigned CondCode = MI->getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.addOperand(MI->getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    AddDefaultCC(NewMI);

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
/// def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADDSri, ARM::ADDri},
  {ARM::ADDSrr, ARM::ADDrr},
  {ARM::ADDSrsi, ARM::ADDrsi},
  {ARM::ADDSrsr, ARM::ADDrsr},

  {ARM::SUBSri, ARM::SUBri},
  {ARM::SUBSrr, ARM::SUBrr},
  {ARM::SUBSrsi, ARM::SUBrsi},
  {ARM::SUBSrsr, ARM::SUBrsr},

  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrsi, ARM::RSBrsi},
  {ARM::RSBSrsr, ARM::RSBrsr},

  {ARM::t2ADDSri, ARM::t2ADDri},
  {ARM::t2ADDSrr, ARM::t2ADDrr},
  {ARM::t2ADDSrs, ARM::t2ADDrs},

  {ARM::t2SUBSri, ARM::t2SUBri},
  {ARM::t2SUBSrr, ARM::t2SUBrr},
  {ARM::t2SUBSrs, ARM::t2SUBrs},

  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

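// Typical use (a sketch, not part of the original source): a flag-setting
// pseudo such as ARM::ADDSri is lowered by rewriting its opcode and turning
// the optional CPSR operand into a real def:
//   if (unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode()))
//     MI.setDesc(TII.get(NewOpc));
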
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg, int NumBytes,
                               ARMCC::CondCodes Pred, unsigned PredReg,
                               const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

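// Worked example (not part of the original source): NumBytes = 0x101 has no
// single so_imm encoding (its set bits span 9 bits), so the loop above emits
// two instructions, e.g.
//   add rD, rB, #0x1
//   add rD, rD, #0x100
// where each chunk is an 8-bit value at an even rotation (the exact chunk
// order depends on ARM_AM::getSOImmValRotate).
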
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

1824 // Otherwise, pull as much of the immedidate into this ADDri/SUBri
1826 unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
1827 unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);
1829 // We will handle these bits from offset, clear them.
1830 Offset &= ~ThisImmVal;
1832 // Get the properly encoded SOImmVal field.
1833 assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
1834 "Bit extraction didn't work?");
1835 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
1837 unsigned ImmIdx = 0;
1839 unsigned NumBits = 0;
1842 case ARMII::AddrMode_i12: {
1843 ImmIdx = FrameRegIdx + 1;
1844 InstrOffs = MI.getOperand(ImmIdx).getImm();
1848 case ARMII::AddrMode2: {
1849 ImmIdx = FrameRegIdx+2;
1850 InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
1851 if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1856 case ARMII::AddrMode3: {
1857 ImmIdx = FrameRegIdx+2;
1858 InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
1859 if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1864 case ARMII::AddrMode4:
1865 case ARMII::AddrMode6:
1866 // Can't fold any offset even if it's zero.
1868 case ARMII::AddrMode5: {
1869 ImmIdx = FrameRegIdx+1;
1870 InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
1871 if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
1878 llvm_unreachable("Unsupported addressing mode!");
1881 Offset += InstrOffs * Scale;
1882 assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
1888 // Attempt to fold address comp. if opcode has offset bits
1890 // Common case: small offset, fits into instruction.
1891 MachineOperand &ImmOp = MI.getOperand(ImmIdx);
1892 int ImmedOffset = Offset / Scale;
1893 unsigned Mask = (1 << NumBits) - 1;
1894 if ((unsigned)Offset <= Mask * Scale) {
1895 // Replace the FrameIndex with sp
1896 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
1897 // FIXME: When addrmode2 goes away, this will simplify (like the
1898 // T2 version), as the LDR.i12 versions don't need the encoding
1899 // tricks for the offset value.
1901 if (AddrMode == ARMII::AddrMode_i12)
1902 ImmedOffset = -ImmedOffset;
1904 ImmedOffset |= 1 << NumBits;
1906 ImmOp.ChangeToImmediate(ImmedOffset);
1911 // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
1912 ImmedOffset = ImmedOffset & Mask;
1914 if (AddrMode == ARMII::AddrMode_i12)
1915 ImmedOffset = -ImmedOffset;
1917 ImmedOffset |= 1 << NumBits;
1919 ImmOp.ChangeToImmediate(ImmedOffset);
1920 Offset &= ~(Mask*Scale);
1924 Offset = (isSub) ? -Offset : Offset;
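
// Worked example (registers and offsets assumed): for an AddrMode3 access
//     ldrh r0, [<fi#0>, #-6]
// where the frame object resolves to fp-8, Offset becomes -14. AM3 has
// NumBits == 8 and Scale == 1, so -14 fits and is encoded with the sub bit:
//     ImmedOffset = 14 | (1 << 8)    // prints as ldrh r0, [fp, #-14]
// AddrMode_i12 instead represents the sign as a negative immediate directly.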
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool ARMBaseInstrInfo::
analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
               int &CmpMask, int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::CMPrr:
  case ARM::t2CMPrr:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = MI->getOperand(1).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}
/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
    case ARM::ANDri:
    case ARM::t2ANDri:
      if (CmpMask != MI->getOperand(2).getImm())
        return false;
      if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
        return true;
      break;
    case ARM::COPY: {
      // Walk down one instruction which is potentially an 'and'.
      const MachineInstr &Copy = *MI;
      MachineBasicBlock::iterator AND(
        llvm::next(MachineBasicBlock::iterator(MI)));
      if (AND == MI->getParent()->end()) return false;
      MI = AND;
      return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                               CmpMask, true);
    }
  }

  return false;
}
/// getSwappedCondition - assume the flags are set by MI(a,b), return
/// the condition code if we modify the instructions such that flags are
/// set by MI(b,a).
inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) {
  switch (CC) {
  default: return ARMCC::AL;
  case ARMCC::EQ: return ARMCC::EQ;
  case ARMCC::NE: return ARMCC::NE;
  case ARMCC::HS: return ARMCC::LS;
  case ARMCC::LO: return ARMCC::HI;
  case ARMCC::HI: return ARMCC::LO;
  case ARMCC::LS: return ARMCC::HS;
  case ARMCC::GE: return ARMCC::LE;
  case ARMCC::LT: return ARMCC::GT;
  case ARMCC::GT: return ARMCC::LT;
  case ARMCC::LE: return ARMCC::GE;
  }
}
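
// For example, if the original compare was CMP r2, r1 but the flags will now
// be produced by SUBS r0, r1, r2 (operands swapped), a dependent BGE must be
// rewritten as BLE; EQ and NE are unaffected by the swap.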
/// isRedundantFlagInstr - check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// CMPri can be made redundant by SUBri if the operands are the same.
/// This function can be extended later on.
inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
                                        unsigned SrcReg2, int ImmValue,
                                        MachineInstr *OI) {
  if ((CmpI->getOpcode() == ARM::CMPrr ||
       CmpI->getOpcode() == ARM::t2CMPrr) &&
      (OI->getOpcode() == ARM::SUBrr ||
       OI->getOpcode() == ARM::t2SUBrr) &&
      ((OI->getOperand(1).getReg() == SrcReg &&
        OI->getOperand(2).getReg() == SrcReg2) ||
       (OI->getOperand(1).getReg() == SrcReg2 &&
        OI->getOperand(2).getReg() == SrcReg)))
    return true;

  if ((CmpI->getOpcode() == ARM::CMPri ||
       CmpI->getOpcode() == ARM::t2CMPri) &&
      (OI->getOpcode() == ARM::SUBri ||
       OI->getOpcode() == ARM::t2SUBri) &&
      OI->getOperand(1).getReg() == SrcReg &&
      OI->getOperand(2).getImm() == ImmValue)
    return true;
  return false;
}
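
// For example (virtual registers assumed):
//     %r0 = SUBrr %r1, %r2
//     CMPrr %r1, %r2
// Both compute the flags of r1 - r2, so the CMPrr is redundant once the
// SUBrr is converted to its flag-setting form.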
/// optimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register;
/// Remove a redundant Compare instruction if an earlier instruction can set the
/// flags in the same way as Compare.
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
/// condition code of instructions which use the flags.
bool ARMBaseInstrInfo::
optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
                     int CmpMask, int CmpValue,
                     const MachineRegisterInfo *MRI) const {
  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;

  // Masked compares sometimes use the same register as the corresponding 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  // There are two possible candidates which can be changed to set CPSR:
  // One is MI, the other is a SUB instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  // For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
  MachineInstr *Sub = NULL;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = NULL;
  else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
    // Conservatively refuse to convert an instruction which isn't in the same
    // BB as the comparison.
    // For CMPri, we need to check Sub, thus we can't return here.
    if (CmpInstr->getOpcode() == ARM::CMPri ||
        CmpInstr->getOpcode() == ARM::t2CMPri)
      MI = NULL;
    else
      return false;
  }

  // Check that CPSR isn't set between the comparison instruction and the one we
  // want to change. At the same time, search for Sub.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
        Instr.readsRegister(ARM::CPSR, TRI))
      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
      Sub = &*I;
      break;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI) MI = Sub;

  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for the use of CPSR
    // When checking against MI: if it uses a condition code that requires
    // checking of the V bit, then this is not safe to do.
    // It is safe to remove CmpInstr if CPSR is redefined or killed.
    // If we are done with the basic block, we need to check whether CPSR is
    // live-out.
    SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
        OperandsToUpdate;
    bool isSafe = false;
    I = CmpInstr;
    E = CmpInstr->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
          isSafe = true;
          break;
        }
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // Condition code is after the operand before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        if (Sub) {
          ARMCC::CondCodes NewCC = getSwappedCondition(CC);
          if (NewCC == ARMCC::AL)
            return false;
          // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
          // on CMP needs to be updated to be based on SUB.
          // Push the condition code operands to OperandsToUpdate.
          // If it is safe to remove CmpInstr, the condition code of these
          // operands will be modified.
          if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
              Sub->getOperand(2).getReg() == SrcReg)
            OperandsToUpdate.push_back(std::make_pair(&((*I).getOperand(IO-1)),
                                                      NewCC));
        }
        else
          switch (CC) {
          default:
            // CPSR can be used multiple times, we should continue.
            break;
          case ARMCC::VS:
          case ARMCC::VC:
          case ARMCC::GE:
          case ARMCC::LT:
          case ARMCC::GT:
          case ARMCC::LE:
            // The instruction uses the V bit or C bit which is not safe.
            return false;
          }
      }
    }

    // If CPSR is not killed nor re-defined, we should check whether it is
    // live-out. If it is live-out, do not optimize.
    if (!isSafe) {
      MachineBasicBlock *MBB = CmpInstr->getParent();
      for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
               SE = MBB->succ_end(); SI != SE; ++SI)
        if ((*SI)->isLiveIn(ARM::CPSR))
          return false;
    }

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    CmpInstr->eraseFromParent();

    // Modify the condition code of operands in OperandsToUpdate.
    // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
    // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
    for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
      OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
    return true;
  }
  }

  return false;
}
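
// Putting the pieces together, a typical rewrite (registers assumed):
//     sub r0, r1, r2         becomes      subs r0, r1, r2
//     ... (no CPSR access)                ...
//     cmp r1, r2                          <deleted>
//     beq .LBB0_2                         beq .LBB0_2
// In the swapped case (cmp r2, r1), dependent condition codes are rewritten
// via getSwappedCondition above.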
bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  const MCInstrDesc &DefMCID = DefMI->getDesc();
  if (DefMCID.hasOptionalDef()) {
    unsigned NumOps = DefMCID.getNumOperands();
    const MachineOperand &MO = DefMI->getOperand(NumOps-1);
    if (MO.getReg() == ARM::CPSR && !MO.isDead())
      // If DefMI defines CPSR and it is not dead, it's obviously not safe
      // to fold it.
      return false;
  }

  const MCInstrDesc &UseMCID = UseMI->getDesc();
  if (UseMCID.hasOptionalDef()) {
    unsigned NumOps = UseMCID.getNumOperands();
    if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
      // If the instruction sets the flag, do not attempt this optimization
      // since it may change the semantics of the code.
      return false;
  }

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}
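
// Worked example (values assumed): given
//     %r1 = MOVi32imm 0x00F000F0
//     %r0 = ADDrr %r2, %r1
// the constant is a two-part SO immediate, so the pair is rewritten to
//     %tmp = ADDri %r2, #0x000000F0
//     %r0  = ADDri %tmp, #0x00F00000
// and the MOVi32imm is erased.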
unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  int ItinUOps = ItinData->getNumMicroOps(Class);
  if (ItinUOps >= 0)
    return ItinUOps;

  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;

  // The number of uOps for load / store multiple are determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      int A8UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++A8UOps;
      return A8UOps;
    } else if (Subtarget.isCortexA9()) {
      int A9UOps = (NumRegs / 2);
      // If there is an odd number of registers or if it's not 64-bit aligned,
      // then it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++A9UOps;
      return A9UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}
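
// Worked example (assumed): "ldmia r0!, {r1,r2,r3,r4,r5}" has NumRegs == 5.
// Cortex-A8: 5/2 == 2 plus one for the odd register, i.e. 3 uops (issued as
// 2, 2, 1). Cortex-A9: likewise 5/2 + 1 == 3, the extra uop modeling the
// additional AGU cycle. A VFP "vldmia r0, {d0-d3}" uses the VFP/NEON formula:
// 4/2 + 4%2 + 1 == 3 uops.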
int
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &DefMCID,
                                  unsigned DefClass,
                                  unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    DefCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++DefCycle;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = RegNo;
    bool isSLoad = false;

    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLDMSIA:
    case ARM::VLDMSIA_UPD:
    case ARM::VLDMSDB_UPD:
      isSLoad = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
    // then it takes an extra cycle.
    if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
      ++DefCycle;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}
int
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &DefMCID,
                                 unsigned DefClass,
                                 unsigned DefIdx, unsigned DefAlign) const {
  int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    // Def is the address writeback.
    return ItinData->getOperandCycle(DefClass, DefIdx);

  int DefCycle;
  if (Subtarget.isCortexA8()) {
    // 4 registers would be issued: 1, 2, 1.
    // 5 registers would be issued: 1, 2, 2.
    DefCycle = RegNo / 2;
    if (DefCycle < 1)
      DefCycle = 1;
    // Result latency is issue cycle + 2: E2.
    DefCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    DefCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || DefAlign < 8)
      ++DefCycle;
    // Result latency is AGU cycles + 2.
    DefCycle += 2;
  } else {
    // Assume the worst.
    DefCycle = RegNo + 2;
  }

  return DefCycle;
}
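
// Worked example (assumed, 64-bit aligned address): for the fifth register
// loaded by an LDM, RegNo == 5. Cortex-A8: 5/2 + 2 == 4 cycles until the
// result is available. Cortex-A9: 5/2 == 2 AGU cycles, +1 for the odd
// register count, +2 for the result, i.e. 5 cycles.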
int
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
                                  const MCInstrDesc &UseMCID,
                                  unsigned UseClass,
                                  unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    // (regno / 2) + (regno % 2) + 1
    UseCycle = RegNo / 2 + 1;
    if (RegNo % 2)
      ++UseCycle;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = RegNo;
    bool isSStore = false;

    switch (UseMCID.getOpcode()) {
    default: break;
    case ARM::VSTMSIA:
    case ARM::VSTMSIA_UPD:
    case ARM::VSTMSDB_UPD:
      isSStore = true;
      break;
    }

    // If there is an odd number of 'S' registers or if it's not 64-bit aligned,
    // then it takes an extra cycle.
    if ((isSStore && (RegNo % 2)) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = RegNo + 2;
  }

  return UseCycle;
}
int
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
                                 const MCInstrDesc &UseMCID,
                                 unsigned UseClass,
                                 unsigned UseIdx, unsigned UseAlign) const {
  int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
  if (RegNo <= 0)
    return ItinData->getOperandCycle(UseClass, UseIdx);

  int UseCycle;
  if (Subtarget.isCortexA8()) {
    UseCycle = RegNo / 2;
    if (UseCycle < 2)
      UseCycle = 2;
    // Read in E3.
    UseCycle += 2;
  } else if (Subtarget.isCortexA9()) {
    UseCycle = (RegNo / 2);
    // If there is an odd number of registers or if it's not 64-bit aligned,
    // then it takes an extra AGU (Address Generation Unit) cycle.
    if ((RegNo % 2) || UseAlign < 8)
      ++UseCycle;
  } else {
    // Assume the worst.
    UseCycle = 1;
  }
  return UseCycle;
}
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction, in which case the
  // operand latency might be determinable dynamically. Let the target try to
  // figure it out.
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefMCID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    LdmBypass = 1;
    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseMCID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}
static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &DefIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_iterator I = MI; ++I;
  MachineBasicBlock::const_instr_iterator II =
    llvm::prior(I.getInstrIterator());
  assert(II->isInsideBundle() && "Empty bundle?");

  int Idx = -1;
  while (II->isInsideBundle()) {
    Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
    if (Idx != -1)
      break;
    --II;
    ++Dist;
  }

  assert(Idx != -1 && "Cannot find bundled definition!");
  DefIdx = Idx;
  return II;
}

static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &UseIdx, unsigned &Dist) {
  Dist = 0;

  MachineBasicBlock::const_instr_iterator II = MI; ++II;
  assert(II->isInsideBundle() && "Empty bundle?");
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();

  // FIXME: This doesn't properly handle multiple uses.
  int Idx = -1;
  while (II != E && II->isInsideBundle()) {
    Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
    if (Idx != -1)
      break;
    if (II->getOpcode() != ARM::t2IT)
      ++Dist;
    ++II;
  }

  if (Idx == -1) {
    Dist = 0;
    return 0;
  }

  UseIdx = Idx;
  return II;
}
/// Return the number of cycles to add to (or subtract from) the static
/// itinerary based on the def opcode and alignment. The caller will ensure that
/// adjusted latency is at least one cycle.
static int adjustDefLatency(const ARMSubtarget &Subtarget,
                            const MachineInstr *DefMI,
                            const MCInstrDesc *DefMCID, unsigned DefAlign) {
  int Adjust = 0;
  if (Subtarget.isCortexA8() || Subtarget.isCortexA9()) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal = DefMI->getOperand(3).getImm();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Adjust;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt = DefMI->getOperand(3).getImm();
      if (ShAmt == 0 || ShAmt == 2)
        --Adjust;
      break;
    }
    }
  }
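
  // For example, on A8/A9 "ldr r0, [r1, r2]" and "ldr r0, [r1, r2, lsl #2]"
  // are modeled one cycle cheaper than the itinerary value, while a variant
  // such as "ldr r0, [r1, r2, lsl #1]" keeps the full scheduled latency.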
  if (DefAlign < 8 && Subtarget.isCortexA9()) {
    switch (DefMCID->getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8:
    case ARM::VLD2q16:
    case ARM::VLD2q32:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8wb_fixed:
    case ARM::VLD2q16wb_fixed:
    case ARM::VLD2q32wb_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8wb_register:
    case ARM::VLD2q16wb_register:
    case ARM::VLD2q32wb_register:
    case ARM::VLD3d8:
    case ARM::VLD3d16:
    case ARM::VLD3d32:
    case ARM::VLD1d64T:
    case ARM::VLD3d8_UPD:
    case ARM::VLD3d16_UPD:
    case ARM::VLD3d32_UPD:
    case ARM::VLD1d64Twb_fixed:
    case ARM::VLD1d64Twb_register:
    case ARM::VLD3q8_UPD:
    case ARM::VLD3q16_UPD:
    case ARM::VLD3q32_UPD:
    case ARM::VLD4d8:
    case ARM::VLD4d16:
    case ARM::VLD4d32:
    case ARM::VLD1d64Q:
    case ARM::VLD4d8_UPD:
    case ARM::VLD4d16_UPD:
    case ARM::VLD4d32_UPD:
    case ARM::VLD1d64Qwb_fixed:
    case ARM::VLD1d64Qwb_register:
    case ARM::VLD4q8_UPD:
    case ARM::VLD4q16_UPD:
    case ARM::VLD4q32_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8:
    case ARM::VLD4DUPd16:
    case ARM::VLD4DUPd32:
    case ARM::VLD4DUPd8_UPD:
    case ARM::VLD4DUPd16_UPD:
    case ARM::VLD4DUPd32_UPD:
    case ARM::VLD1LNd8:
    case ARM::VLD1LNd16:
    case ARM::VLD1LNd32:
    case ARM::VLD1LNd8_UPD:
    case ARM::VLD1LNd16_UPD:
    case ARM::VLD1LNd32_UPD:
    case ARM::VLD2LNd8:
    case ARM::VLD2LNd16:
    case ARM::VLD2LNd32:
    case ARM::VLD2LNq16:
    case ARM::VLD2LNq32:
    case ARM::VLD2LNd8_UPD:
    case ARM::VLD2LNd16_UPD:
    case ARM::VLD2LNd32_UPD:
    case ARM::VLD2LNq16_UPD:
    case ARM::VLD2LNq32_UPD:
    case ARM::VLD4LNd8:
    case ARM::VLD4LNd16:
    case ARM::VLD4LNd32:
    case ARM::VLD4LNq16:
    case ARM::VLD4LNq32:
    case ARM::VLD4LNd8_UPD:
    case ARM::VLD4LNd16_UPD:
    case ARM::VLD4LNd32_UPD:
    case ARM::VLD4LNq16_UPD:
    case ARM::VLD4LNq32_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Adjust;
      break;
    }
  }
  return Adjust;
}
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *UseMI,
                                    unsigned UseIdx) const {
  // No operand latency. The caller may fall back to getInstrLatency.
  if (!ItinData || ItinData->isEmpty())
    return -1;

  const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
  unsigned Reg = DefMO.getReg();

  const MCInstrDesc *DefMCID = &DefMI->getDesc();
  const MCInstrDesc *UseMCID = &UseMI->getDesc();

  unsigned DefAdj = 0;
  if (DefMI->isBundle()) {
    DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
    DefMCID = &DefMI->getDesc();
  }
  if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
      DefMI->isRegSequence() || DefMI->isImplicitDef()) {
    return 1;
  }

  unsigned UseAdj = 0;
  if (UseMI->isBundle()) {
    unsigned NewUseIdx;
    const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
                                                   Reg, NewUseIdx, UseAdj);
    if (!NewUseMI)
      return -1;

    UseMI = NewUseMI;
    UseIdx = NewUseIdx;
    UseMCID = &UseMI->getDesc();
  }

  if (Reg == ARM::CPSR) {
    if (DefMI->getOpcode() == ARM::FMSTAT) {
      // fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
      return Subtarget.isCortexA9() ? 1 : 20;
    }

    // CPSR set and branch can be paired in the same cycle.
    if (UseMI->isBranch())
      return 0;

    // Otherwise it takes the instruction latency (generally one).
    unsigned Latency = getInstrLatency(ItinData, DefMI);

    // For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
    // its uses. Instructions which are otherwise scheduled between them may
    // incur a code size penalty (not able to use the CPSR setting 16-bit
    // instructions).
    if (Latency > 0 && Subtarget.isThumb2()) {
      const MachineFunction *MF = DefMI->getParent()->getParent();
      if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
        --Latency;
    }
    return Latency;
  }

  if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit())
    return -1;

  unsigned DefAlign = DefMI->hasOneMemOperand()
    ? (*DefMI->memoperands_begin())->getAlignment() : 0;
  unsigned UseAlign = UseMI->hasOneMemOperand()
    ? (*UseMI->memoperands_begin())->getAlignment() : 0;

  // Get the itinerary's latency if possible, and handle variable_ops.
  int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
                                  *UseMCID, UseIdx, UseAlign);
  // Unable to find operand latency. The caller may resort to getInstrLatency.
  if (Latency < 0)
    return Latency;

  // Adjust for IT block position.
  int Adj = DefAdj + UseAdj;

  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
  Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
  if (Adj >= 0 || (int)Latency > -Adj) {
    return Latency + Adj;
  }
  // Return the itinerary latency, which may be zero but not less than zero.
  return 0;
}
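
// Sketch of the CPSR special cases above (modeled, not measured, values):
// "fmstat" feeding any CPSR reader costs 20 cycles on A8 but 1 on A9; a
// flag-setting "subs r0, r1, r2" feeding "bne" costs 0 because the two can
// be paired in the same cycle; other CPSR defs take the normal instruction
// latency (generally one).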
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {
  if (!DefNode->isMachineOpcode())
    return 1;

  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());

  if (isZeroCost(DefMCID.Opcode))
    return 0;

  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  if (!UseNode->isMachineOpcode()) {
    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
    if (Subtarget.isCortexA9())
      return Latency <= 2 ? 1 : Latency - 1;
    else
      return Latency <= 3 ? 1 : Latency - 2;
  }

  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
    ? (*DefMN->memoperands_begin())->getAlignment() : 0;
  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
    ? (*UseMN->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                  UseMCID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      unsigned ShOpVal =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }
  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLD1q8:
    case ARM::VLD1q16:
    case ARM::VLD1q32:
    case ARM::VLD1q64:
    case ARM::VLD1q8wb_register:
    case ARM::VLD1q16wb_register:
    case ARM::VLD1q32wb_register:
    case ARM::VLD1q64wb_register:
    case ARM::VLD1q8wb_fixed:
    case ARM::VLD1q16wb_fixed:
    case ARM::VLD1q32wb_fixed:
    case ARM::VLD1q64wb_fixed:
    case ARM::VLD2d8:
    case ARM::VLD2d16:
    case ARM::VLD2d32:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8wb_fixed:
    case ARM::VLD2d16wb_fixed:
    case ARM::VLD2d32wb_fixed:
    case ARM::VLD2q8PseudoWB_fixed:
    case ARM::VLD2q16PseudoWB_fixed:
    case ARM::VLD2q32PseudoWB_fixed:
    case ARM::VLD2d8wb_register:
    case ARM::VLD2d16wb_register:
    case ARM::VLD2d32wb_register:
    case ARM::VLD2q8PseudoWB_register:
    case ARM::VLD2q16PseudoWB_register:
    case ARM::VLD2q32PseudoWB_register:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8:
    case ARM::VLD1DUPq16:
    case ARM::VLD1DUPq32:
    case ARM::VLD1DUPq8wb_fixed:
    case ARM::VLD1DUPq16wb_fixed:
    case ARM::VLD1DUPq32wb_fixed:
    case ARM::VLD1DUPq8wb_register:
    case ARM::VLD1DUPq16wb_register:
    case ARM::VLD1DUPq32wb_register:
    case ARM::VLD2DUPd8:
    case ARM::VLD2DUPd16:
    case ARM::VLD2DUPd32:
    case ARM::VLD2DUPd8wb_fixed:
    case ARM::VLD2DUPd16wb_fixed:
    case ARM::VLD2DUPd32wb_fixed:
    case ARM::VLD2DUPd8wb_register:
    case ARM::VLD2DUPd16wb_register:
    case ARM::VLD2DUPd32wb_register:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increase by one.
      ++Latency;
      break;
    }

  return Latency;
}
unsigned
ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
                                   const MachineInstr *DefMI, unsigned DefIdx,
                                   const MachineInstr *DepMI) const {
  unsigned Reg = DefMI->getOperand(DefIdx).getReg();
  if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
    return 1;

  // If the second MI is predicated, then there is an implicit use dependency.
  return getInstrLatency(ItinData, DefMI);
}
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                           const MachineInstr *MI,
                                           unsigned *PredCost) const {
  if (MI->isCopyLike() || MI->isInsertSubreg() ||
      MI->isRegSequence() || MI->isImplicitDef())
    return 1;

  // An instruction scheduler typically runs on unbundled instructions, however
  // other passes may query the latency of a bundled instruction.
  if (MI->isBundle()) {
    unsigned Latency = 0;
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      if (I->getOpcode() != ARM::t2IT)
        Latency += getInstrLatency(ItinData, I, PredCost);
    }
    return Latency;
  }

  const MCInstrDesc &MCID = MI->getDesc();
  if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) {
    // When predicated, CPSR is an additional source operand for CPSR updating
    // instructions; this apparently increases their latencies.
    *PredCost = 1;
  }
  // Be sure to call getStageLatency for an empty itinerary in case it has a
  // valid MinLatency property.
  if (!ItinData)
    return MI->mayLoad() ? 3 : 1;

  unsigned Class = MCID.getSchedClass();

  // For instructions with variable uops, use uops as latency.
  if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
    return getNumMicroOps(ItinData, MI);

  // For the common case, fall back on the itinerary's latency.
  unsigned Latency = ItinData->getStageLatency(Class);

  // Adjust for dynamic def-side opcode variants not captured by the itinerary.
  unsigned DefAlign = MI->hasOneMemOperand()
    ? (*MI->memoperands_begin())->getAlignment() : 0;
  int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign);
  if (Adj >= 0 || (int)Latency > -Adj) {
    return Latency + Adj;
  }
  return Latency;
}
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      SDNode *Node) const {
  if (!Node->isMachineOpcode())
    return 1;

  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Opcode = Node->getMachineOpcode();
  switch (Opcode) {
  default:
    return ItinData->getStageLatency(get(Opcode).getSchedClass());
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    return 2;
  }
}
bool ARMBaseInstrInfo::
hasHighOperandLatency(const InstrItineraryData *ItinData,
                      const MachineRegisterInfo *MRI,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
  if (Subtarget.isCortexA8() &&
      (DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
    // CortexA8 VFP instructions are not pipelined.
    return true;

  // Hoist VFP / NEON instructions with 4 or higher latency.
  int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx,
                                      /*FindMin=*/false);
  if (Latency < 0)
    Latency = getInstrLatency(ItinData, DefMI);
  if (Latency <= 3)
    return false;
  return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
         UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
}
bool ARMBaseInstrInfo::
hasLowDefLatency(const InstrItineraryData *ItinData,
                 const MachineInstr *DefMI, unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
  if (DDomain == ARMII::DomainGeneral) {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    return (DefCycle != -1 && DefCycle <= 2);
  }
  return false;
}
bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
                                         StringRef &ErrInfo) const {
  if (convertAddSubFlagsOpcode(MI->getOpcode())) {
    ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
    return false;
  }
  return true;
}
bool
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                     unsigned &AddSubOpc,
                                     bool &NegAcc, bool &HasLane) const {
  DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
  if (I == MLxEntryMap.end())
    return false;

  const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
  MulOpc = Entry.MulOpc;
  AddSubOpc = Entry.AddSubOpc;
  NegAcc = Entry.NegAcc;
  HasLane = Entry.HasLane;
  return true;
}
//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
// and some can go down both. The vmov instructions go down the VFP pipeline,
// but they can be changed to vorr equivalents that are executed by the NEON
// pipeline.
//
// We use the following execution domain numbering:
//
enum ARMExeDomain {
  ExeGeneric = 0,
  ExeVFP = 1,
  ExeNEON = 2
};
//
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
//
std::pair<uint16_t, uint16_t>
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
  // VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
  // if they are not predicated.
  if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));

  // Cortex-A9 is particularly picky about mixing the two and wants these
  // converted.
  if (Subtarget.isCortexA9() && !isPredicated(MI) &&
      (MI->getOpcode() == ARM::VMOVRS ||
       MI->getOpcode() == ARM::VMOVSR ||
       MI->getOpcode() == ARM::VMOVS))
    return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));

  // No other instructions can be swizzled, so just determine their domain.
  unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;

  if (Domain & ARMII::DomainNEON)
    return std::make_pair(ExeNEON, 0);

  // Certain instructions can go either way on Cortex-A8.
  // Treat them as NEON instructions.
  if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
    return std::make_pair(ExeNEON, 0);

  if (Domain & ARMII::DomainVFP)
    return std::make_pair(ExeVFP, 0);

  return std::make_pair(ExeGeneric, 0);
}
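
// For example, an unpredicated "vmov.f64 d0, d1" (VMOVD) yields
// (ExeVFP, (1 << ExeVFP) | (1 << ExeNEON)): it currently executes in the
// VFP domain but may be rewritten by setExecutionDomain below into the
// NEON-domain "vorr d0, d1, d1".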
static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
                                            unsigned SReg, unsigned &Lane) {
  unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
  Lane = 0;

  if (DReg != ARM::NoRegister)
    return DReg;

  Lane = 1;
  DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);

  assert(DReg && "S-register with no D super-register?");
  return DReg;
}
void
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
  unsigned DstReg, SrcReg, DReg;
  unsigned Lane;
  MachineInstrBuilder MIB(MI);
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  switch (MI->getOpcode()) {
    default:
      llvm_unreachable("cannot handle opcode!");
      break;
    case ARM::VMOVD:
      if (Domain != ExeNEON)
        break;

      // Zap the predicate operands.
      assert(!isPredicated(MI) && "Cannot predicate a VORRd");

      // Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
      DstReg = MI->getOperand(0).getReg();
      SrcReg = MI->getOperand(1).getReg();

      for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
        MI->RemoveOperand(i-1);

      // Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
      MI->setDesc(get(ARM::VORRd));
      AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
                        .addReg(SrcReg)
                        .addReg(SrcReg));
      break;
    case ARM::VMOVRS:
      if (Domain != ExeNEON)
        break;
      assert(!isPredicated(MI) && "Cannot predicate a VGETLN");

      // Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
      DstReg = MI->getOperand(0).getReg();
      SrcReg = MI->getOperand(1).getReg();

      for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
        MI->RemoveOperand(i-1);

      DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);

      // Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
      // Note that DSrc has been widened and the other lane may be undef, which
      // contaminates the entire register.
      MI->setDesc(get(ARM::VGETLNi32));
      AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
                        .addReg(DReg, RegState::Undef)
                        .addImm(Lane));

      // The old source should be an implicit use, otherwise we might think it
      // was dead before here.
      MIB.addReg(SrcReg, RegState::Implicit);
      break;
    case ARM::VMOVSR:
      if (Domain != ExeNEON)
        break;
      assert(!isPredicated(MI) && "Cannot predicate a VSETLN");

      // Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
      DstReg = MI->getOperand(0).getReg();
      SrcReg = MI->getOperand(1).getReg();

      DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);

      // If we insert both a novel <def> and an <undef> on the DReg, we break
      // any existing dependency chain on the unused lane. Either already being
      // present means this instruction is in that chain anyway so we can make
      // the transformation.
      if (!MI->definesRegister(DReg, TRI) && !MI->readsRegister(DReg, TRI))
        break;

      for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
        MI->RemoveOperand(i-1);

      // Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
      // Again DDst may be undefined at the beginning of this instruction.
      MI->setDesc(get(ARM::VSETLNi32));
      MIB.addReg(DReg, RegState::Define)
         .addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI)))
         .addReg(SrcReg)
         .addImm(Lane);
      AddDefaultPred(MIB);

      // The narrower destination must be marked as set to keep previous chains
      // in place.
      MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
      break;
    case ARM::VMOVS: {
      if (Domain != ExeNEON)
        break;

      // Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
      DstReg = MI->getOperand(0).getReg();
      SrcReg = MI->getOperand(1).getReg();

      unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
      DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
      DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);

      // If we insert both a novel <def> and an <undef> on the DReg, we break
      // any existing dependency chain on the unused lane. Either already being
      // present means this instruction is in that chain anyway so we can make
      // the transformation.
      if (!MI->definesRegister(DDst, TRI) && !MI->readsRegister(DDst, TRI))
        break;

      for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
        MI->RemoveOperand(i-1);

      if (DSrc == DDst) {
        // Destination can be:
        //     %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
        MI->setDesc(get(ARM::VDUPLN32d));
        MIB.addReg(DDst, RegState::Define)
           .addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI)))
           .addImm(SrcLane);
        AddDefaultPred(MIB);

        // Neither the source nor the destination is naturally represented any
        // more, so add them in manually.
        MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
        MIB.addReg(SrcReg, RegState::Implicit);
        break;
      }

      // In general there's no single instruction that can perform an S <-> S
      // move in NEON space, but a pair of VEXT instructions *can* do the
      // job. It turns out that the VEXTs needed will only use DSrc once, with
      // the position based purely on the combination of lane-0 and lane-1
      // involved. For example
      //     vmov s0, s2 -> vext.32 d0, d0, d1, #1  vext.32 d0, d0, d0, #1
      //     vmov s1, s3 -> vext.32 d0, d1, d0, #1  vext.32 d0, d0, d0, #1
      //     vmov s0, s3 -> vext.32 d0, d0, d0, #1  vext.32 d0, d1, d0, #1
      //     vmov s1, s2 -> vext.32 d0, d0, d0, #1  vext.32 d0, d0, d1, #1
      //
      // Pattern of the MachineInstrs is:
      //     %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
      MachineInstrBuilder NewMIB;
      NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                       get(ARM::VEXTd32), DDst);

      // On the first instruction, both DSrc and DDst may be <undef> if present.
      // Specifically when the original instruction didn't have them as an
      // <imp-use>.
      unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
      bool CurUndef = !MI->readsRegister(CurReg, TRI);
      NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

      CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
      CurUndef = !MI->readsRegister(CurReg, TRI);
      NewMIB.addReg(CurReg, getUndefRegState(CurUndef));

      NewMIB.addImm(1);
      AddDefaultPred(NewMIB);

      if (SrcLane == DstLane)
        NewMIB.addReg(SrcReg, RegState::Implicit);

      MI->setDesc(get(ARM::VEXTd32));
      MIB.addReg(DDst, RegState::Define);

      // On the second instruction, DDst has definitely been defined above, so
      // it is not <undef>. DSrc, if present, can be <undef> as above.
      CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
      CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
      MIB.addReg(CurReg, getUndefRegState(CurUndef));

      CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
      CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
      MIB.addReg(CurReg, getUndefRegState(CurUndef));

      MIB.addImm(1);
      AddDefaultPred(MIB);

      if (SrcLane != DstLane)
        MIB.addReg(SrcReg, RegState::Implicit);

      // As before, the original destination is no longer represented, add it
      // implicitly.
      MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
      break;
    }
  }
}
bool ARMBaseInstrInfo::hasNOP() const {
  return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
}