//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSizeInBytes - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MCInstrDesc &Desc = MI->getDesc();

  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}

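// For reference, the Cond encodings produced above look like this (sketch):
//   b.lt <bb>         -> Cond = { LT }
//   cbz  w0, <bb>     -> Cond = { -1, CBZW, w0 }
//   tbz  w0, #3, <bb> -> Cond = { -1, TBZW, w0, 3 }
// The leading -1 marks a folded compare-and-branch, distinguishing it from a
// plain Bcc condition code.
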
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc: invert the condition code.
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch: swap the opcode for its dual.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

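// Example: reversing "b.lt" yields "b.ge" via getInvertedCondCode, while a
// folded "cbz w0" simply becomes its dual "cbnz w0"; the register operands
// (and the bit number for tbz/tbnz) in Cond are left untouched.
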
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc.
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // If NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

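// Illustrative folds enabled by the opcode returned here (sketch):
//   %v = ADDWri %x, 1, 0  feeding a csel -> CSINCWr selecting "x + 1"
//   %v = ORNWrr wzr, %x   feeding a csel -> CSINVWr selecting "~x"
//   %v = SUBWrr wzr, %x   feeding a csel -> CSNEGWr selecting "-x"
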
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

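// For example (sketch), a select on "cbz w0" with GPR32 operands expands to:
//   subs wzr, w0, #0              ; materialize the compare
//   csel wD, wTrue, wFalse, eq
// and if wTrue is defined by a foldable "add wX, #1", the TryFold path above
// instead emits "csinc wD, wFalse, wX, ne" with the condition inverted.
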
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}

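// E.g. "subs w0, w1, #42" yields SrcReg = w1, SrcReg2 = 0, CmpMask = ~0 and
// CmpValue = 42, while register-register forms report both source registers
// and a CmpValue of 0.
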
// Verify that each register operand of Instr satisfies its register class
// constraint, constraining virtual register classes where possible.
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned NewOpc;
    switch (CmpInstr->getOpcode()) {
    default:
      return false;
    case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
    case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
    case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
    case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
    case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
    case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
    case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
    case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
    case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
    case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
    case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
    case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
    case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
    case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
    case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
    case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
    }

    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr, and
  // stop when reaching the definition of the source register or when done
  // with the basic block, to check whether NZCV is used or modified in
  // between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that NZCV isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        Instr.readsRegister(AArch64::NZCV, TRI))
      // This instruction modifies or uses NZCV after the one we want to
      // change. We can't do this transformation.
      return false;
    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr:  NewOpc = AArch64::ADCSWr;  break;
  case AArch64::ADCXr:  NewOpc = AArch64::ADCSXr;  break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr:  NewOpc = AArch64::SBCSWr;  break;
  case AArch64::SBCXr:  NewOpc = AArch64::SBCSXr;  break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the instruction uses a condition code that
  // requires checking of the V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::GT:
      case AArch64CC::LT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

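// A typical rewrite enabled here (sketch): given
//   %v = ADDWrr %a, %b
//   ...
//   SUBSWri %v, 0, 0     ; cmp %v, #0, with NZCV consumed by a b.eq
// the ADDWrr becomes ADDSWrr and the compare is erased, provided nothing
// touches NZCV in between and no user inspects the V bit (e.g. b.vs, b.ge).
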
/// Return true if this instruction has a shifted-register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended-register operand with a
/// non-zero extend/shift immediate.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

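// E.g. "ldr x0, [x1, w2, sxtw #3]" both extends and scales its offset
// register and returns true here, while "ldr x0, [x1, x2]" (UXTX, no shift)
// returns false.
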
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

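// The hint lives in the target-specific bits of the MachineMemOperand flags,
// so it survives to the load/store optimizer. Usage sketch:
//   TII->suppressLdStPair(MI);          // mark the instruction...
//   if (TII->isLdStPairSuppressed(MI))  // ...and query it later to
//     /* skip ldp/stp formation */;     // inhibit pairing.
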
bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui: {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
  }
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

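// Example: "ldr x0, [x10, #8]" and "ldr x1, [x10, #16]" carry unit offsets
// 1 and 2 after scaling, so Ofs1 + 1 == Ofs2 holds and the pair may later
// be fused into "ldp x0, x1, [x10, #8]".
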
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                         int FrameIx,
                                                         uint64_t Offset,
                                                         const MDNode *MDPtr,
                                                         DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, and that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

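// E.g. copying Q1_Q2_Q3 from Q0_Q1_Q2: a low-to-high sub-register copy would
// overwrite Q1 before it is read as a source, so copyPhysRegTuple below walks
// the sub-registers from high to low in that case.
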
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(getSubTarget().hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  assert(0 && "unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

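// E.g. spilling a GPR64 virtual register to frame index FI emits
// "STRXui %reg, FI, #0", while a QQ pair has no scaled-immediate form and is
// stored with "ST1Twov2d" (Offset == false, so no trailing #0 is appended).
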
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const AArch64InstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}

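// Worked example (sketch): Offset = 0x123456 emits
//   add xD, xS, #0x123, lsl #12   ; consume the high chunk (0x123000)
//   add xD, xD, #0x456            ; the remainder fits in the 12-bit field
// since each ADD/SUB immediate encodes 12 bits, optionally shifted left 12.
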
MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}

int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    assert(0 && "unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}

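// E.g. an LDRXui (Scale = 8) at byte offset 24 is emittable as scaled
// immediate #3, while byte offset 20 is not a multiple of 8 and is rewritten
// to the unscaled LDURXi form with immediate #20.
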
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}

void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}