//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}

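// The Cond vector built above takes one of three shapes, which the
// consumers below (ReverseBranchCondition, instantiateCondBranch,
// insertSelect) rely on:
//   b.<cc>   -> { <cc> }
//   cbz/cbnz -> { -1, <opcode>, <register> }
//   tbz/tbnz -> { -1, <opcode>, <register>, <bit number> }
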
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator left is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:  Cond[1].setImm(AArch64::CBNZW); break;
    case AArch64::CBNZW: Cond[1].setImm(AArch64::CBZW); break;
    case AArch64::CBZX:  Cond[1].setImm(AArch64::CBNZX); break;
    case AArch64::CBNZX: Cond[1].setImm(AArch64::CBZX); break;
    case AArch64::TBZW:  Cond[1].setImm(AArch64::TBNZW); break;
    case AArch64::TBNZW: Cond[1].setImm(AArch64::TBZW); break;
    case AArch64::TBZX:  Cond[1].setImm(AArch64::TBNZX); break;
    case AArch64::TBNZX: Cond[1].setImm(AArch64::TBZX); break;
    }
  }

  return false;
}

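// For example, reversing the condition turns:
//   b.eq <bb>        into  b.ne <bb>
//   cbz w0, <bb>     into  cbnz w0, <bb>
//   tbz w0, #3, <bb> into  tbnz w0, #3, <bb>
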
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

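// Example of the fold this enables (register names are illustrative): given
//   %x = ADDWri %a, 1, 0              ; x = a + 1
// feeding the true operand of a csel, insertSelect() below can emit
//   CSINCWr %d, %b, %a, <inverted cc> ; d = cc ? a + 1 : b
// instead of keeping the add and a separate CSELWr.
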
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    const SmallVectorImpl<MachineOperand> &Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:  Is64Bit = 0; CC = AArch64CC::EQ; break;
    case AArch64::CBZX:  Is64Bit = 1; CC = AArch64CC::EQ; break;
    case AArch64::CBNZW: Is64Bit = 0; CC = AArch64CC::NE; break;
    case AArch64::CBNZX: Is64Bit = 1; CC = AArch64CC::NE; break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

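// Putting it together (register names are illustrative), a select on a cbnz
// condition such as Cond = { -1, CBNZW, %c } with operands %t/%f expands to:
//   SUBSWri wzr, %c, 0, 0   ; cmp %c, #0
//   CSELWr  %d, %t, %f, ne
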
bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = AArch64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == AArch64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}

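// For example, "cmp w8, #42" appears here as SUBSWri with wzr as the
// destination; it is analyzed as SrcReg = w8, SrcReg2 = 0, CmpValue = 42.
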
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned NewOpc;
    switch (CmpInstr->getOpcode()) {
    default:
      return false;
    case AArch64::ADDSWrr: NewOpc = AArch64::ADDWrr; break;
    case AArch64::ADDSWri: NewOpc = AArch64::ADDWri; break;
    case AArch64::ADDSWrs: NewOpc = AArch64::ADDWrs; break;
    case AArch64::ADDSWrx: NewOpc = AArch64::ADDWrx; break;
    case AArch64::ADDSXrr: NewOpc = AArch64::ADDXrr; break;
    case AArch64::ADDSXri: NewOpc = AArch64::ADDXri; break;
    case AArch64::ADDSXrs: NewOpc = AArch64::ADDXrs; break;
    case AArch64::ADDSXrx: NewOpc = AArch64::ADDXrx; break;
    case AArch64::SUBSWrr: NewOpc = AArch64::SUBWrr; break;
    case AArch64::SUBSWri: NewOpc = AArch64::SUBWri; break;
    case AArch64::SUBSWrs: NewOpc = AArch64::SUBWrs; break;
    case AArch64::SUBSWrx: NewOpc = AArch64::SUBWrx; break;
    case AArch64::SUBSXrr: NewOpc = AArch64::SUBXrr; break;
    case AArch64::SUBSXri: NewOpc = AArch64::SUBXri; break;
    case AArch64::SUBSXrs: NewOpc = AArch64::SUBXrs; break;
    case AArch64::SUBSXrx: NewOpc = AArch64::SUBXrx; break;
    }

    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr and
  // stop when reaching the definition of the source register or done with the
  // basic block, to check whether NZCV is used or modified in between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that NZCV isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        Instr.readsRegister(AArch64::NZCV, TRI))
      // This instruction modifies or uses NZCV after the one we want to
      // change. We can't do this transformation.
      return false;
  }

  // The 'and' is below the comparison instruction.
  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr:  NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr:  NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr:  NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr:  NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the condition code of a user requires
  // checking of the V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the Compare instruction if Overflow(V) is
      // used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::LT:
      case AArch64CC::GT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

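// The net effect (illustrative):
//   sub  w8, w0, w1
//   subs wzr, w8, #0   ; cmp w8, #0
//   b.ne <bb>
// becomes
//   subs w8, w0, w1
//   b.ne <bb>
// and is rejected by the checks above whenever a flag user reads the V bit.
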
/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

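// For example, "add w0, w1, w2, lsl #3" (ADDWrs with a non-zero shift
// operand) returns true; a plain "add w0, w1, w2" encodes a zero shift and
// returns false.
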
/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift encoding.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

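// For example, "add x0, x1, w2, sxtw" (ADDXrx) returns true, since the
// extend operand encodes a non-zero value for any extend other than a plain
// UXTB with no shift.
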
// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

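// For example, "ldr x0, [x1, x2, lsl #3]" and "ldr w0, [x1, w2, sxtw]" count
// as scaled/extended; "ldr x0, [x1, x2]" (plain UXTX with no shift) does not.
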
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                       unsigned &Offset,
                                       const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui: {
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
  }
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that oper 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 63)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

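// Offsets here are in scaled units, so e.g. "ldr x0, [x8, #8]" (imm 1) and
// "ldr x1, [x8, #16]" (imm 2) are adjacent and may later be merged into
// "ldp x0, x1, [x8, #8]".
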
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != AArch64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::ANDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
  case AArch64::ANDSXri:
    return true;
  }
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                         int FrameIx,
                                                         uint64_t Offset,
                                                         const MDNode *MDPtr,
                                                         DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

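// For example, copying D1_D2_D3 into D2_D3_D4 sub-register by sub-register
// in forward order would write D2 before it is read as a source
// ((2 - 1) & 0x1f = 1 < 3), so copyPhysRegTuple below copies backwards in
// that case.
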
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

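// For example, a GPR64 spill becomes "STRXui <reg>, <fi>, #0" (the frame
// index is rewritten to a base register and scaled offset during PEI),
// while a QQ tuple spill uses ST1Twov2d and carries no immediate operand.
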
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}

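// For example, materializing SP + 0x12345 (destination register
// illustrative):
//   add x0, sp, #0x12, lsl #12   ; first loop iteration, ThisVal = 0x12000
//   add x0, x0, #0x345           ; final instruction handles the remainder
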
MachineInstr *
AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}

int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}

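// For example, LDRXui scales its immediate by 8: a byte offset of 16 is
// emittable as the scaled immediate 2, while a byte offset of 9 is
// misaligned for the scale and is rewritten to the unscaled LDURXi, whose
// signed 9-bit immediate encodes 9 directly.
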
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}

void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}