//===- ARM64InstrInfo.cpp - ARM64 Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the ARM64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM64InstrInfo.h"
#include "ARM64Subtarget.h"
#include "MCTargetDesc/ARM64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "ARM64GenInstrInfo.inc"

ARM64InstrInfo::ARM64InstrInfo(const ARM64Subtarget &STI)
    : ARM64GenInstrInfo(ARM64::ADJCALLSTACKDOWN, ARM64::ADJCALLSTACKUP),
      RI(this, &STI), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned ARM64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MCInstrDesc &Desc = MI->getDesc();

  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

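// Editorial note (a summary of the code below, for reference): the branch
// analysis routines pass conditions around in a SmallVector that
// parseCondBranch fills with one of three shapes:
//   Cond.size() == 1: Bcc      -> { CondCode }
//   Cond.size() == 3: CBZ/CBNZ -> { -1, Opcode, Reg }
//   Cond.size() == 4: TBZ/TBNZ -> { -1, Opcode, Reg, BitNumber }
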
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case ARM64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case ARM64::CBZW:
  case ARM64::CBZX:
  case ARM64::CBNZW:
  case ARM64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case ARM64::TBZW:
  case ARM64::TBZX:
  case ARM64::TBNZW:
  case ARM64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}

bool ARM64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool ARM64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    ARM64CC::CondCode CC = (ARM64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(ARM64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case ARM64::CBZW:
      Cond[1].setImm(ARM64::CBNZW);
      break;
    case ARM64::CBNZW:
      Cond[1].setImm(ARM64::CBZW);
      break;
    case ARM64::CBZX:
      Cond[1].setImm(ARM64::CBNZX);
      break;
    case ARM64::CBNZX:
      Cond[1].setImm(ARM64::CBZX);
      break;
    case ARM64::TBZW:
      Cond[1].setImm(ARM64::TBNZW);
      break;
    case ARM64::TBNZW:
      Cond[1].setImm(ARM64::TBZW);
      break;
    case ARM64::TBZX:
      Cond[1].setImm(ARM64::TBNZX);
      break;
    case ARM64::TBNZX:
      Cond[1].setImm(ARM64::TBZX);
      break;
    }
  }

  return false;
}

unsigned ARM64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();

  return 2;
}

void ARM64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    const SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(ARM64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addReg(Cond[2].getReg());
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned ARM64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    const SmallVectorImpl<MachineOperand> &Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(ARM64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(ARM64::B)).addMBB(FBB);
  return 2;
}

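// Illustrative example (editorial, not from the original source): for a block
// ending in "if (w0 == 0) goto TBB; else goto FBB", the two-way form above
// emits:
//   cbz w0, <TBB>
//   b   <FBB>
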
// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = ARM64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case ARM64::ADDSXri:
  case ARM64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(ARM64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
  case ARM64::ADDXri:
  case ARM64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? ARM64::CSINCXr : ARM64::CSINCWr;
    break;

  case ARM64::ORNXrr:
  case ARM64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != ARM64::XZR && ZReg != ARM64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? ARM64::CSINVXr : ARM64::CSINVWr;
    break;
  }

  case ARM64::SUBSXrr:
  case ARM64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(ARM64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
  case ARM64::SUBXrr:
  case ARM64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != ARM64::XZR && ZReg != ARM64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? ARM64::CSNEGXr : ARM64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

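// Illustrative example (editorial, not from the original source): given
//   %inc = ADDWri %a, 1, 0        ; %inc = %a + 1
// a select between %inc and %b does not need the ADDWri at all; insertSelect
// below can emit a single
//   CSINCWr %dst, %b, %a, <inverted cc>
// which yields %b when the inverted condition holds and %a + 1 otherwise.
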
bool ARM64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, const SmallVectorImpl<MachineOperand> &Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (ARM64::GPR64allRegClass.hasSubClassEq(RC) ||
      ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (ARM64::FPR64RegClass.hasSubClassEq(RC) ||
      ARM64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  return false;
}

void ARM64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I, DebugLoc DL,
                                  unsigned DstReg,
                                  const SmallVectorImpl<MachineOperand> &Cond,
                                  unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  ARM64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = ARM64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case ARM64::CBZW:
      Is64Bit = false;
      CC = ARM64CC::EQ;
      break;
    case ARM64::CBZX:
      Is64Bit = true;
      CC = ARM64CC::EQ;
      break;
    case ARM64::CBNZW:
      Is64Bit = false;
      CC = ARM64CC::NE;
      break;
    case ARM64::CBNZX:
      Is64Bit = true;
      CC = ARM64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &ARM64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(ARM64::SUBSXri), ARM64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &ARM64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(ARM64::SUBSWri), ARM64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case ARM64::TBZW:
    case ARM64::TBZX:
      CC = ARM64CC::EQ;
      break;
    case ARM64::TBNZW:
    case ARM64::TBNZX:
      CC = ARM64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == ARM64::TBZW || Cond[1].getImm() == ARM64::TBNZW)
      BuildMI(MBB, I, DL, get(ARM64::ANDSWri), ARM64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(ARM64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(ARM64::ANDSXri), ARM64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(ARM64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &ARM64::GPR64RegClass)) {
    RC = &ARM64::GPR64RegClass;
    Opc = ARM64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::GPR32RegClass)) {
    RC = &ARM64::GPR32RegClass;
    Opc = ARM64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::FPR64RegClass)) {
    RC = &ARM64::FPR64RegClass;
    Opc = ARM64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &ARM64::FPR32RegClass)) {
    RC = &ARM64::FPR32RegClass;
    Opc = ARM64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = ARM64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

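// Illustrative end-to-end example (editorial): a select between %t and %f
// guarded by a "cbz w0" condition (Cond.size() == 3) expands to
//   subs wzr, w0, #0          ; compare against zero, sets NZCV
//   csel w1, %t, %f, eq       ; pick %t when w0 was zero
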
bool ARM64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                           unsigned &SrcReg, unsigned &DstReg,
                                           unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case ARM64::SBFMXri: // aka sxtw
  case ARM64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = ARM64::sub_32;
    return true;
  }
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool ARM64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                    unsigned &SrcReg2, int &CmpMask,
                                    int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::SUBSWrr:
  case ARM64::SUBSWrs:
  case ARM64::SUBSWrx:
  case ARM64::SUBSXrr:
  case ARM64::SUBSXrs:
  case ARM64::SUBSXrx:
  case ARM64::ADDSWrr:
  case ARM64::ADDSWrs:
  case ARM64::ADDSWrx:
  case ARM64::ADDSXrr:
  case ARM64::ADDSXrs:
  case ARM64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case ARM64::SUBSWri:
  case ARM64::ADDSWri:
  case ARM64::SUBSXri:
  case ARM64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(2).getImm();
    return true;
  case ARM64::ANDSWri:
  case ARM64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = ARM64_AM::decodeLogicalImmediate(
        MI->getOperand(2).getImm(),
        MI->getOpcode() == ARM64::ANDSWri ? 32 : 64);
    return true;
  }

  return false;
}

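// Illustrative example (editorial): for "subs w8, w9, #42" (SUBSWri),
// analyzeCompare returns SrcReg = w9, SrcReg2 = 0, CmpMask = ~0 and
// CmpValue = 42.
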
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetMachine *TM = &MF->getTarget();
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool ARM64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(ARM64::NZCV, true);
  if (Cmp_NZCV != -1) {
    unsigned NewOpc;
    switch (CmpInstr->getOpcode()) {
    default:
      return false;
    case ARM64::ADDSWrr: NewOpc = ARM64::ADDWrr; break;
    case ARM64::ADDSWri: NewOpc = ARM64::ADDWri; break;
    case ARM64::ADDSWrs: NewOpc = ARM64::ADDWrs; break;
    case ARM64::ADDSWrx: NewOpc = ARM64::ADDWrx; break;
    case ARM64::ADDSXrr: NewOpc = ARM64::ADDXrr; break;
    case ARM64::ADDSXri: NewOpc = ARM64::ADDXri; break;
    case ARM64::ADDSXrs: NewOpc = ARM64::ADDXrs; break;
    case ARM64::ADDSXrx: NewOpc = ARM64::ADDXrx; break;
    case ARM64::SUBSWrr: NewOpc = ARM64::SUBWrr; break;
    case ARM64::SUBSWri: NewOpc = ARM64::SUBWri; break;
    case ARM64::SUBSWrs: NewOpc = ARM64::SUBWrs; break;
    case ARM64::SUBSWrx: NewOpc = ARM64::SUBWrx; break;
    case ARM64::SUBSXrr: NewOpc = ARM64::SUBXrr; break;
    case ARM64::SUBSXri: NewOpc = ARM64::SUBXri; break;
    case ARM64::SUBSXrs: NewOpc = ARM64::SUBXrs; break;
    case ARM64::SUBSXrx: NewOpc = ARM64::SUBXrx; break;
    }

    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands' register classes are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  // We iterate backward, starting from the instruction before CmpInstr and
  // stop when reaching the definition of the source register or done with the
  // basic block, to check whether NZCV is used or modified in between.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B)
    return false;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, we can't optimize away the Compare.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that NZCV isn't set between the comparison instruction and the one
  // we want to change.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(ARM64::NZCV, TRI) ||
        Instr.readsRegister(ARM64::NZCV, TRI))
      // This instruction modifies or uses NZCV after the one we want to
      // change. We can't do this transformation.
      return false;

    // The 'and' is below the comparison instruction.
    if (I == B)
      return false;
  }

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case ARM64::ADDSWrr:
  case ARM64::ADDSWri:
  case ARM64::ADDSXrr:
  case ARM64::ADDSXri:
  case ARM64::SUBSWrr:
  case ARM64::SUBSWri:
  case ARM64::SUBSXrr:
  case ARM64::SUBSXri:
    break;
  case ARM64::ADDWrr: NewOpc = ARM64::ADDSWrr; break;
  case ARM64::ADDWri: NewOpc = ARM64::ADDSWri; break;
  case ARM64::ADDXrr: NewOpc = ARM64::ADDSXrr; break;
  case ARM64::ADDXri: NewOpc = ARM64::ADDSXri; break;
  case ARM64::ADCWr: NewOpc = ARM64::ADCSWr; break;
  case ARM64::ADCXr: NewOpc = ARM64::ADCSXr; break;
  case ARM64::SUBWrr: NewOpc = ARM64::SUBSWrr; break;
  case ARM64::SUBWri: NewOpc = ARM64::SUBSWri; break;
  case ARM64::SUBXrr: NewOpc = ARM64::SUBSXrr; break;
  case ARM64::SUBXri: NewOpc = ARM64::SUBSXri; break;
  case ARM64::SBCWr: NewOpc = ARM64::SBCSWr; break;
  case ARM64::SBCXr: NewOpc = ARM64::SBCSXr; break;
  case ARM64::ANDWri: NewOpc = ARM64::ANDSWri; break;
  case ARM64::ANDXri: NewOpc = ARM64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the condition code of a user requires
  // checking of the V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != ARM64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      ARM64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case ARM64::Bcc:
        CC = (ARM64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case ARM64::CSELWr:
      case ARM64::CSELXr:
      case ARM64::CSINVWr:
      case ARM64::CSINVXr:
      case ARM64::CSINCWr:
      case ARM64::CSINCXr:
      case ARM64::CSNEGWr:
      case ARM64::CSNEGXr:
      case ARM64::FCSELSrrr:
      case ARM64::FCSELDrrr:
        CC = (ARM64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case ARM64CC::VS:
      case ARM64CC::VC:
      case ARM64CC::GE:
      case ARM64CC::LT:
      case ARM64CC::GT:
      case ARM64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(ARM64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands' register classes are incompatible!");
  MI->addRegisterDefined(ARM64::NZCV, TRI);
  return true;
}

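// Illustrative example (editorial): the rewrite above turns
//   %w8 = SUBWrr %w9, %w10      ; w8 = w9 - w10
//   SUBSWri %wzr, %w8, 0, 0     ; cmp w8, #0
//   Bcc 0 (eq), <bb>
// into
//   %w8 = SUBSWrr %w9, %w10     ; the sub now sets NZCV itself
//   Bcc 0 (eq), <bb>
// which is legal because "eq" only reads the Z flag, never V.
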
/// Return true if this instruction has a shifted-register operand with a
/// non-zero shift amount.
bool ARM64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::ADDSWrs:
  case ARM64::ADDSXrs:
  case ARM64::ADDWrs:
  case ARM64::ADDXrs:
  case ARM64::ANDSWrs:
  case ARM64::ANDSXrs:
  case ARM64::ANDWrs:
  case ARM64::ANDXrs:
  case ARM64::BICSWrs:
  case ARM64::BICSXrs:
  case ARM64::BICWrs:
  case ARM64::BICXrs:
  case ARM64::CRC32Brr:
  case ARM64::CRC32CBrr:
  case ARM64::CRC32CHrr:
  case ARM64::CRC32CWrr:
  case ARM64::CRC32CXrr:
  case ARM64::CRC32Hrr:
  case ARM64::CRC32Wrr:
  case ARM64::CRC32Xrr:
  case ARM64::EONWrs:
  case ARM64::EONXrs:
  case ARM64::EORWrs:
  case ARM64::EORXrs:
  case ARM64::ORNWrs:
  case ARM64::ORNXrs:
  case ARM64::ORRWrs:
  case ARM64::ORRXrs:
  case ARM64::SUBSWrs:
  case ARM64::SUBSXrs:
  case ARM64::SUBWrs:
  case ARM64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended-register operand with a
/// non-zero extend/shift amount.
bool ARM64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::ADDSWrx:
  case ARM64::ADDSXrx:
  case ARM64::ADDSXrx64:
  case ARM64::ADDWrx:
  case ARM64::ADDXrx:
  case ARM64::ADDXrx64:
  case ARM64::SUBSWrx:
  case ARM64::SUBSXrx:
  case ARM64::SUBSXrx64:
  case ARM64::SUBWrx:
  case ARM64::SUBXrx:
  case ARM64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool ARM64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::MOVZWi:
  case ARM64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case ARM64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == ARM64::WZR;
  case ARM64::ANDXri:
    return MI->getOperand(1).getReg() == ARM64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == ARM64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool ARM64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (ARM64::GPR32RegClass.contains(DstReg) ||
            ARM64::GPR64RegClass.contains(DstReg));
  }
  case ARM64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == ARM64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case ARM64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool ARM64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (ARM64::FPR64RegClass.contains(DstReg) ||
            ARM64::FPR128RegClass.contains(DstReg));
  }
  case ARM64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned ARM64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::LDRWui:
  case ARM64::LDRXui:
  case ARM64::LDRBui:
  case ARM64::LDRHui:
  case ARM64::LDRSui:
  case ARM64::LDRDui:
  case ARM64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARM64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                            int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::STRWui:
  case ARM64::STRXui:
  case ARM64::STRBui:
  case ARM64::STRHui:
  case ARM64::STRSui:
  case ARM64::STRDui:
  case ARM64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool ARM64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case ARM64::LDRBBroW:
  case ARM64::LDRBroW:
  case ARM64::LDRDroW:
  case ARM64::LDRHHroW:
  case ARM64::LDRHroW:
  case ARM64::LDRQroW:
  case ARM64::LDRSBWroW:
  case ARM64::LDRSBXroW:
  case ARM64::LDRSHWroW:
  case ARM64::LDRSHXroW:
  case ARM64::LDRSWroW:
  case ARM64::LDRSroW:
  case ARM64::LDRWroW:
  case ARM64::LDRXroW:
  case ARM64::STRBBroW:
  case ARM64::STRBroW:
  case ARM64::STRDroW:
  case ARM64::STRHHroW:
  case ARM64::STRHroW:
  case ARM64::STRQroW:
  case ARM64::STRSroW:
  case ARM64::STRWroW:
  case ARM64::STRXroW:
  case ARM64::LDRBBroX:
  case ARM64::LDRBroX:
  case ARM64::LDRDroX:
  case ARM64::LDRHHroX:
  case ARM64::LDRHroX:
  case ARM64::LDRQroX:
  case ARM64::LDRSBWroX:
  case ARM64::LDRSBXroX:
  case ARM64::LDRSHWroX:
  case ARM64::LDRSHXroX:
  case ARM64::LDRSWroX:
  case ARM64::LDRSroX:
  case ARM64::LDRWroX:
  case ARM64::LDRXroX:
  case ARM64::STRBBroX:
  case ARM64::STRBroX:
  case ARM64::STRDroX:
  case ARM64::STRHHroX:
  case ARM64::STRHroX:
  case ARM64::STRQroX:
  case ARM64::STRSroX:
  case ARM64::STRWroX:
  case ARM64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    ARM64_AM::ShiftExtendType ExtType = ARM64_AM::getMemExtendType(Val);
    return (ExtType != ARM64_AM::UXTX) || ARM64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool ARM64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void ARM64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

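// Editorial note: MOSuppressPair lives in the target-specific flag bits of
// the MachineMemOperand, so the hint is attached to the memory access itself
// and remains visible to any later pass that considers forming an ldp/stp.
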
bool ARM64InstrInfo::getLdStBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                          unsigned &Offset,
                                          const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case ARM64::STRSui:
  case ARM64::STRDui:
  case ARM64::STRQui:
  case ARM64::STRXui:
  case ARM64::STRWui:
  case ARM64::LDRSui:
  case ARM64::LDRDui:
  case ARM64::LDRQui:
  case ARM64::LDRXui:
  case ARM64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getLdStBaseRegImmOfs returns true.
bool ARM64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                        MachineInstr *SecondLdSt,
                                        unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getLdStBaseRegImmOfs guarantees that operand 2 is an immediate.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

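// Illustrative example (editorial): "ldr x0, [x8, #8]" and "ldr x1, [x8, #16]"
// are both LDRXui with scaled offsets 1 and 2, so Ofs1 + 1 == Ofs2 holds and
// the two loads may be clustered (a candidate for later ldp formation).
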
bool ARM64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                            MachineInstr *Second) const {
  // Cyclone can fuse CMN, CMP followed by Bcc.

  // FIXME: B0 can also fuse:
  // AND, BIC, ORN, ORR, or EOR (optional S) followed by Bcc or CBZ or CBNZ.
  if (Second->getOpcode() != ARM64::Bcc)
    return false;
  switch (First->getOpcode()) {
  default:
    return false;
  case ARM64::SUBSWri:
  case ARM64::ADDSWri:
  case ARM64::ANDSWri:
  case ARM64::SUBSXri:
  case ARM64::ADDSXri:
  case ARM64::ANDSXri:
    return true;
  }
}

MachineInstr *ARM64InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                                       int FrameIx,
                                                       uint64_t Offset,
                                                       const MDNode *MDPtr,
                                                       DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(MDPtr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

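// Worked example (editorial): copying the tuple D0_D1 from D31_D0 gives
// DestReg encoding 0 and SrcReg encoding 31, so ((0 - 31) & 0x1f) == 1,
// which is < NumRegs == 2: a forward sub-register copy would overwrite d0
// before it is read, so copyPhysRegTuple below copies backward instead.
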
void ARM64InstrInfo::copyPhysRegTuple(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator I,
                                      DebugLoc DL, unsigned DestReg,
                                      unsigned SrcReg, bool KillSrc,
                                      unsigned Opcode,
                                      llvm::ArrayRef<unsigned> Indices) const {
  assert(getSubTarget().hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder &MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void ARM64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I, DebugLoc DL,
                                 unsigned DestReg, unsigned SrcReg,
                                 bool KillSrc) const {
  if (ARM64::GPR32spRegClass.contains(DestReg) &&
      (ARM64::GPR32spRegClass.contains(SrcReg) || SrcReg == ARM64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == ARM64::WSP || SrcReg == ARM64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, ARM64::sub_32,
                                                     &ARM64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, ARM64::sub_32,
                                                    &ARM64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(ARM64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(ARM64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
      }
    } else if (SrcReg == ARM64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(ARM64::MOVZWi), DestReg).addImm(0).addImm(
          ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, ARM64::sub_32,
                                                     &ARM64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, ARM64::sub_32,
                                                    &ARM64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(ARM64::ORRXrr), DestRegX)
            .addReg(ARM64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(ARM64::ORRWrr), DestReg)
            .addReg(ARM64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (ARM64::GPR64spRegClass.contains(DestReg) &&
      (ARM64::GPR64spRegClass.contains(SrcReg) || SrcReg == ARM64::XZR)) {
    if (DestReg == ARM64::SP || SrcReg == ARM64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(ARM64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else if (SrcReg == ARM64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(ARM64::MOVZXi), DestReg).addImm(0).addImm(
          ARM64_AM::getShifterImm(ARM64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(ARM64::ORRXrr), DestReg)
          .addReg(ARM64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (ARM64::DDDDRegClass.contains(DestReg) &&
      ARM64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1,
                                        ARM64::dsub2, ARM64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (ARM64::DDDRegClass.contains(DestReg) &&
      ARM64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1,
                                        ARM64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (ARM64::DDRegClass.contains(DestReg) &&
      ARM64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::dsub0, ARM64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (ARM64::QQQQRegClass.contains(DestReg) &&
      ARM64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1,
                                        ARM64::qsub2, ARM64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (ARM64::QQQRegClass.contains(DestReg) &&
      ARM64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1,
                                        ARM64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (ARM64::QQRegClass.contains(DestReg) &&
      ARM64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { ARM64::qsub0, ARM64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, ARM64::ORRv16i8,
                     Indices);
    return;
  }

  if (ARM64::FPR128RegClass.contains(DestReg) &&
      ARM64::FPR128RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::STRQpre))
          .addReg(ARM64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(ARM64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(ARM64::LDRQpre))
          .addReg(ARM64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(ARM64::SP)
          .addImm(16);
    }
    return;
  }

  if (ARM64::FPR64RegClass.contains(DestReg) &&
      ARM64::FPR64RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::dsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::dsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR32RegClass.contains(DestReg) &&
      ARM64::FPR32RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::ssub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::ssub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR16RegClass.contains(DestReg) &&
      ARM64::FPR16RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::hsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::hsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::hsub, &ARM64::FPR32RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::hsub, &ARM64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (ARM64::FPR8RegClass.contains(DestReg) &&
      ARM64::FPR8RegClass.contains(SrcReg)) {
    if (getSubTarget().hasNEON()) {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::bsub, &ARM64::FPR128RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::bsub, &ARM64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(ARM64::ORRv16i8), DestReg).addReg(SrcReg).addReg(
          SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg =
          RI.getMatchingSuperReg(DestReg, ARM64::bsub, &ARM64::FPR32RegClass);
      SrcReg =
          RI.getMatchingSuperReg(SrcReg, ARM64::bsub, &ARM64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(ARM64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (ARM64::FPR64RegClass.contains(DestReg) &&
      ARM64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (ARM64::GPR64RegClass.contains(DestReg) &&
      ARM64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (ARM64::FPR32RegClass.contains(DestReg) &&
      ARM64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (ARM64::GPR32RegClass.contains(DestReg) &&
      ARM64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(ARM64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void ARM64InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MBBI,
                                         unsigned SrcReg, bool isKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (ARM64::FPR8RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRBui;
    break;
  case 2:
    if (ARM64::FPR16RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRHui;
    break;
  case 4:
    if (ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR32RegClass);
      else
        assert(SrcReg != ARM64::WSP);
    } else if (ARM64::FPR32RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRSui;
    break;
  case 8:
    if (ARM64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass);
      else
        assert(SrcReg != ARM64::SP);
    } else if (ARM64::FPR64RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRDui;
    break;
  case 16:
    if (ARM64::FPR128RegClass.hasSubClassEq(RC))
      Opc = ARM64::STRQui;
    else if (ARM64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (ARM64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (ARM64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Fourv1d, Offset = false;
    } else if (ARM64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (ARM64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (ARM64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register store without NEON");
      Opc = ARM64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(SrcReg, getKillRegState(isKill))
                                      .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void ARM64InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          unsigned DestReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (ARM64::FPR8RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRBui;
    break;
  case 2:
    if (ARM64::FPR16RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRHui;
    break;
  case 4:
    if (ARM64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &ARM64::GPR32RegClass);
      else
        assert(DestReg != ARM64::WSP);
    } else if (ARM64::FPR32RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRSui;
    break;
  case 8:
    if (ARM64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = ARM64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &ARM64::GPR64RegClass);
      else
        assert(DestReg != ARM64::SP);
    } else if (ARM64::FPR64RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRDui;
    break;
  case 16:
    if (ARM64::FPR128RegClass.hasSubClassEq(RC))
      Opc = ARM64::LDRQui;
    else if (ARM64::DDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (ARM64::DDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (ARM64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Fourv1d, Offset = false;
    } else if (ARM64::QQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (ARM64::QQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (ARM64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(getSubTarget().hasNEON() &&
             "Unexpected register load without NEON");
      Opc = ARM64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder &MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                      .addReg(DestReg, getDefRegState(true))
                                      .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const ARM64InstrInfo *TII, MachineInstr::MIFlag Flag,
                           bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? ARM64::SUBSXri : ARM64::ADDSXri;
  else
    Opc = isSub ? ARM64::SUBXri : ARM64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);
    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(ARM64_AM::getShifterImm(ARM64_AM::LSL, 0))
      .setMIFlag(Flag);
}

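// Worked example (editorial): DestReg = SrcReg + 0x12345 does not fit in one
// 12-bit immediate, so the loop above splits it into two adds:
//   add xD, xS, #0x12, lsl #12   ; adds 0x12000
//   add xD, xD, #0x345           ; adds the remaining 0x345
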
MachineInstr *
ARM64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops,
                                      int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &ARM64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == ARM64::SP && TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &ARM64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}

int llvm::isARM64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                  bool *OutUseUnscaledOp,
                                  unsigned *OutUnscaledOp,
                                  int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    assert(0 && "unhandled opcode in isARM64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case ARM64::LD1Twov2d:
  case ARM64::LD1Threev2d:
  case ARM64::LD1Fourv2d:
  case ARM64::LD1Twov1d:
  case ARM64::LD1Threev1d:
  case ARM64::LD1Fourv1d:
  case ARM64::ST1Twov2d:
  case ARM64::ST1Threev2d:
  case ARM64::ST1Fourv2d:
  case ARM64::ST1Twov1d:
  case ARM64::ST1Threev1d:
  case ARM64::ST1Fourv1d:
    return ARM64FrameOffsetCannotUpdate;
  case ARM64::PRFMui:
    Scale = 8;
    UnscaledOp = ARM64::PRFUMi;
    break;
  case ARM64::LDRXui:
    Scale = 8;
    UnscaledOp = ARM64::LDURXi;
    break;
  case ARM64::LDRWui:
    Scale = 4;
    UnscaledOp = ARM64::LDURWi;
    break;
  case ARM64::LDRBui:
    Scale = 1;
    UnscaledOp = ARM64::LDURBi;
    break;
  case ARM64::LDRHui:
    Scale = 2;
    UnscaledOp = ARM64::LDURHi;
    break;
  case ARM64::LDRSui:
    Scale = 4;
    UnscaledOp = ARM64::LDURSi;
    break;
  case ARM64::LDRDui:
    Scale = 8;
    UnscaledOp = ARM64::LDURDi;
    break;
  case ARM64::LDRQui:
    Scale = 16;
    UnscaledOp = ARM64::LDURQi;
    break;
  case ARM64::LDRBBui:
    Scale = 1;
    UnscaledOp = ARM64::LDURBBi;
    break;
  case ARM64::LDRHHui:
    Scale = 2;
    UnscaledOp = ARM64::LDURHHi;
    break;
  case ARM64::LDRSBXui:
    Scale = 1;
    UnscaledOp = ARM64::LDURSBXi;
    break;
  case ARM64::LDRSBWui:
    Scale = 1;
    UnscaledOp = ARM64::LDURSBWi;
    break;
  case ARM64::LDRSHXui:
    Scale = 2;
    UnscaledOp = ARM64::LDURSHXi;
    break;
  case ARM64::LDRSHWui:
    Scale = 2;
    UnscaledOp = ARM64::LDURSHWi;
    break;
  case ARM64::LDRSWui:
    Scale = 4;
    UnscaledOp = ARM64::LDURSWi;
    break;
  case ARM64::STRXui:
    Scale = 8;
    UnscaledOp = ARM64::STURXi;
    break;
  case ARM64::STRWui:
    Scale = 4;
    UnscaledOp = ARM64::STURWi;
    break;
  case ARM64::STRBui:
    Scale = 1;
    UnscaledOp = ARM64::STURBi;
    break;
  case ARM64::STRHui:
    Scale = 2;
    UnscaledOp = ARM64::STURHi;
    break;
  case ARM64::STRSui:
    Scale = 4;
    UnscaledOp = ARM64::STURSi;
    break;
  case ARM64::STRDui:
    Scale = 8;
    UnscaledOp = ARM64::STURDi;
    break;
  case ARM64::STRQui:
    Scale = 16;
    UnscaledOp = ARM64::STURQi;
    break;
  case ARM64::STRBBui:
    Scale = 1;
    UnscaledOp = ARM64::STURBBi;
    break;
  case ARM64::STRHHui:
    Scale = 2;
    UnscaledOp = ARM64::STURHHi;
    break;
  case ARM64::LDPXi:
  case ARM64::LDPDi:
  case ARM64::STPXi:
  case ARM64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case ARM64::LDPQi:
  case ARM64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case ARM64::LDPWi:
  case ARM64::LDPSi:
  case ARM64::STPWi:
  case ARM64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;
  case ARM64::LDURXi:
  case ARM64::LDURWi:
  case ARM64::LDURBi:
  case ARM64::LDURHi:
  case ARM64::LDURSi:
  case ARM64::LDURDi:
  case ARM64::LDURQi:
  case ARM64::LDURHHi:
  case ARM64::LDURBBi:
  case ARM64::LDURSBXi:
  case ARM64::LDURSBWi:
  case ARM64::LDURSHXi:
  case ARM64::LDURSHWi:
  case ARM64::LDURSWi:
  case ARM64::STURXi:
  case ARM64::STURWi:
  case ARM64::STURBi:
  case ARM64::STURHi:
  case ARM64::STURSi:
  case ARM64::STURDi:
  case ARM64::STURQi:
  case ARM64::STURBBi:
  case ARM64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    // ldur/stur instructions.
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return ARM64FrameOffsetCanUpdate |
         (Offset == 0 ? ARM64FrameOffsetIsLegal : 0);
}

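// Worked example (editorial): for "ldr x0, [fp, #off]" (LDRXui, Scale = 8,
// unsigned 12-bit field), a byte offset of 40000 scales to 5000, which
// exceeds MaxOff = 4095. The function reports EmittableOffset = 4095 (i.e.
// 32760 bytes), leaves Offset = (5000 - 4095) * 8 = 7240 bytes for the
// caller to materialize separately, and returns ARM64FrameOffsetCanUpdate
// without ARM64FrameOffsetIsLegal.
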
bool llvm::rewriteARM64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                  unsigned FrameReg, int &Offset,
                                  const ARM64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == ARM64::ADDSXri || Opcode == ARM64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == ARM64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isARM64FrameOffsetLegal(MI, Offset, &UseUnscaledOp, &UnscaledOp,
                                       &NewOffset);
  if (Status & ARM64FrameOffsetCanUpdate) {
    if (Status & ARM64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}

void ARM64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(ARM64::HINT);
  NopInst.addOperand(MCOperand::CreateImm(0));
}
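// Note (editorial): HINT #0 is the architectural encoding of NOP.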