//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may occupy. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
    break;
  }
}

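// For reference, the Cond encodings built above (and consumed by
// ReverseBranchCondition/instantiateCondBranch/insertSelect below) are:
//   Bcc:       Cond = { CC }                      e.g. "b.eq L" -> { EQ }
//   CBZ/CBNZ:  Cond = { -1, Opcode, Reg }         e.g. "cbz x0, L"
//   TBZ/TBNZ:  Cond = { -1, Opcode, Reg, Bit }    e.g. "tbz w0, #3, L"
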
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }
  return false;
}

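// For example, reversing the condition of "tbz w0, #3, L", encoded as
// { -1, TBZW, w0, 3 }, yields { -1, TBNZW, w0, 3 } ("tbnz w0, #3, L"); a
// plain Bcc condition such as { EQ } simply becomes { NE }.
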
// XXX-update: Returns whether we can remove a conditional branch instruction.
// If it is one that we added manually, don't remove it (return false); all
// successors of such a fake branch are the same block.
static bool shouldRemoveConditionalBranch(MachineInstr* I) {
  auto* MBB = I->getParent();
  assert(isCondBranchOpcode(I->getOpcode()));
  bool SameSuccessor = true;
  MachineBasicBlock* BB = nullptr;
  for (auto* Succ : MBB->successors()) {
    if (!BB) {
      BB = Succ;
    }
    if (BB != Succ) {
      SameSuccessor = false;
    }
  }
  return !SameSuccessor;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // XXX-update: Don't remove fake conditional branches.
  if (isCondBranchOpcode(I->getOpcode()) && !shouldRemoveConditionalBranch(I)) {
    return 0;
  }

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // XXX-update: Don't remove fake conditional branches.
  if (!shouldRemoveConditionalBranch(I)) {
    return 1;
  }

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

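// Illustrative example (register names are arbitrary): selecting between
// "%t = ADDWri %w1, 1, 0" and %w2 reports CSINCWr with %w1 as the
// replacement register; insertSelect() below then inverts the condition and
// emits "csinc %w0, %w2, %w1, <inv cc>", since CSINC applies the +1 to its
// second source operand.
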
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }
  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv, and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
  uint64_t Imm = MI->getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

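// For example, "mov w0, #0xffff" can be emitted as "orr w0, wzr, #0xffff"
// since 0xffff is a contiguous run of ones and therefore a valid logical
// immediate, whereas a constant such as #0x12345 is not and needs a
// MOVZ/MOVK sequence instead.
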
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in the future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub on register without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If MOVi32imm or MOVi64imm can be expanded into ORRWri or
  // ORRXri, it is as cheap as a MOV.
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // the base registers are identical, and the offset of the lower memory
  // access plus its width does not overlap the offset of the higher memory
  // access, then the memory accesses are disjoint.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: In order to convert CmpValue to 0 or 1
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return value type of decodeLogicalImmediate is uint64_t,
    // while the type of CmpValue is int. When converting uint64_t to int,
    // the high 32 bits of uint64_t will be lost.
    // In fact it causes a bug in spec2006-483.xalancbmk.
    // CmpValue is only used to compare with zero in optimizeCompareInstr.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;

    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

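// For example, "subs wzr, w0, #1" (i.e. "cmp w0, #1") must keep its
// flag-setting form: in the non-flag-setting immediate encoding, register 31
// means wsp rather than wzr. By contrast, "adds w0, w1, w2" whose flags are
// unused can safely become "add w0, w1, w2".
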
/// True when condition code could be modified on the instruction
/// trace starting at From and ending at To.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting at \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;
  }
  // We currently don't allow the instruction trace to cross basic
  // block boundaries.

  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    if (CmpInstr->definesRegister(AArch64::WZR) ||
        CmpInstr->definesRegister(AArch64::XZR)) {
      CmpInstr->eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operand register classes are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the condition code requires checking of the
  // V bit, then this is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::GT:
      case AArch64CC::LT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operand register classes are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

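// As a sketch of the overall transformation: given
//   add  w8, w0, w1
//   cmp  w8, #0
//   b.ne L
// the cmp is deleted and the add becomes "adds w8, w0, w1", provided no
// instruction in between touches NZCV and no NZCV reader needs the V flag.
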
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift amount.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:
  {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

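// For example, "ldr x0, [x1, x2, lsl #3]" (scaled) and
// "ldr x0, [x1, w2, sxtw]" (extended) return true here, while a plain
// register offset "ldr x0, [x1, x2]" (UXTX, no shift) returns false.
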
/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

bool
AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have a scaling factor of 1.
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRXui:
  case AArch64::LDRDui:
  case AArch64::STRXui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::LDRSui:
  case AArch64::STRWui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRHui:
  case AArch64::LDRHHui:
  case AArch64::STRHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRBui:
  case AArch64::LDRBBui:
  case AArch64::STRBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getMemOpBaseRegImmOfs guarantees that operand 2 is an immediate.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 63)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

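// For example, "ldr x0, [x10, #8]" and "ldr x1, [x10, #16]" (LDRXui with
// scaled immediates 1 and 2) get clustered so that they can later be merged
// into "ldp x0, x1, [x10, #8]".
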
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  if (Subtarget.isCyclone()) {
    // Cyclone can fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second->getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::SUBSWri:
      case AArch64::ADDSWri:
      case AArch64::ANDSWri:
      case AArch64::SUBSXri:
      case AArch64::ADDSXri:
      case AArch64::ANDSXri:
        return true;
      }
    }
    // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDXri:
      case AArch64::ANDWri:
      case AArch64::ANDXri:
      case AArch64::EORWri:
      case AArch64::EORXri:
      case AArch64::ORRWri:
      case AArch64::ORRXri:
      case AArch64::SUBWri:
      case AArch64::SUBXri:
        return true;
      }
    }
  }
  return false;
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, which happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

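// For example, copying Q0_Q1 into Q1_Q2 sub-register by sub-register in the
// forward direction would overwrite Q1 before it is read
// ((1 - 0) & 0x1f == 1 < 2), so copyPhysRegTuple below iterates backward in
// that case; copying Q1_Q2 into Q0_Q1 is safe forward ((0 - 1) & 0x1f == 31).
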
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
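
// Illustrative examples (assumed MIR spellings, not from the original
// source): an 8-byte GPR spill to frame index 0 selects the scaled
// unsigned store
//   STRXui %vreg, <fi#0>, 0
// while a QQ tuple (2 x 128 bits) has no immediate-offset form, so Offset
// stays false and the spill becomes
//   ST1Twov2d %vreg, <fi#0>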
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24-bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
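
// Worked example (illustrative, not part of the original source): for
// Offset = 0x123456 the loop above peels off the high 12-bit chunk first,
//   ADD DestReg, SrcReg, #0x123, lsl #12   ; adds 0x123000
// leaving 0x456, which the trailing BuildMI encodes without a shift:
//   ADD DestReg, DestReg, #0x456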
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in isAArch64FrameOffsetLegal");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
  case AArch64::LDNPXi:
  case AArch64::LDNPDi:
  case AArch64::STNPXi:
  case AArch64::STNPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
  case AArch64::LDNPQi:
  case AArch64::STNPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
  case AArch64::LDNPWi:
  case AArch64::LDNPSi:
  case AArch64::STNPWi:
  case AArch64::STNPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  // Construct the complete byte offset.
  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    // ldur/stur instructions: 9-bit signed immediate.
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    // ldr/str instructions: 12-bit unsigned, scaled immediate.
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
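
// Worked examples (illustrative, not part of the original source): for
// LDRXui (Scale = 8) a byte offset of 9 fails the alignment check, so the
// unscaled form is chosen and the emittable offset is 9, which fits the
// 9-bit signed range of LDURXi. A byte offset of 16 stays scaled and is
// emitted as the immediate 2 (16 / 8).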
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return !Offset;
  }

  return false;
}
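
// Usage note (an assumption based on the surrounding code, not stated in
// the original source): this is the entry point used when frame indices
// are eliminated after frame layout, e.g. from
// AArch64RegisterInfo::eliminateFrameIndex; any residual Offset it leaves
// behind must then be materialized separately with emitFrameOffset().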
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::createImm(0));
}
/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the combiner.
  return true;
}
// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}
2450 // 32b Opcodes that can be combined with a MUL
2451 static bool isCombineInstrCandidate32(unsigned Opc) {
2453 case AArch64::ADDWrr:
2454 case AArch64::ADDWri:
2455 case AArch64::SUBWrr:
2456 case AArch64::ADDSWrr:
2457 case AArch64::ADDSWri:
2458 case AArch64::SUBSWrr:
2459 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2460 case AArch64::SUBWri:
2461 case AArch64::SUBSWri:
2469 // 64b Opcodes that can be combined with a MUL
2470 static bool isCombineInstrCandidate64(unsigned Opc) {
2472 case AArch64::ADDXrr:
2473 case AArch64::ADDXri:
2474 case AArch64::SUBXrr:
2475 case AArch64::ADDSXrr:
2476 case AArch64::ADDSXri:
2477 case AArch64::SUBSXrr:
2478 // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
2479 case AArch64::SUBXri:
2480 case AArch64::SUBSXri:
2488 // Opcodes that can be combined with a MUL
2489 static bool isCombineInstrCandidate(unsigned Opc) {
2490 return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}
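
// For reference (illustrative, not part of the original source): AArch64
// has no standalone multiply machine instruction; "mul w0, w1, w2" is an
// alias of "madd w0, w1, w2, wzr". That is why the combiner looks for a
// MADD whose addend (operand 3) is the zero register when it wants to
// match a plain multiply.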
// TODO: There are many more machine instruction opcodes to match:
//       1. Other data types (integer, vectors)
//       2. Other math / logic operations (xor, or)
//       3. Other forms of the same operation (intrinsics and other variants)
bool AArch64InstrInfo::isAssociativeAndCommutative(
    const MachineInstr &Inst) const {
  switch (Inst.getOpcode()) {
  case AArch64::FADDDrr:
  case AArch64::FADDSrr:
  case AArch64::FADDv2f32:
  case AArch64::FADDv2f64:
  case AArch64::FADDv4f32:
  case AArch64::FMULDrr:
  case AArch64::FMULSrr:
  case AArch64::FMULX32:
  case AArch64::FMULX64:
  case AArch64::FMULXv2f32:
  case AArch64::FMULXv2f64:
  case AArch64::FMULXv4f32:
  case AArch64::FMULv2f32:
  case AArch64::FMULv2f64:
  case AArch64::FMULv4f32:
    return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
  default:
    break;
  }
  return false;
}
/// Find instructions that can be turned into madd.
static bool getMaddPatterns(MachineInstr &Root,
                            SmallVectorImpl<MachineCombinerPattern> &Patterns) {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
/// Return true when there is potentially a faster code sequence for an
/// instruction chain ending in \p Root. All potential patterns are listed in
/// the \p Patterns vector. Patterns should be sorted in priority order since
/// the pattern evaluator stops checking as soon as it finds a faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  if (getMaddPatterns(Root, Patterns))
    return true;

  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns);
}
/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///  MUL I=A,B,0
///  ADD R,I,C
///  ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD
  InsInstrs.push_back(MIB);
  return MUL;
}
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // Reassociate instructions.
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::MULADDW_OP1:
  case MachineCombinerPattern::MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDW_OP2:
  case MachineCombinerPattern::MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULADDWI_OP1:
  case MachineCombinerPattern::MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP1:
  case MachineCombinerPattern::MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MULSUBW_OP2:
  case MachineCombinerPattern::MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MULSUBWI_OP1:
  case MachineCombinerPattern::MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = -Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
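
// Worked example (illustrative, not part of the original source): given
//   %w2 = MADDWrrr %w0, %w1, %wzr   ; i.e. mul w2, w0, w1
//   %w3 = ADDWri %w2, 4, 0
// the MULADDWI_OP1 path above materializes the immediate first (4 is a
// valid logical immediate) and then folds the multiply:
//   %w9 = ORRWri %wzr, <encoding of 4>
//   %w3 = MADDWrrr %w0, %w1, %w9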
/// \brief Replace csinc-branch sequence by simple conditional branch
///
/// Examples:
/// 1.
///   csinc  w9, wzr, wzr, <condition code>
///   tbnz   w9, #0, 0x44
/// to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz w9, #0, 0x44
/// to
///   b.<condition code>
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC =
      (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;
  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"},
      {MO_CONSTPOOL, "aarch64-constant-pool"}};
  return makeArrayRef(TargetFlags);
}