//===- AArch64InstrInfo.cpp - AArch64 Instruction Information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineCombinerPattern.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AArch64GenInstrInfo.inc"

AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - unable to determine insn size");
}

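// For example, a BL or an ADDXri is reported as 4 bytes, while
// meta-instructions such as DBG_VALUE, IMPLICIT_DEF and KILL occupy no space
// in the emitted image; only inline assembly needs the MCAsmInfo-based
// length estimate above.
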
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

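// The Cond vector built above uses one of three encodings, which the other
// branch hooks below rely on:
//
//   b.<cc>      -> { <cc> }                        (size 1)
//   cbz/cbnz    -> { -1, <opcode>, <reg> }         (size 3)
//   tbz/tbnz    -> { -1, <opcode>, <reg>, <bit> }  (size 4)
//
// e.g. "cbnz w8, <bb>" is encoded as { -1, AArch64::CBNZW, w8 }.
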
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now if the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

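// A typical analyzable block ends in either a lone conditional branch (the
// fall-through case) or a conditional branch followed by an unconditional
// one, e.g.:
//
//   b.eq <TBB>
//   b    <FBB>
//
// which is reported as TBB/FBB with Cond = { AArch64CC::EQ }.
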
bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

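// Reversing a condition flips the branch sense without touching its
// operands: a plain Bcc condition maps EQ<->NE, LT<->GE, etc. via
// getInvertedCondCode(), and e.g. { -1, TBZW, reg, bit } becomes
// { -1, TBNZW, reg, bit }.
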
unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
  // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

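// The three folds recognized above correspond to these selection idioms
// (illustrative):
//
//   add w8, w9, #1  + csel -> csinc dst, true, w9, cc   ; select x + 1
//   orn w8, wzr, w9 + csel -> csinv dst, true, w9, cc   ; select ~x
//   sub w8, wzr, w9 + csel -> csneg dst, true, w9, cc   ; select -x
//
// The S-form variants are only folded when their NZCV def is dead.
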
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

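// e.g. selecting between two w-registers on a cbz condition yields
// CondCycles = 2 (the csel plus one cycle to re-materialize the compare) and
// TrueCycles = FalseCycles = 1; if one input folds into csinc/csinv/csneg,
// its cost drops to 0.
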
void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  // Insert the csel.
  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

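// For illustration, a select on "cbnz w8" of %t and %f expands to:
//
//   subs wzr, w8, #0             ; re-materialize the compare
//   csel wDst, wTrue, wFalse, ne
//
// and if %t was defined by "add wTrue, w9, #1", the csel instead becomes
// "csinc wDst, wFalse, w9, eq" with the condition inverted as above.
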
/// Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
static bool canBeExpandedToORR(const MachineInstr *MI, unsigned BitSize) {
  uint64_t Imm = MI->getOperand(1).getImm();
  uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t Encoding;
  return AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding);
}

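// e.g. "mov w0, #0xffff" can be emitted as "orr w0, wzr, #0xffff" because
// 0x0000ffff is encodable as a logical immediate, whereas #0x12345678 is not
// and needs a movz/movk sequence.
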
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub with immediate, without shift
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;

  // If a MOVi32imm or MOVi64imm can be expanded into ORRWri or ORRXri, it is
  // as cheap as a MOV.
  case AArch64::MOVi32imm:
    return canBeExpandedToORR(MI, 32);
  case AArch64::MOVi64imm:
    return canBeExpandedToORR(MI, 64);
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case, these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // base registers are identical, and the offset of a lower memory access +
  // the width doesn't overlap the offset of a higher memory access,
  // then the memory accesses are different.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: Reduce CmpValue to 0 or 1; it is only ever compared against
    // zero in optimizeCompareInstr.
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return type of decodeLogicalImmediate is uint64_t, while the
    // type of CmpValue is int; converting uint64_t to int drops the high 32
    // bits, which once caused a bug in spec2006-483.xalancbmk. CmpValue is
    // only compared against zero in optimizeCompareInstr, so reduce it to
    // 0 or 1 here as well.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}

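// e.g. "subs w0, w1, w2" yields SrcReg = w1, SrcReg2 = w2 and CmpValue = 0,
// while "subs wzr, w1, #5" (i.e. cmp w1, #5) yields SrcReg = w1, SrcReg2 = 0
// and CmpValue = 1, where 1 only means "compared against a non-zero
// immediate" (see the FIXMEs above).
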
static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible to do the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

/// True when the condition code could be modified on the instruction
/// trace starting at \p From and ending at \p To.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting at \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;
    if (I == B)
      // We currently don't allow the instruction trace to cross basic
      // block boundaries.
      return true;
  }

  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    if (CmpInstr->definesRegister(AArch64::WZR) ||
        CmpInstr->definesRegister(AArch64::XZR)) {
      CmpInstr->eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands reg class are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare().
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is a Compare instruction if destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;

  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // When checking against MI: if the condition code of a user requires
  // checking of the V bit, then this optimization is not safe to do.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove Compare instruction if Overflow(V) is used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::LT:
      case AArch64CC::GT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is not killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands reg class are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

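// For illustration, the rewrite performed above turns
//
//   %w8  = SUBWri %w9, 1, 0
//   %wzr = SUBSWri %w8, 0, 0   ; cmp w8, #0
//   Bcc ne, <bb>
//
// into
//
//   %w8 = SUBSWri %w9, 1, 0    ; the subtract now sets NZCV itself
//   Bcc ne, <bb>
//
// provided nothing between the two instructions touches NZCV, the condition
// does not inspect the V bit, and NZCV is not live out of the block.
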
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift amount.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, Rzr, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b.
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX: {
    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

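// These two hooks cooperate with the AArch64 load/store optimizer: a pass
// that needs two adjacent accesses to stay separate calls suppressLdStPair()
// on one of them, and the optimizer checks isLdStPairSuppressed() before
// forming an ldp/stp.
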
bool
AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have scaling factor set to 1.
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRXui:
  case AArch64::STRXui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::STRWui:
    Scale = Width = 4;
    break;
  case AArch64::LDRBui:
  case AArch64::STRBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHui:
  case AArch64::STRHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRSui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRDui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRBBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}

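// e.g. for "ldr x0, [x1, #16]" (LDRXui) the immediate operand is 2, so this
// returns BaseReg = x1, Width = 8 and Offset = 2 * 8 = 16; for the unscaled
// "ldur x0, [x1, #16]" (LDURXi) the immediate is already a byte offset and
// Scale is 1.
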
/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getMemOpBaseRegImmOfs guarantees that operand 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

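// e.g. "ldr x0, [x8]" and "ldr x1, [x8, #8]" carry immediates 0 and 1, so
// they get clustered and can later be rewritten as "ldp x0, x1, [x8]".
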
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  if (Subtarget.isCyclone()) {
    // Cyclone can fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second->getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::SUBSWri:
      case AArch64::ADDSWri:
      case AArch64::ANDSWri:
      case AArch64::SUBSXri:
      case AArch64::ADDSXri:
      case AArch64::ANDSXri:
        return true;
      }
    }
    // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDXri:
      case AArch64::ANDWri:
      case AArch64::ANDXri:
      case AArch64::EORWri:
      case AArch64::EORXri:
      case AArch64::ORRWri:
      case AArch64::ORRXri:
      case AArch64::SUBWri:
      case AArch64::SUBXri:
        return true;
      }
    }
  }
  return false;
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here, which happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

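// e.g. copying D1_D2 into D2_D3 sub-register by sub-register would clobber
// D2 before it is read ((2 - 1) mod 32 == 1 < 2), so copyPhysRegTuple below
// walks the sub-registers in reverse order in that case.
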
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

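// e.g. a GPR64 spill becomes "str xN, [<fi>, #imm]" (STRXui), while a QQ
// register pair has no single store form and is spilled with ST1Twov2d; in
// the ST1 case no immediate offset operand is appended (Offset == false
// above).
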
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo = MachinePointerInfo::getFixedStack(MF, FI);
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
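
/// emitFrameOffset - Emit instructions to add \p Offset to \p SrcReg, placing
/// the result in \p DestReg. AArch64 ADD/SUB immediates hold 12 bits with an
/// optional LSL #12, so larger offsets are lowered to a chain of add/sub
/// instructions. A typical call, e.g. from frame lowering, might look like:
///   emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -16, TII,
///                   MachineInstr::NoFlags, false);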
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP which is always 16-byte aligned, so the scratch
  // register can be loaded with offset%8 and the add/sub can use an extending
  // instruction with LSL#3.
  // Currently the function handles any offsets but generates a poor sequence
  // of code.
  //  assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue) {
      ThisVal = MaxEncodableValue;
    } else {
      ThisVal = Offset & MaxEncodableValue;
    }
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
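
/// foldMemoryOperandImpl - AArch64 does not actually fold anything here; the
/// override exists to constrain GPR64all copies of SP (see the comment in the
/// body) before returning nullptr to the generic folding code.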
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
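
/// isAArch64FrameOffsetLegal - Check whether \p Offset can be encoded in the
/// immediate field of \p MI once the frame index is resolved. On return,
/// \p Offset holds any remainder that could not be folded, *EmittableOffset
/// (if provided) is the immediate that can be encoded, and the unscaled-op
/// outputs report a switch to the LDUR/STUR form when the offset is negative
/// or not a multiple of the access size.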
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
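
/// rewriteAArch64FrameIndex - Rewrite the frame-index operand of \p MI to use
/// \p FrameReg plus an encodable immediate. Returns true when the offset was
/// folded completely; otherwise the remainder is left in \p Offset for the
/// caller to materialize.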
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}
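
/// getNoopForMachoTarget - On AArch64 the canonical NOP is encoded as HINT #0.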
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  NopInst.addOperand(MCOperand::createImm(0));
}

/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the combiner
  return true;
}

// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}

// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
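
// Return true when MO is defined, in the same basic block, by a multiply
// expressed as MulOpc with a ZeroReg addend, and that multiply's only use is
// the instruction we want to combine with.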
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // It must only be used by the instruction we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}

/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Patterns vector. Patterns should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}

/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///  MUL I=A,B,0
///  ADD R,I,C
///  ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is the index of the operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}

/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is the index of the operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}

/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence.
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    break;
  case MachineCombinerPattern::MC_MULADDW_OP1:
  case MachineCombinerPattern::MC_MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDW_OP2:
  case MachineCombinerPattern::MC_MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDWI_OP1:
  case MachineCombinerPattern::MC_MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP1:
  case MachineCombinerPattern::MC_MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP2:
  case MachineCombinerPattern::MC_MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULSUBWI_OP1:
  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = -Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}

/// \brief Replace csinc-branch sequence by simple conditional branch
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz  w9, #0, 0x44
///     to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz   w9, #0, 0x44
///     to
///   b.<condition code>
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC.
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;
  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}
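
/// Split \p TF into the direct "fragment" part selected by
/// AArch64II::MO_FRAGMENT and the remaining bitmask flags (MO_GOT, MO_NC,
/// MO_TLS, ...), matching the two serializable flag tables below.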
std::pair<unsigned, unsigned>
AArch64InstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = AArch64II::MO_FRAGMENT;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PAGE, "aarch64-page"},
      {MO_PAGEOFF, "aarch64-pageoff"},
      {MO_G3, "aarch64-g3"},
      {MO_G2, "aarch64-g2"},
      {MO_G1, "aarch64-g1"},
      {MO_G0, "aarch64-g0"},
      {MO_HI12, "aarch64-hi12"}};
  return makeArrayRef(TargetFlags);
}

ArrayRef<std::pair<unsigned, const char *>>
AArch64InstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const {
  using namespace AArch64II;
  static std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_GOT, "aarch64-got"},
      {MO_NC, "aarch64-nc"},
      {MO_TLS, "aarch64-tls"},
      {MO_CONSTPOOL, "aarch64-constant-pool"}};
  return makeArrayRef(TargetFlags);
}