//===- AArch64InstrInfo.cpp - AArch64 Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

14 #include "AArch64InstrInfo.h"
15 #include "AArch64MachineCombinerPattern.h"
16 #include "AArch64Subtarget.h"
17 #include "MCTargetDesc/AArch64AddressingModes.h"
18 #include "llvm/CodeGen/MachineFrameInfo.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/MachineMemOperand.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/PseudoSourceValue.h"
23 #include "llvm/MC/MCInst.h"
24 #include "llvm/Support/ErrorHandling.h"
25 #include "llvm/Support/TargetRegistry.h"
29 #define GET_INSTRINFO_CTOR_DTOR
30 #include "AArch64GenInstrInfo.inc"
AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
    : AArch64GenInstrInfo(AArch64::ADJCALLSTACKDOWN, AArch64::ADJCALLSTACKUP),
      RI(STI.getTargetTriple()), Subtarget(STI) {}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  if (MI->getOpcode() == AArch64::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);

  const MCInstrDesc &Desc = MI->getDesc();
  switch (Desc.getOpcode()) {
  default:
    // Anything not explicitly designated otherwise is a normal 4-byte insn.
    return 4;
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
    return 0;
  }

  llvm_unreachable("GetInstSizeInBytes() - Unable to determine insn size");
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  switch (LastInst->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::CBZW:
  case AArch64::CBZX:
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    Target = LastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    Target = LastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(-1));
    Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
    Cond.push_back(LastInst->getOperand(0));
    Cond.push_back(LastInst->getOperand(1));
  }
}

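// The Cond vector built above uses a small ad-hoc encoding that the rest of
// this file relies on: a single operand holds the condition code of a plain
// Bcc; otherwise Cond[0] is -1, Cond[1] is the compare-and-branch opcode,
// Cond[2] its register operand, and (for tbz/tbnz only) Cond[3] the bit
// number being tested.
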
bool AArch64InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst->getOpcode();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
        // Return now; the only terminator is an unconditional branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      } else {
        SecondLastInst = I;
        SecondLastOpc = SecondLastInst->getOpcode();
      }
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed, so remove it.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an
  // unconditional branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}

bool AArch64InstrInfo::ReverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    AArch64CC::CondCode CC = (AArch64CC::CondCode)(int)Cond[0].getImm();
    Cond[0].setImm(AArch64CC::getInvertedCondCode(CC));
  } else {
    // Folded compare-and-branch
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown conditional branch!");
    case AArch64::CBZW:
      Cond[1].setImm(AArch64::CBNZW);
      break;
    case AArch64::CBNZW:
      Cond[1].setImm(AArch64::CBZW);
      break;
    case AArch64::CBZX:
      Cond[1].setImm(AArch64::CBNZX);
      break;
    case AArch64::CBNZX:
      Cond[1].setImm(AArch64::CBZX);
      break;
    case AArch64::TBZW:
      Cond[1].setImm(AArch64::TBNZW);
      break;
    case AArch64::TBNZW:
      Cond[1].setImm(AArch64::TBZW);
      break;
    case AArch64::TBZX:
      Cond[1].setImm(AArch64::TBNZX);
      break;
    case AArch64::TBNZX:
      Cond[1].setImm(AArch64::TBZX);
      break;
    }
  }

  return false;
}

unsigned AArch64InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin())
    return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

void AArch64InstrInfo::instantiateCondBranch(
    MachineBasicBlock &MBB, DebugLoc DL, MachineBasicBlock *TBB,
    ArrayRef<MachineOperand> Cond) const {
  if (Cond[0].getImm() != -1) {
    // Regular Bcc
    BuildMI(&MBB, DL, get(AArch64::Bcc)).addImm(Cond[0].getImm()).addMBB(TBB);
  } else {
    // Folded compare-and-branch
    // Note that we use addOperand instead of addReg to keep the flags.
    const MachineInstrBuilder MIB =
        BuildMI(&MBB, DL, get(Cond[1].getImm())).addOperand(Cond[2]);
    if (Cond.size() > 3)
      MIB.addImm(Cond[3].getImm());
    MIB.addMBB(TBB);
  }
}

unsigned AArch64InstrInfo::InsertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  if (!FBB) {
    if (Cond.empty()) // Unconditional branch?
      BuildMI(&MBB, DL, get(AArch64::B)).addMBB(TBB);
    else
      instantiateCondBranch(MBB, DL, TBB, Cond);
    return 1;
  }

  // Two-way conditional branch.
  instantiateCondBranch(MBB, DL, TBB, Cond);
  BuildMI(&MBB, DL, get(AArch64::B)).addMBB(FBB);
  return 2;
}

// Find the original register that VReg is copied from.
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg) {
  while (TargetRegisterInfo::isVirtualRegister(VReg)) {
    const MachineInstr *DefMI = MRI.getVRegDef(VReg);
    if (!DefMI->isFullCopy())
      return VReg;
    VReg = DefMI->getOperand(1).getReg();
  }
  return VReg;
}

// Determine if VReg is defined by an instruction that can be folded into a
// csel instruction. If so, return the folded opcode, and the replacement
// register.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg,
                                unsigned *NewVReg = nullptr) {
  VReg = removeCopies(MRI, VReg);
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return 0;

  bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
  const MachineInstr *DefMI = MRI.getVRegDef(VReg);
  unsigned Opc = 0;
  unsigned SrcOpNum = 0;
  switch (DefMI->getOpcode()) {
  case AArch64::ADDSXri:
  case AArch64::ADDSWri:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to ADDXri and ADDWri.
  case AArch64::ADDXri:
  case AArch64::ADDWri:
    // add x, 1 -> csinc.
    if (!DefMI->getOperand(2).isImm() || DefMI->getOperand(2).getImm() != 1 ||
        DefMI->getOperand(3).getImm() != 0)
      return 0;
    SrcOpNum = 1;
    Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
    break;

  case AArch64::ORNXrr:
  case AArch64::ORNWrr: {
    // not x -> csinv, represented as orn dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
    break;
  }

  case AArch64::SUBSXrr:
  case AArch64::SUBSWrr:
    // if NZCV is used, do not fold.
    if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) == -1)
      return 0;
    // fall-through to SUBXrr and SUBWrr.
  case AArch64::SUBXrr:
  case AArch64::SUBWrr: {
    // neg x -> csneg, represented as sub dst, xzr, src.
    unsigned ZReg = removeCopies(MRI, DefMI->getOperand(1).getReg());
    if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
      return 0;
    SrcOpNum = 2;
    Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
    break;
  }
  default:
    return 0;
  }
  assert(Opc && SrcOpNum && "Missing parameters");

  if (NewVReg)
    *NewVReg = DefMI->getOperand(SrcOpNum).getReg();
  return Opc;
}

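// For example, given
//   %vreg1 = ADDWri %vreg0, 1, 0   ; %vreg1 = %vreg0 + 1
// feeding a select, this returns CSINCWr with *NewVReg = %vreg0, so
// insertSelect() below can emit a single csinc instead of an add plus a csel.
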
bool AArch64InstrInfo::canInsertSelect(
    const MachineBasicBlock &MBB, ArrayRef<MachineOperand> Cond,
    unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
    int &FalseCycles) const {
  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // Expanding cbz/tbz requires an extra cycle of latency on the condition.
  unsigned ExtraCondLat = Cond.size() != 1;

  // GPRs are handled by csel.
  // FIXME: Fold in x+1, -x, and ~x when applicable.
  if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
      AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
    // Single-cycle csel, csinc, csinv, and csneg.
    CondCycles = 1 + ExtraCondLat;
    TrueCycles = FalseCycles = 1;
    if (canFoldIntoCSel(MRI, TrueReg))
      TrueCycles = 0;
    else if (canFoldIntoCSel(MRI, FalseReg))
      FalseCycles = 0;
    return true;
  }

  // Scalar floating point is handled by fcsel.
  // FIXME: Form fabs, fmin, and fmax when applicable.
  if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
      AArch64::FPR32RegClass.hasSubClassEq(RC)) {
    CondCycles = 5 + ExtraCondLat;
    TrueCycles = FalseCycles = 2;
    return true;
  }

  // Can't do vectors.
  return false;
}

void AArch64InstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I, DebugLoc DL,
                                    unsigned DstReg,
                                    ArrayRef<MachineOperand> Cond,
                                    unsigned TrueReg, unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  // Parse the condition code, see parseCondBranch() above.
  AArch64CC::CondCode CC;
  switch (Cond.size()) {
  default:
    llvm_unreachable("Unknown condition opcode in Cond");
  case 1: // b.cc
    CC = AArch64CC::CondCode(Cond[0].getImm());
    break;
  case 3: { // cbz/cbnz
    // We must insert a compare against 0.
    bool Is64Bit;
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::CBZW:
      Is64Bit = 0;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBZX:
      Is64Bit = 1;
      CC = AArch64CC::EQ;
      break;
    case AArch64::CBNZW:
      Is64Bit = 0;
      CC = AArch64CC::NE;
      break;
    case AArch64::CBNZX:
      Is64Bit = 1;
      CC = AArch64CC::NE;
      break;
    }
    unsigned SrcReg = Cond[2].getReg();
    if (Is64Bit) {
      // cmp reg, #0 is actually subs xzr, reg, #0.
      MRI.constrainRegClass(SrcReg, &AArch64::GPR64spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    } else {
      MRI.constrainRegClass(SrcReg, &AArch64::GPR32spRegClass);
      BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
          .addReg(SrcReg)
          .addImm(0)
          .addImm(0);
    }
    break;
  }
  case 4: { // tbz/tbnz
    // We must insert a tst instruction.
    switch (Cond[1].getImm()) {
    default:
      llvm_unreachable("Unknown branch opcode in Cond");
    case AArch64::TBZW:
    case AArch64::TBZX:
      CC = AArch64CC::EQ;
      break;
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      CC = AArch64CC::NE;
      break;
    }
    // cmp reg, #foo is actually ands xzr, reg, #1<<foo.
    if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
      BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 32));
    else
      BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
          .addReg(Cond[2].getReg())
          .addImm(
              AArch64_AM::encodeLogicalImmediate(1ull << Cond[3].getImm(), 64));
    break;
  }
  }

  unsigned Opc = 0;
  const TargetRegisterClass *RC = nullptr;
  bool TryFold = false;
  if (MRI.constrainRegClass(DstReg, &AArch64::GPR64RegClass)) {
    RC = &AArch64::GPR64RegClass;
    Opc = AArch64::CSELXr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::GPR32RegClass)) {
    RC = &AArch64::GPR32RegClass;
    Opc = AArch64::CSELWr;
    TryFold = true;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR64RegClass)) {
    RC = &AArch64::FPR64RegClass;
    Opc = AArch64::FCSELDrrr;
  } else if (MRI.constrainRegClass(DstReg, &AArch64::FPR32RegClass)) {
    RC = &AArch64::FPR32RegClass;
    Opc = AArch64::FCSELSrrr;
  }
  assert(RC && "Unsupported regclass");

  // Try folding simple instructions into the csel.
  if (TryFold) {
    unsigned NewVReg = 0;
    unsigned FoldedOpc = canFoldIntoCSel(MRI, TrueReg, &NewVReg);
    if (FoldedOpc) {
      // The folded opcodes csinc, csinv and csneg apply the operation to
      // FalseReg, so we need to invert the condition.
      CC = AArch64CC::getInvertedCondCode(CC);
      TrueReg = FalseReg;
    } else
      FoldedOpc = canFoldIntoCSel(MRI, FalseReg, &NewVReg);

    // Fold the operation. Leave any dead instructions for DCE to clean up.
    if (FoldedOpc) {
      FalseReg = NewVReg;
      Opc = FoldedOpc;
      // This extends the live range of NewVReg.
      MRI.clearKillFlags(NewVReg);
    }
  }

  // Pull all virtual registers into the appropriate class.
  MRI.constrainRegClass(TrueReg, RC);
  MRI.constrainRegClass(FalseReg, RC);

  BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(TrueReg).addReg(FalseReg).addImm(
      CC);
}

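// Note: csinc/csinv/csneg compute "cond ? Rn : op(Rm)", i.e. the extra
// operation is applied to the second source register. That is why a fold on
// TrueReg above has to invert the condition and move the folded value into
// the FalseReg position.
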
// FIXME: this implementation should be micro-architecture dependent, so a
// micro-architecture target hook should be introduced here in future.
bool AArch64InstrInfo::isAsCheapAsAMove(const MachineInstr *MI) const {
  if (!Subtarget.isCortexA57() && !Subtarget.isCortexA53())
    return MI->isAsCheapAsAMove();

  switch (MI->getOpcode()) {
  default:
    return false;

  // add/sub with unshifted immediate
  case AArch64::ADDWri:
  case AArch64::ADDXri:
  case AArch64::SUBWri:
  case AArch64::SUBXri:
    return (MI->getOperand(3).getImm() == 0);

  // logical ops on immediate
  case AArch64::ANDWri:
  case AArch64::ANDXri:
  case AArch64::EORWri:
  case AArch64::EORXri:
  case AArch64::ORRWri:
  case AArch64::ORRXri:
    return true;

  // logical ops on register without shift
  case AArch64::ANDWrr:
  case AArch64::ANDXrr:
  case AArch64::BICWrr:
  case AArch64::BICXrr:
  case AArch64::EONWrr:
  case AArch64::EONXrr:
  case AArch64::EORWrr:
  case AArch64::EORXrr:
  case AArch64::ORNWrr:
  case AArch64::ORNXrr:
  case AArch64::ORRWrr:
  case AArch64::ORRXrr:
    return true;
  }

  llvm_unreachable("Unknown opcode to check as cheap as a move!");
}

bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                             unsigned &SrcReg, unsigned &DstReg,
                                             unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::SBFMXri: // aka sxtw
  case AArch64::UBFMXri: // aka uxtw
    // Check for the 32 -> 64 bit extension case; these instructions can do
    // much more.
    if (MI.getOperand(2).getImm() != 0 || MI.getOperand(3).getImm() != 31)
      return false;
    // This is a signed or unsigned 32 -> 64 bit extension.
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = AArch64::sub_32;
    return true;
  }
}

bool
AArch64InstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned BaseRegA = 0, BaseRegB = 0;
  int OffsetA = 0, OffsetB = 0;
  int WidthA = 0, WidthB = 0;

  assert(MIa && MIa->mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb && MIb->mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects() ||
      MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // Retrieve the base register, offset from the base register and width. Width
  // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
  // the base registers are identical, and the offset of the lower memory
  // access plus its width does not reach the offset of the higher memory
  // access, then the accesses are disjoint.
  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
    if (BaseRegA == BaseRegB) {
      int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
      int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}

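// For example, a store to [x0, #0] of width 8 and a load from [x0, #8] of
// width 4 share base register x0 and satisfy 0 + 8 <= 8, so the two accesses
// are provably disjoint.
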
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2, and the value it compares against in CmpValue.
/// Return true if the comparison instruction can be analyzed.
bool AArch64InstrInfo::analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &CmpMask,
                                      int &CmpValue) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::SUBSWrr:
  case AArch64::SUBSWrs:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXrs:
  case AArch64::SUBSXrx:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWrs:
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXrs:
  case AArch64::ADDSXrx:
    // Replace SUBSWrr with SUBWrr if NZCV is not used.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case AArch64::SUBSWri:
  case AArch64::ADDSWri:
  case AArch64::SUBSXri:
  case AArch64::ADDSXri:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: CmpValue is reduced to 0 or 1 here; it is only compared against
    // zero in optimizeCompareInstr.
    CmpValue = (MI->getOperand(2).getImm() != 0);
    return true;
  case AArch64::ANDSWri:
  case AArch64::ANDSXri:
    // ANDS does not use the same encoding scheme as the other xxxS
    // instructions.
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    // FIXME: The return type of decodeLogicalImmediate is uint64_t, while the
    // type of CmpValue is int. When converting uint64_t to int, the high 32
    // bits of the uint64_t would be lost; this caused a bug in
    // spec2006-483.xalancbmk. CmpValue is only used to compare against zero
    // in optimizeCompareInstr, so reduce it to 0 or 1 here.
    CmpValue = (AArch64_AM::decodeLogicalImmediate(
                    MI->getOperand(2).getImm(),
                    MI->getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0);
    return true;
  }

  return false;
}

static bool UpdateOperandRegClass(MachineInstr *Instr) {
  MachineBasicBlock *MBB = Instr->getParent();
  assert(MBB && "Can't get MachineBasicBlock here");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Can't get MachineFunction here");
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MachineRegisterInfo *MRI = &MF->getRegInfo();

  for (unsigned OpIdx = 0, EndIdx = Instr->getNumOperands(); OpIdx < EndIdx;
       ++OpIdx) {
    MachineOperand &MO = Instr->getOperand(OpIdx);
    const TargetRegisterClass *OpRegCstraints =
        Instr->getRegClassConstraint(OpIdx, TII, TRI);

    // If there's no constraint, there's nothing to do.
    if (!OpRegCstraints)
      continue;
    // If the operand is a frame index, there's nothing to do here.
    // A frame index operand will resolve correctly during PEI.
    if (MO.isFI())
      continue;

    assert(MO.isReg() &&
           "Operand has register constraints without being a register!");

    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (!OpRegCstraints->contains(Reg))
        return false;
    } else if (!OpRegCstraints->hasSubClassEq(MRI->getRegClass(Reg)) &&
               !MRI->constrainRegClass(Reg, OpRegCstraints))
      return false;
  }

  return true;
}

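// Rewriting an opcode in place (e.g. SUBSWri -> SUBWri in
// optimizeCompareInstr below) can change the register class an operand is
// required to be in, so the new constraints must be re-applied afterwards.
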
/// \brief Return the opcode that does not set flags when possible - otherwise
/// return the original opcode. The caller is responsible for doing the actual
/// substitution and legality checking.
static unsigned convertFlagSettingOpcode(const MachineInstr *MI) {
  // Don't convert all compare instructions, because for some the zero register
  // encoding becomes the sp register.
  bool MIDefinesZeroReg = false;
  if (MI->definesRegister(AArch64::WZR) || MI->definesRegister(AArch64::XZR))
    MIDefinesZeroReg = true;

  switch (MI->getOpcode()) {
  default:
    return MI->getOpcode();
  case AArch64::ADDSWrr:
    return AArch64::ADDWrr;
  case AArch64::ADDSWri:
    return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
  case AArch64::ADDSWrs:
    return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
  case AArch64::ADDSWrx:
    return AArch64::ADDWrx;
  case AArch64::ADDSXrr:
    return AArch64::ADDXrr;
  case AArch64::ADDSXri:
    return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
  case AArch64::ADDSXrs:
    return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
  case AArch64::ADDSXrx:
    return AArch64::ADDXrx;
  case AArch64::SUBSWrr:
    return AArch64::SUBWrr;
  case AArch64::SUBSWri:
    return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
  case AArch64::SUBSWrs:
    return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
  case AArch64::SUBSWrx:
    return AArch64::SUBWrx;
  case AArch64::SUBSXrr:
    return AArch64::SUBXrr;
  case AArch64::SUBSXri:
    return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
  case AArch64::SUBSXrs:
    return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
  case AArch64::SUBSXrx:
    return AArch64::SUBXrx;
  }
}

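// Keeping the flag-setting form when the zero register is the destination is
// deliberate: in the non-flag-setting encodings, register number 31 means SP
// rather than WZR/XZR, so e.g. "cmp w0, #5" (subs wzr, w0, #5) has no valid
// SUBWri counterpart.
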
/// True when the condition code could be modified on the instruction
/// trace starting at \p From and ending at \p To.
static bool modifiesConditionCode(MachineInstr *From, MachineInstr *To,
                                  const bool CheckOnlyCCWrites,
                                  const TargetRegisterInfo *TRI) {
  // We iterate backward starting \p To until we hit \p From.
  MachineBasicBlock::iterator I = To, E = From, B = To->getParent()->begin();

  // Early exit if To is at the beginning of the BB.
  if (I == B)
    return true;

  // Check whether the definition of SrcReg is in the same basic block as
  // Compare. If not, assume the condition code gets modified on some path.
  if (To->getParent() != From->getParent())
    return true;

  // Check that NZCV isn't set on the trace.
  for (--I; I != E; --I) {
    const MachineInstr &Instr = *I;

    if (Instr.modifiesRegister(AArch64::NZCV, TRI) ||
        (!CheckOnlyCCWrites && Instr.readsRegister(AArch64::NZCV, TRI)))
      // This instruction modifies or uses NZCV after the one we want to
      // change.
      return true;

    // We currently don't allow the instruction trace to cross basic
    // block boundaries.
    if (I == B)
      return true;
  }

  return false;
}

/// optimizeCompareInstr - Convert the instruction supplying the argument to
/// the comparison into one that sets the zero bit in the flags register.
bool AArch64InstrInfo::optimizeCompareInstr(
    MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
    int CmpValue, const MachineRegisterInfo *MRI) const {

  // Replace SUBSWrr with SUBWrr if NZCV is not used.
  int Cmp_NZCV = CmpInstr->findRegisterDefOperandIdx(AArch64::NZCV, true);
  if (Cmp_NZCV != -1) {
    if (CmpInstr->definesRegister(AArch64::WZR) ||
        CmpInstr->definesRegister(AArch64::XZR)) {
      CmpInstr->eraseFromParent();
      return true;
    }
    unsigned Opc = CmpInstr->getOpcode();
    unsigned NewOpc = convertFlagSettingOpcode(CmpInstr);
    if (NewOpc == Opc)
      return false;
    const MCInstrDesc &MCID = get(NewOpc);
    CmpInstr->setDesc(MCID);
    CmpInstr->RemoveOperand(Cmp_NZCV);
    bool succeeded = UpdateOperandRegClass(CmpInstr);
    (void)succeeded;
    assert(succeeded && "Some operands' register classes are incompatible!");
    return true;
  }

  // Continue only if we have a "ri" where the immediate is zero.
  // FIXME: CmpValue has already been converted to 0 or 1 in analyzeCompare.
  assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
  if (CmpValue != 0 || SrcReg2 != 0)
    return false;

  // CmpInstr is only a compare if its destination register is not used.
  if (!MRI->use_nodbg_empty(CmpInstr->getOperand(0).getReg()))
    return false;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI)
    return false;
  bool CheckOnlyCCWrites = false;
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  if (modifiesConditionCode(MI, CmpInstr, CheckOnlyCCWrites, TRI))
    return false;

  unsigned NewOpc = MI->getOpcode();
  switch (MI->getOpcode()) {
  default:
    return false;
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSWri:
  case AArch64::SUBSXrr:
  case AArch64::SUBSXri:
    break;
  case AArch64::ADDWrr: NewOpc = AArch64::ADDSWrr; break;
  case AArch64::ADDWri: NewOpc = AArch64::ADDSWri; break;
  case AArch64::ADDXrr: NewOpc = AArch64::ADDSXrr; break;
  case AArch64::ADDXri: NewOpc = AArch64::ADDSXri; break;
  case AArch64::ADCWr: NewOpc = AArch64::ADCSWr; break;
  case AArch64::ADCXr: NewOpc = AArch64::ADCSXr; break;
  case AArch64::SUBWrr: NewOpc = AArch64::SUBSWrr; break;
  case AArch64::SUBWri: NewOpc = AArch64::SUBSWri; break;
  case AArch64::SUBXrr: NewOpc = AArch64::SUBSXrr; break;
  case AArch64::SUBXri: NewOpc = AArch64::SUBSXri; break;
  case AArch64::SBCWr: NewOpc = AArch64::SBCSWr; break;
  case AArch64::SBCXr: NewOpc = AArch64::SBCSXr; break;
  case AArch64::ANDWri: NewOpc = AArch64::ANDSWri; break;
  case AArch64::ANDXri: NewOpc = AArch64::ANDSXri; break;
  }

  // Scan forward for the use of NZCV.
  // If a condition code is used that requires inspecting the V bit, the
  // transformation is not safe.
  // It is safe to remove CmpInstr if NZCV is redefined or killed.
  // If we are done with the basic block, we need to check whether NZCV is
  // live-out.
  bool IsSafe = false;
  for (MachineBasicBlock::iterator I = CmpInstr,
                                   E = CmpInstr->getParent()->end();
       !IsSafe && ++I != E;) {
    const MachineInstr &Instr = *I;
    for (unsigned IO = 0, EO = Instr.getNumOperands(); !IsSafe && IO != EO;
         ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      if (MO.isRegMask() && MO.clobbersPhysReg(AArch64::NZCV)) {
        IsSafe = true;
        break;
      }
      if (!MO.isReg() || MO.getReg() != AArch64::NZCV)
        continue;
      if (MO.isDef()) {
        IsSafe = true;
        break;
      }

      // Decode the condition code.
      unsigned Opc = Instr.getOpcode();
      AArch64CC::CondCode CC;
      switch (Opc) {
      default:
        return false;
      case AArch64::Bcc:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 2).getImm();
        break;
      case AArch64::CSINVWr:
      case AArch64::CSINVXr:
      case AArch64::CSINCWr:
      case AArch64::CSINCXr:
      case AArch64::CSELWr:
      case AArch64::CSELXr:
      case AArch64::CSNEGWr:
      case AArch64::CSNEGXr:
      case AArch64::FCSELSrrr:
      case AArch64::FCSELDrrr:
        CC = (AArch64CC::CondCode)Instr.getOperand(IO - 1).getImm();
        break;
      }

      // It is not safe to remove the Compare instruction if Overflow(V) is
      // used.
      switch (CC) {
      default:
        // NZCV can be used multiple times, we should continue.
        break;
      case AArch64CC::VS:
      case AArch64CC::VC:
      case AArch64CC::GE:
      case AArch64CC::GT:
      case AArch64CC::LT:
      case AArch64CC::LE:
        return false;
      }
    }
  }

  // If NZCV is neither killed nor re-defined, we should check whether it is
  // live-out. If it is live-out, do not optimize.
  if (!IsSafe) {
    MachineBasicBlock *ParentBlock = CmpInstr->getParent();
    for (auto *MBB : ParentBlock->successors())
      if (MBB->isLiveIn(AArch64::NZCV))
        return false;
  }

  // Update the instruction to set NZCV.
  MI->setDesc(get(NewOpc));
  CmpInstr->eraseFromParent();
  bool succeeded = UpdateOperandRegClass(MI);
  (void)succeeded;
  assert(succeeded && "Some operands' register classes are incompatible!");
  MI->addRegisterDefined(AArch64::NZCV, TRI);
  return true;
}

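// For example, in
//   adds w8, w0, w1
//   cmp  w8, #0
//   b.eq target
// the cmp is redundant: adds already set the Z flag from the same value, and
// b.eq only reads Z, so the compare is erased above.
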
bool
AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned Reg = MI->getOperand(0).getReg();
  const GlobalValue *GV =
      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
  const TargetMachine &TM = MBB.getParent()->getTarget();
  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
  const unsigned char MO_NC = AArch64II::MO_NC;

  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else if (TM.getCodeModel() == CodeModel::Large) {
    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill).addImm(0)
        .addMemOperand(*MI->memoperands_begin());
  } else {
    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
        .addReg(Reg, RegState::Kill)
        .addGlobalAddress(GV, 0, LoFlags)
        .addMemOperand(*MI->memoperands_begin());
  }

  MBB.erase(MI);

  return true;
}

/// Return true if this instruction has a shifted register operand with a
/// non-zero shift amount.
bool AArch64InstrInfo::hasShiftedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrs:
  case AArch64::ADDSXrs:
  case AArch64::ADDWrs:
  case AArch64::ADDXrs:
  case AArch64::ANDSWrs:
  case AArch64::ANDSXrs:
  case AArch64::ANDWrs:
  case AArch64::ANDXrs:
  case AArch64::BICSWrs:
  case AArch64::BICSXrs:
  case AArch64::BICWrs:
  case AArch64::BICXrs:
  case AArch64::CRC32Brr:
  case AArch64::CRC32CBrr:
  case AArch64::CRC32CHrr:
  case AArch64::CRC32CWrr:
  case AArch64::CRC32CXrr:
  case AArch64::CRC32Hrr:
  case AArch64::CRC32Wrr:
  case AArch64::CRC32Xrr:
  case AArch64::EONWrs:
  case AArch64::EONXrs:
  case AArch64::EORWrs:
  case AArch64::EORXrs:
  case AArch64::ORNWrs:
  case AArch64::ORNXrs:
  case AArch64::ORRWrs:
  case AArch64::ORRXrs:
  case AArch64::SUBSWrs:
  case AArch64::SUBSXrs:
  case AArch64::SUBWrs:
  case AArch64::SUBXrs:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }
  return false;
}

/// Return true if this instruction has an extended register operand with a
/// non-zero extend/shift immediate.
bool AArch64InstrInfo::hasExtendedReg(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::ADDSWrx:
  case AArch64::ADDSXrx:
  case AArch64::ADDSXrx64:
  case AArch64::ADDWrx:
  case AArch64::ADDXrx:
  case AArch64::ADDXrx64:
  case AArch64::SUBSWrx:
  case AArch64::SUBSXrx:
  case AArch64::SUBSXrx64:
  case AArch64::SUBWrx:
  case AArch64::SUBXrx:
  case AArch64::SUBXrx64:
    if (MI->getOperand(3).isImm()) {
      unsigned val = MI->getOperand(3).getImm();
      return (val != 0);
    }
    break;
  }

  return false;
}

// Return true if this instruction simply sets its single destination register
// to zero. This is equivalent to a register rename of the zero-register.
bool AArch64InstrInfo::isGPRZero(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::MOVZWi:
  case AArch64::MOVZXi: // movz Rd, #0 (LSL #0)
    if (MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 3 &&
             MI->getOperand(2).getImm() == 0 && "invalid MOVZi operands");
      return true;
    }
    break;
  case AArch64::ANDWri: // and Rd, WZR, #imm
    return MI->getOperand(1).getReg() == AArch64::WZR;
  case AArch64::ANDXri:
    return MI->getOperand(1).getReg() == AArch64::XZR;
  case TargetOpcode::COPY:
    return MI->getOperand(1).getReg() == AArch64::WZR;
  }
  return false;
}

// Return true if this instruction simply renames a general register without
// modifying bits.
bool AArch64InstrInfo::isGPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // GPR32 copies will be lowered to ORRXrs
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::GPR32RegClass.contains(DstReg) ||
            AArch64::GPR64RegClass.contains(DstReg));
  }
  case AArch64::ORRXrs: // orr Xd, Xzr, Xm (LSL #0)
    if (MI->getOperand(1).getReg() == AArch64::XZR) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ORRrs operands");
      return true;
    }
    break;
  case AArch64::ADDXri: // add Xd, Xn, #0 (LSL #0)
    if (MI->getOperand(2).getImm() == 0) {
      assert(MI->getDesc().getNumOperands() == 4 &&
             MI->getOperand(3).getImm() == 0 && "invalid ADDXri operands");
      return true;
    }
    break;
  }
  return false;
}

// Return true if this instruction simply renames a floating-point register
// without modifying bits.
bool AArch64InstrInfo::isFPRCopy(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case TargetOpcode::COPY: {
    // FPR64 copies will be lowered to ORR.16b
    unsigned DstReg = MI->getOperand(0).getReg();
    return (AArch64::FPR64RegClass.contains(DstReg) ||
            AArch64::FPR128RegClass.contains(DstReg));
  }
  case AArch64::ORRv16i8:
    if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
      assert(MI->getDesc().getNumOperands() == 3 && MI->getOperand(0).isReg() &&
             "invalid ORRv16i8 operands");
      return true;
    }
    break;
  }
  return false;
}

unsigned AArch64InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  case AArch64::LDRBui:
  case AArch64::LDRHui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned AArch64InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::STRWui:
  case AArch64::STRXui:
  case AArch64::STRBui:
  case AArch64::STRHui:
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
    if (MI->getOperand(0).getSubReg() == 0 && MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

/// Return true if this load/store scales or extends its register offset.
/// This refers to scaling a dynamic index as opposed to scaled immediates.
/// MI should be a memory op that allows scaled addressing.
bool AArch64InstrInfo::isScaledAddr(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default:
    break;
  case AArch64::LDRBBroW:
  case AArch64::LDRBroW:
  case AArch64::LDRDroW:
  case AArch64::LDRHHroW:
  case AArch64::LDRHroW:
  case AArch64::LDRQroW:
  case AArch64::LDRSBWroW:
  case AArch64::LDRSBXroW:
  case AArch64::LDRSHWroW:
  case AArch64::LDRSHXroW:
  case AArch64::LDRSWroW:
  case AArch64::LDRSroW:
  case AArch64::LDRWroW:
  case AArch64::LDRXroW:
  case AArch64::STRBBroW:
  case AArch64::STRBroW:
  case AArch64::STRDroW:
  case AArch64::STRHHroW:
  case AArch64::STRHroW:
  case AArch64::STRQroW:
  case AArch64::STRSroW:
  case AArch64::STRWroW:
  case AArch64::STRXroW:
  case AArch64::LDRBBroX:
  case AArch64::LDRBroX:
  case AArch64::LDRDroX:
  case AArch64::LDRHHroX:
  case AArch64::LDRHroX:
  case AArch64::LDRQroX:
  case AArch64::LDRSBWroX:
  case AArch64::LDRSBXroX:
  case AArch64::LDRSHWroX:
  case AArch64::LDRSHXroX:
  case AArch64::LDRSWroX:
  case AArch64::LDRSroX:
  case AArch64::LDRWroX:
  case AArch64::LDRXroX:
  case AArch64::STRBBroX:
  case AArch64::STRBroX:
  case AArch64::STRDroX:
  case AArch64::STRHHroX:
  case AArch64::STRHroX:
  case AArch64::STRQroX:
  case AArch64::STRSroX:
  case AArch64::STRWroX:
  case AArch64::STRXroX:

    unsigned Val = MI->getOperand(3).getImm();
    AArch64_AM::ShiftExtendType ExtType = AArch64_AM::getMemExtendType(Val);
    return (ExtType != AArch64_AM::UXTX) || AArch64_AM::getMemDoShift(Val);
  }
  return false;
}

/// Check all MachineMemOperands for a hint to suppress pairing.
bool AArch64InstrInfo::isLdStPairSuppressed(const MachineInstr *MI) const {
  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  for (auto *MM : MI->memoperands()) {
    if (MM->getFlags() &
        (MOSuppressPair << MachineMemOperand::MOTargetStartBit)) {
      return true;
    }
  }
  return false;
}

/// Set a flag on the first MachineMemOperand to suppress pairing.
void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
  if (MI->memoperands_empty())
    return;

  assert(MOSuppressPair < (1 << MachineMemOperand::MOTargetNumBits) &&
         "Too many target MO flags");
  (*MI->memoperands_begin())
      ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
}

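// The hint is stored in the target-specific flag bits of the memory operand
// rather than on the opcode, so it survives later opcode rewrites of the
// load/store instruction.
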
bool
AArch64InstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
    if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
      return false;
    BaseReg = LdSt->getOperand(1).getReg();
    MachineFunction &MF = *LdSt->getParent()->getParent();
    unsigned Width = getRegClass(LdSt->getDesc(), 0, TRI, MF)->getSize();
    Offset = LdSt->getOperand(2).getImm() * Width;
    return true;
  }
}

bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
    MachineInstr *LdSt, unsigned &BaseReg, int &Offset, int &Width,
    const TargetRegisterInfo *TRI) const {
  // Handle only loads/stores with base register followed by immediate offset.
  if (LdSt->getNumOperands() != 3)
    return false;
  if (!LdSt->getOperand(1).isReg() || !LdSt->getOperand(2).isImm())
    return false;

  // Offset is calculated as the immediate operand multiplied by the scaling
  // factor. Unscaled instructions have a scaling factor of 1.
  int Scale = 0;
  switch (LdSt->getOpcode()) {
  default:
    return false;
  case AArch64::LDURQi:
  case AArch64::STURQi:
    Width = 16;
    Scale = 1;
    break;
  case AArch64::LDURXi:
  case AArch64::LDURDi:
  case AArch64::STURXi:
  case AArch64::STURDi:
    Width = 8;
    Scale = 1;
    break;
  case AArch64::LDURWi:
  case AArch64::LDURSi:
  case AArch64::LDURSWi:
  case AArch64::STURWi:
  case AArch64::STURSi:
    Width = 4;
    Scale = 1;
    break;
  case AArch64::LDURHi:
  case AArch64::LDURHHi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::STURHi:
  case AArch64::STURHHi:
    Width = 2;
    Scale = 1;
    break;
  case AArch64::LDURBi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::STURBi:
  case AArch64::STURBBi:
    Width = 1;
    Scale = 1;
    break;
  case AArch64::LDRXui:
  case AArch64::STRXui:
    Scale = Width = 8;
    break;
  case AArch64::LDRWui:
  case AArch64::STRWui:
    Scale = Width = 4;
    break;
  case AArch64::LDRBui:
  case AArch64::STRBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHui:
  case AArch64::STRHui:
    Scale = Width = 2;
    break;
  case AArch64::LDRSui:
  case AArch64::STRSui:
    Scale = Width = 4;
    break;
  case AArch64::LDRDui:
  case AArch64::STRDui:
    Scale = Width = 8;
    break;
  case AArch64::LDRQui:
  case AArch64::STRQui:
    Scale = Width = 16;
    break;
  case AArch64::LDRBBui:
  case AArch64::STRBBui:
    Scale = Width = 1;
    break;
  case AArch64::LDRHHui:
  case AArch64::STRHHui:
    Scale = Width = 2;
    break;
  }

  BaseReg = LdSt->getOperand(1).getReg();
  Offset = LdSt->getOperand(2).getImm() * Scale;
  return true;
}

/// Detect opportunities for ldp/stp formation.
///
/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
bool AArch64InstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                          MachineInstr *SecondLdSt,
                                          unsigned NumLoads) const {
  // Only cluster up to a single pair.
  if (NumLoads > 1)
    return false;
  if (FirstLdSt->getOpcode() != SecondLdSt->getOpcode())
    return false;
  // getMemOpBaseRegImmOfs guarantees that operand 2 isImm.
  unsigned Ofs1 = FirstLdSt->getOperand(2).getImm();
  // Allow 6 bits of positive range.
  if (Ofs1 > 64)
    return false;
  // The caller should already have ordered First/SecondLdSt by offset.
  unsigned Ofs2 = SecondLdSt->getOperand(2).getImm();
  return Ofs1 + 1 == Ofs2;
}

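// For example, two LDRXui loads from [x0, #1] and [x0, #2] (immediates are
// in scaled units, i.e. bytes 8 and 16) are adjacent and can later be merged
// into a single ldp.
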
bool AArch64InstrInfo::shouldScheduleAdjacent(MachineInstr *First,
                                              MachineInstr *Second) const {
  if (Subtarget.isCyclone()) {
    // Cyclone can fuse CMN, CMP, TST followed by Bcc.
    unsigned SecondOpcode = Second->getOpcode();
    if (SecondOpcode == AArch64::Bcc) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::SUBSWri:
      case AArch64::ADDSWri:
      case AArch64::ANDSWri:
      case AArch64::SUBSXri:
      case AArch64::ADDSXri:
      case AArch64::ANDSXri:
        return true;
      }
    }
    // Cyclone B0 also supports ALU operations followed by CBZ/CBNZ.
    if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
        SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
      switch (First->getOpcode()) {
      default:
        return false;
      case AArch64::ADDWri:
      case AArch64::ADDXri:
      case AArch64::ANDWri:
      case AArch64::ANDXri:
      case AArch64::EORWri:
      case AArch64::EORXri:
      case AArch64::ORRWri:
      case AArch64::ORRXri:
      case AArch64::SUBWri:
      case AArch64::SUBXri:
        return true;
      }
    }
  }
  return false;
}

MachineInstr *AArch64InstrInfo::emitFrameIndexDebugValue(
    MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var,
    const MDNode *Expr, DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(AArch64::DBG_VALUE))
                                .addFrameIndex(FrameIx)
                                .addImm(0)
                                .addImm(Offset)
                                .addMetadata(Var)
                                .addMetadata(Expr);
  return &*MIB;
}

static const MachineInstrBuilder &AddSubReg(const MachineInstrBuilder &MIB,
                                            unsigned Reg, unsigned SubIdx,
                                            unsigned State,
                                            const TargetRegisterInfo *TRI) {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}

static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  // We really want the positive remainder mod 32 here; that happens to be
  // easily obtainable with a mask.
  return ((DestReg - SrcReg) & 0x1f) < NumRegs;
}

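// For example, copying the tuple D1_D2_D3 into D2_D3_D4 lane by lane in
// forward order would overwrite D2 and D3 before they are read; the check
// above detects this ((2 - 1) & 0x1f = 1 < 3), and copyPhysRegTuple() then
// copies the lanes in reverse order instead.
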
void AArch64InstrInfo::copyPhysRegTuple(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, DebugLoc DL,
    unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
    llvm::ArrayRef<unsigned> Indices) const {
  assert(Subtarget.hasNEON() &&
         "Unexpected register copy without NEON");
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned NumRegs = Indices.size();

  int SubReg = 0, End = NumRegs, Incr = 1;
  if (forwardCopyWillClobberTuple(DestEncoding, SrcEncoding, NumRegs)) {
    SubReg = NumRegs - 1;
    End = -1;
    Incr = -1;
  }

  for (; SubReg != End; SubReg += Incr) {
    const MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opcode));
    AddSubReg(MIB, DestReg, Indices[SubReg], RegState::Define, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
    AddSubReg(MIB, SrcReg, Indices[SubReg], getKillRegState(KillSrc), TRI);
  }
}

void AArch64InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  if (AArch64::GPR32spRegClass.contains(DestReg) &&
      (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();

    if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
      // If either operand is WSP, expand to ADD #0.
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ADD Xd, Xn, #0" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
            .addReg(SrcRegX, RegState::Undef)
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
            .addReg(SrcReg, getKillRegState(KillSrc))
            .addImm(0)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
      }
    } else if (SrcReg == AArch64::WZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZWi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      if (Subtarget.hasZeroCycleRegMove()) {
        // Cyclone recognizes "ORR Xd, XZR, Xm" as a zero-cycle register move.
        unsigned DestRegX = TRI->getMatchingSuperReg(DestReg, AArch64::sub_32,
                                                     &AArch64::GPR64spRegClass);
        unsigned SrcRegX = TRI->getMatchingSuperReg(SrcReg, AArch64::sub_32,
                                                    &AArch64::GPR64spRegClass);
        // This instruction is reading and writing X registers. This may upset
        // the register scavenger and machine verifier, so we need to indicate
        // that we are reading an undefined value from SrcRegX, but a proper
        // value from SrcReg.
        BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
            .addReg(AArch64::XZR)
            .addReg(SrcRegX, RegState::Undef)
            .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
      } else {
        // Otherwise, expand to ORR WZR.
        BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
            .addReg(AArch64::WZR)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }
    }
    return;
  }

  if (AArch64::GPR64spRegClass.contains(DestReg) &&
      (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
    if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
      // If either operand is SP, expand to ADD #0.
      BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0)
          .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else if (SrcReg == AArch64::XZR && Subtarget.hasZeroCycleZeroing()) {
      BuildMI(MBB, I, DL, get(AArch64::MOVZXi), DestReg).addImm(0).addImm(
          AArch64_AM::getShifterImm(AArch64_AM::LSL, 0));
    } else {
      // Otherwise, expand to ORR XZR.
      BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
          .addReg(AArch64::XZR)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copy a DDDD register quad by copying the individual sub-registers.
  if (AArch64::DDDDRegClass.contains(DestReg) &&
      AArch64::DDDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2, AArch64::dsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DDD register triple by copying the individual sub-registers.
  if (AArch64::DDDRegClass.contains(DestReg) &&
      AArch64::DDDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
                                        AArch64::dsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a DD register pair by copying the individual sub-registers.
  if (AArch64::DDRegClass.contains(DestReg) &&
      AArch64::DDRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv8i8,
                     Indices);
    return;
  }

  // Copy a QQQQ register quad by copying the individual sub-registers.
  if (AArch64::QQQQRegClass.contains(DestReg) &&
      AArch64::QQQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2, AArch64::qsub3 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQQ register triple by copying the individual sub-registers.
  if (AArch64::QQQRegClass.contains(DestReg) &&
      AArch64::QQQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
                                        AArch64::qsub2 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  // Copy a QQ register pair by copying the individual sub-registers.
  if (AArch64::QQRegClass.contains(DestReg) &&
      AArch64::QQRegClass.contains(SrcReg)) {
    static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
    copyPhysRegTuple(MBB, I, DL, DestReg, SrcReg, KillSrc, AArch64::ORRv16i8,
                     Indices);
    return;
  }

  if (AArch64::FPR128RegClass.contains(DestReg) &&
      AArch64::FPR128RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::STRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addReg(AArch64::SP)
          .addImm(-16);
      BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
          .addReg(AArch64::SP, RegState::Define)
          .addReg(DestReg, RegState::Define)
          .addReg(AArch64::SP)
          .addImm(16);
    }
    return;
  }

  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR16RegClass.contains(DestReg) &&
      AArch64::FPR16RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  if (AArch64::FPR8RegClass.contains(DestReg) &&
      AArch64::FPR8RegClass.contains(SrcReg)) {
    if (Subtarget.hasNEON()) {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR128RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR128RegClass);
      BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
          .addReg(SrcReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    } else {
      DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
                                       &AArch64::FPR32RegClass);
      SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
                                      &AArch64::FPR32RegClass);
      BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
          .addReg(SrcReg, getKillRegState(KillSrc));
    }
    return;
  }

  // Copies between GPR64 and FPR64.
  if (AArch64::FPR64RegClass.contains(DestReg) &&
      AArch64::GPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR64RegClass.contains(DestReg) &&
      AArch64::FPR64RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  // Copies between GPR32 and FPR32.
  if (AArch64::FPR32RegClass.contains(DestReg) &&
      AArch64::GPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  if (AArch64::GPR32RegClass.contains(DestReg) &&
      AArch64::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MSR))
        .addImm(AArch64SysReg::NZCV)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define);
    return;
  }

  if (SrcReg == AArch64::NZCV) {
    assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
    BuildMI(MBB, I, DL, get(AArch64::MRS))
        .addReg(DestReg)
        .addImm(AArch64SysReg::NZCV)
        .addReg(AArch64::NZCV, RegState::Implicit | getKillRegState(KillSrc));
    return;
  }

  llvm_unreachable("unimplemented reg-to-reg copy");
}

void AArch64InstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRWui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR32RegClass);
      else
        assert(SrcReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::STRXui;
      if (TargetRegisterInfo::isVirtualRegister(SrcReg))
        MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      else
        assert(SrcReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::STRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov1d, Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev1d, Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv1d, Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Twov2d, Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Threev2d, Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() &&
             "Unexpected register store without NEON");
      Opc = AArch64::ST1Fourv2d, Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(SrcReg, getKillRegState(isKill))
                                     .addFrameIndex(FI);

  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}

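// Register tuples (DD..QQQQ) have no single str/ldr form, so they are spilled
// and reloaded with ST1/LD1 multi-vector instructions. Those take no
// immediate offset, which is why Offset is cleared for them above.
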
void AArch64InstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  MachinePointerInfo PtrInfo(PseudoSourceValue::getFixedStack(FI));
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, MFI.getObjectSize(FI), Align);

  unsigned Opc = 0;
  bool Offset = true;
  switch (RC->getSize()) {
  case 1:
    if (AArch64::FPR8RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRBui;
    break;
  case 2:
    if (AArch64::FPR16RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRHui;
    break;
  case 4:
    if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRWui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR32RegClass);
      else
        assert(DestReg != AArch64::WSP);
    } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRSui;
    break;
  case 8:
    if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
      Opc = AArch64::LDRXui;
      if (TargetRegisterInfo::isVirtualRegister(DestReg))
        MF.getRegInfo().constrainRegClass(DestReg, &AArch64::GPR64RegClass);
      else
        assert(DestReg != AArch64::SP);
    } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRDui;
    break;
  case 16:
    if (AArch64::FPR128RegClass.hasSubClassEq(RC))
      Opc = AArch64::LDRQui;
    else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov1d;
      Offset = false;
    }
    break;
  case 24:
    if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev1d;
      Offset = false;
    }
    break;
  case 32:
    if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv1d;
      Offset = false;
    } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Twov2d;
      Offset = false;
    }
    break;
  case 48:
    if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Threev2d;
      Offset = false;
    }
    break;
  case 64:
    if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
      assert(Subtarget.hasNEON() && "Unexpected register load without NEON");
      Opc = AArch64::LD1Fourv2d;
      Offset = false;
    }
    break;
  }
  assert(Opc && "Unknown register class");

  const MachineInstrBuilder MI = BuildMI(MBB, MBBI, DL, get(Opc))
                                     .addReg(DestReg, getDefRegState(true))
                                     .addFrameIndex(FI);
  if (Offset)
    MI.addImm(0);
  MI.addMemOperand(MMO);
}
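
// Note: the LD1/ST1 opcodes used for the multi-register (DD..QQQQ) classes
// above take no immediate offset, which is why Offset is cleared for them;
// any stack offset must be folded into the base register separately (see the
// "Vector spills/fills can't take an immediate offset" cases in
// isAArch64FrameOffsetLegal below).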
void llvm::emitFrameOffset(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg, int Offset,
                           const TargetInstrInfo *TII,
                           MachineInstr::MIFlag Flag, bool SetNZCV) {
  if (DestReg == SrcReg && Offset == 0)
    return;

  bool isSub = Offset < 0;
  if (isSub)
    Offset = -Offset;

  // FIXME: If the offset won't fit in 24 bits, compute the offset into a
  // scratch register. If DestReg is a virtual register, use it as the
  // scratch register; otherwise, create a new virtual register (to be
  // replaced by the scavenger at the end of PEI). That case can be optimized
  // slightly if DestReg is SP, which is always 16-byte aligned, so the
  // scratch register can be loaded with offset%8 and the add/sub can use an
  // extending instruction with LSL#3.
  // Currently the function handles any offset but generates a poor sequence
  // of code.
  // assert(Offset < (1 << 24) && "unimplemented reg plus immediate");

  unsigned Opc;
  if (SetNZCV)
    Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
  else
    Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
  const unsigned MaxEncoding = 0xfff;
  const unsigned ShiftSize = 12;
  const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
  while (((unsigned)Offset) >= (1 << ShiftSize)) {
    unsigned ThisVal;
    if (((unsigned)Offset) > MaxEncodableValue)
      ThisVal = MaxEncodableValue;
    else
      ThisVal = Offset & MaxEncodableValue;
    assert((ThisVal >> ShiftSize) <= MaxEncoding &&
           "Encoding cannot handle value that big");
    BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
        .addReg(SrcReg)
        .addImm(ThisVal >> ShiftSize)
        .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftSize))
        .setMIFlag(Flag);

    SrcReg = DestReg;
    Offset -= ThisVal;
    if (Offset == 0)
      return;
  }
  BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
      .addReg(SrcReg)
      .addImm(Offset)
      .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
      .setMIFlag(Flag);
}
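
// Worked example (illustrative): emitFrameOffset(MBB, MBBI, DL, AArch64::SP,
// AArch64::SP, 0x123456, TII) goes once through the loop and emits
//   add sp, sp, #0x123, lsl #12   ; consume the 0x123000 chunk
//   add sp, sp, #0x456            ; remaining low 12 bits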
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
  // This is a bit of a hack. Consider this instruction:
  //
  //   %vreg0<def> = COPY %SP; GPR64all:%vreg0
  //
  // We explicitly chose GPR64all for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %vreg0 may even spill. We can't spill %SP, and since it is in the GPR64all
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, we are going to constrain the %vreg0 register class here.
  //
  // <rdar://problem/11522048>
  //
  if (MI->isCopy()) {
    unsigned DstReg = MI->getOperand(0).getReg();
    unsigned SrcReg = MI->getOperand(1).getReg();
    if (SrcReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(DstReg)) {
      MF.getRegInfo().constrainRegClass(DstReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
    if (DstReg == AArch64::SP &&
        TargetRegisterInfo::isVirtualRegister(SrcReg)) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AArch64::GPR64RegClass);
      return nullptr;
    }
  }

  // Cannot fold.
  return nullptr;
}
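
// Once the copy's virtual register is constrained to GPR64, a later spill of
// it goes through storeRegToStackSlot as an ordinary x-register store instead
// of the generic code attempting to fold a memory operand into a copy of SP.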
int llvm::isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset,
                                    bool *OutUseUnscaledOp,
                                    unsigned *OutUnscaledOp,
                                    int *EmittableOffset) {
  int Scale = 1;
  bool IsSigned = false;
  // The ImmIdx should be changed case by case if it is not 2.
  unsigned ImmIdx = 2;
  unsigned UnscaledOp = 0;
  // Set output values in case of early exit.
  if (EmittableOffset)
    *EmittableOffset = 0;
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = false;
  if (OutUnscaledOp)
    *OutUnscaledOp = 0;
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("unhandled opcode in rewriteAArch64FrameIndex");
  // Vector spills/fills can't take an immediate offset.
  case AArch64::LD1Twov2d:
  case AArch64::LD1Threev2d:
  case AArch64::LD1Fourv2d:
  case AArch64::LD1Twov1d:
  case AArch64::LD1Threev1d:
  case AArch64::LD1Fourv1d:
  case AArch64::ST1Twov2d:
  case AArch64::ST1Threev2d:
  case AArch64::ST1Fourv2d:
  case AArch64::ST1Twov1d:
  case AArch64::ST1Threev1d:
  case AArch64::ST1Fourv1d:
    return AArch64FrameOffsetCannotUpdate;
  case AArch64::PRFMui:
    Scale = 8;
    UnscaledOp = AArch64::PRFUMi;
    break;
  case AArch64::LDRXui:
    Scale = 8;
    UnscaledOp = AArch64::LDURXi;
    break;
  case AArch64::LDRWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURWi;
    break;
  case AArch64::LDRBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBi;
    break;
  case AArch64::LDRHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHi;
    break;
  case AArch64::LDRSui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSi;
    break;
  case AArch64::LDRDui:
    Scale = 8;
    UnscaledOp = AArch64::LDURDi;
    break;
  case AArch64::LDRQui:
    Scale = 16;
    UnscaledOp = AArch64::LDURQi;
    break;
  case AArch64::LDRBBui:
    Scale = 1;
    UnscaledOp = AArch64::LDURBBi;
    break;
  case AArch64::LDRHHui:
    Scale = 2;
    UnscaledOp = AArch64::LDURHHi;
    break;
  case AArch64::LDRSBXui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBXi;
    break;
  case AArch64::LDRSBWui:
    Scale = 1;
    UnscaledOp = AArch64::LDURSBWi;
    break;
  case AArch64::LDRSHXui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHXi;
    break;
  case AArch64::LDRSHWui:
    Scale = 2;
    UnscaledOp = AArch64::LDURSHWi;
    break;
  case AArch64::LDRSWui:
    Scale = 4;
    UnscaledOp = AArch64::LDURSWi;
    break;

  case AArch64::STRXui:
    Scale = 8;
    UnscaledOp = AArch64::STURXi;
    break;
  case AArch64::STRWui:
    Scale = 4;
    UnscaledOp = AArch64::STURWi;
    break;
  case AArch64::STRBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBi;
    break;
  case AArch64::STRHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHi;
    break;
  case AArch64::STRSui:
    Scale = 4;
    UnscaledOp = AArch64::STURSi;
    break;
  case AArch64::STRDui:
    Scale = 8;
    UnscaledOp = AArch64::STURDi;
    break;
  case AArch64::STRQui:
    Scale = 16;
    UnscaledOp = AArch64::STURQi;
    break;
  case AArch64::STRBBui:
    Scale = 1;
    UnscaledOp = AArch64::STURBBi;
    break;
  case AArch64::STRHHui:
    Scale = 2;
    UnscaledOp = AArch64::STURHHi;
    break;

  case AArch64::LDPXi:
  case AArch64::LDPDi:
  case AArch64::STPXi:
  case AArch64::STPDi:
    IsSigned = true;
    Scale = 8;
    break;
  case AArch64::LDPQi:
  case AArch64::STPQi:
    IsSigned = true;
    Scale = 16;
    break;
  case AArch64::LDPWi:
  case AArch64::LDPSi:
  case AArch64::STPWi:
  case AArch64::STPSi:
    IsSigned = true;
    Scale = 4;
    break;

  case AArch64::LDURXi:
  case AArch64::LDURWi:
  case AArch64::LDURBi:
  case AArch64::LDURHi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURHHi:
  case AArch64::LDURBBi:
  case AArch64::LDURSBXi:
  case AArch64::LDURSBWi:
  case AArch64::LDURSHXi:
  case AArch64::LDURSHWi:
  case AArch64::LDURSWi:
  case AArch64::STURXi:
  case AArch64::STURWi:
  case AArch64::STURBi:
  case AArch64::STURHi:
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURBBi:
  case AArch64::STURHHi:
    Scale = 1;
    break;
  }

  Offset += MI.getOperand(ImmIdx).getImm() * Scale;

  bool useUnscaledOp = false;
  // If the offset doesn't match the scale, we rewrite the instruction to
  // use the unscaled instruction instead. Likewise, if we have a negative
  // offset (and have an unscaled op to use).
  if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
    useUnscaledOp = true;

  // Use an unscaled addressing mode if the instruction has a negative offset
  // (or if the instruction is already using an unscaled addressing mode).
  unsigned MaskBits;
  if (IsSigned) {
    // ldp/stp instructions.
    MaskBits = 7;
    Offset /= Scale;
  } else if (UnscaledOp == 0 || useUnscaledOp) {
    MaskBits = 9;
    IsSigned = true;
    Scale = 1;
  } else {
    MaskBits = 12;
    IsSigned = false;
    Offset /= Scale;
  }

  // Attempt to fold address computation.
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
  if (Offset >= MinOff && Offset <= MaxOff) {
    if (EmittableOffset)
      *EmittableOffset = Offset;
    Offset = 0;
  } else {
    int NewOff = Offset < 0 ? MinOff : MaxOff;
    if (EmittableOffset)
      *EmittableOffset = NewOff;
    Offset = (Offset - NewOff) * Scale;
  }
  if (OutUseUnscaledOp)
    *OutUseUnscaledOp = useUnscaledOp;
  if (OutUnscaledOp)
    *OutUnscaledOp = UnscaledOp;
  return AArch64FrameOffsetCanUpdate |
         (Offset == 0 ? AArch64FrameOffsetIsLegal : 0);
}
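
// Worked example (illustrative): for an LDRXui with immediate 2 and an extra
// frame offset of 1 byte, the total byte offset 2*8 + 1 = 17 is not a
// multiple of the scale (8), so the function reports UseUnscaledOp with
// UnscaledOp = LDURXi and an emittable offset of 17, which fits the 9-bit
// signed unscaled range [-256, 255].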
bool llvm::rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                    unsigned FrameReg, int &Offset,
                                    const AArch64InstrInfo *TII) {
  unsigned Opcode = MI.getOpcode();
  unsigned ImmIdx = FrameRegIdx + 1;

  if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
    Offset += MI.getOperand(ImmIdx).getImm();
    emitFrameOffset(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), FrameReg, Offset, TII,
                    MachineInstr::NoFlags, (Opcode == AArch64::ADDSXri));
    MI.eraseFromParent();
    Offset = 0;
    return true;
  }

  int NewOffset;
  unsigned UnscaledOp;
  bool UseUnscaledOp;
  int Status = isAArch64FrameOffsetLegal(MI, Offset, &UseUnscaledOp,
                                         &UnscaledOp, &NewOffset);
  if (Status & AArch64FrameOffsetCanUpdate) {
    if (Status & AArch64FrameOffsetIsLegal)
      // Replace the FrameIndex with FrameReg.
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
    if (UseUnscaledOp)
      MI.setDesc(TII->get(UnscaledOp));

    MI.getOperand(ImmIdx).ChangeToImmediate(NewOffset);
    return Offset == 0;
  }

  return false;
}
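
// For example (illustrative), "%x0 = LDRXui <fi#0>, 0" with a resolved frame
// offset of 24 bytes becomes "%x0 = LDRXui <framereg>, 3" (24 divided by the
// scale of 8), while a misaligned offset such as 17 is rewritten to the
// unscaled LDURXi form with the byte offset kept as-is.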
void AArch64InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
  NopInst.setOpcode(AArch64::HINT);
  // HINT #0 is the canonical NOP encoding on AArch64.
  NopInst.addOperand(MCOperand::createImm(0));
}
/// useMachineCombiner - return true when a target supports MachineCombiner
bool AArch64InstrInfo::useMachineCombiner() const {
  // AArch64 supports the combiner.
  return true;
}

// True when Opc sets flag
static bool isCombineInstrSettingFlag(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSWrr:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBSWri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// 32b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate32(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDWrr:
  case AArch64::ADDWri:
  case AArch64::SUBWrr:
  case AArch64::ADDSWrr:
  case AArch64::ADDSWri:
  case AArch64::SUBSWrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBWri:
  case AArch64::SUBSWri:
    return true;
  default:
    break;
  }
  return false;
}

// 64b Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate64(unsigned Opc) {
  switch (Opc) {
  case AArch64::ADDXrr:
  case AArch64::ADDXri:
  case AArch64::SUBXrr:
  case AArch64::ADDSXrr:
  case AArch64::ADDSXri:
  case AArch64::SUBSXrr:
  // Note: MSUB Wd,Wn,Wm,Wi -> Wd = Wi - WnxWm, not Wd=WnxWm - Wi.
  case AArch64::SUBXri:
  case AArch64::SUBSXri:
    return true;
  default:
    break;
  }
  return false;
}

// Opcodes that can be combined with a MUL
static bool isCombineInstrCandidate(unsigned Opc) {
  return (isCombineInstrCandidate32(Opc) || isCombineInstrCandidate64(Opc));
}
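
// These predicates drive the machine combiner: for example (illustrative),
//   mul  w8, w0, w1
//   add  w0, w8, w2
// can be replaced by the single instruction
//   madd w0, w0, w1, w2
// provided the mul result has no other users (see canCombineWithMUL below).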
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO,
                              unsigned MulOpc, unsigned ZeroReg) {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineInstr *MI = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    MI = MRI.getUniqueVRegDef(MO.getReg());
  // And it needs to be in the trace (otherwise, it won't have a depth).
  if (!MI || MI->getParent() != &MBB || (unsigned)MI->getOpcode() != MulOpc)
    return false;

  assert(MI->getNumOperands() >= 4 && MI->getOperand(0).isReg() &&
         MI->getOperand(1).isReg() && MI->getOperand(2).isReg() &&
         MI->getOperand(3).isReg() && "MAdd/MSub must have at least 4 regs");

  // The third input reg must be zero.
  if (MI->getOperand(3).getReg() != ZeroReg)
    return false;

  // Must only be used by the user we are combining with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return true;
}
/// Return true when there is potentially a faster code sequence
/// for an instruction chain ending in \p Root. All potential patterns are
/// listed in the \p Patterns vector. Patterns should be sorted in priority
/// order since the pattern evaluator stops checking as soon as it finds a
/// faster sequence.
bool AArch64InstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern::MC_PATTERN> &Patterns) const {
  unsigned Opc = Root.getOpcode();
  MachineBasicBlock &MBB = *Root.getParent();
  bool Found = false;

  if (!isCombineInstrCandidate(Opc))
    return false;
  if (isCombineInstrSettingFlag(Opc)) {
    int Cmp_NZCV = Root.findRegisterDefOperandIdx(AArch64::NZCV, true);
    // When NZCV is live bail out.
    if (Cmp_NZCV == -1)
      return false;
    unsigned NewOpc = convertFlagSettingOpcode(&Root);
    // When opcode can't change bail out.
    // CHECKME: do we miss any cases for opcode conversion?
    if (NewOpc == Opc)
      return false;
    Opc = NewOpc;
  }

  switch (Opc) {
  default:
    break;
  case AArch64::ADDWrr:
    assert(Root.getOperand(1).isReg() && Root.getOperand(2).isReg() &&
           "ADDWrr does not have register operands");
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDW_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDX_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBWrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBW_OP2);
      Found = true;
    }
    break;
  case AArch64::SUBXrr:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP1);
      Found = true;
    }
    if (canCombineWithMUL(MBB, Root.getOperand(2), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBX_OP2);
      Found = true;
    }
    break;
  case AArch64::ADDWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDWI_OP1);
      Found = true;
    }
    break;
  case AArch64::ADDXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULADDXI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBWri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDWrrr,
                          AArch64::WZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBWI_OP1);
      Found = true;
    }
    break;
  case AArch64::SUBXri:
    if (canCombineWithMUL(MBB, Root.getOperand(1), AArch64::MADDXrrr,
                          AArch64::XZR)) {
      Patterns.push_back(MachineCombinerPattern::MC_MULSUBXI_OP1);
      Found = true;
    }
    break;
  }
  return Found;
}
/// genMadd - Generate madd instruction and combine mul and add.
/// Example:
///   MUL I=A,B,0
///   ADD R,I,C
///   ==> MADD R,A,B,C
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
static MachineInstr *genMadd(MachineFunction &MF, MachineRegisterInfo &MRI,
                             const TargetInstrInfo *TII, MachineInstr &Root,
                             SmallVectorImpl<MachineInstr *> &InsInstrs,
                             unsigned IdxMulOpd, unsigned MaddOpc,
                             const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();
  unsigned SrcReg2 = Root.getOperand(IdxOtherOpd).getReg();
  bool Src2IsKill = Root.getOperand(IdxOtherOpd).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg2))
    MRI.constrainRegClass(SrcReg2, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(SrcReg2, getKillRegState(Src2IsKill));
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
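
// On AArch64, MADD Rd,Rn,Rm,Ra computes Rd = Ra + Rn*Rm, so the accumulator
// gathered above (SrcReg2) is always passed as the final register operand.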
/// genMaddR - Generate madd instruction and combine mul and add using
/// an extra virtual register
/// Example - an ADD intermediate needs to be stored in a register:
///   MUL I=A,B,0
///   ADD R,I,Imm
///   ==> ORR  V, ZR, Imm
///   ==> MADD R,A,B,V
/// \param Root is the ADD instruction
/// \param [out] InsInstrs is a vector of machine instructions and will
/// contain the generated madd instruction
/// \param IdxMulOpd is index of operand in Root that is the result of
/// the MUL. In the example above IdxMulOpd is 1.
/// \param MaddOpc the opcode of the madd instruction
/// \param VR is a virtual register that holds the value of an ADD operand
/// (V in the example above).
static MachineInstr *genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI,
                              const TargetInstrInfo *TII, MachineInstr &Root,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              unsigned IdxMulOpd, unsigned MaddOpc,
                              unsigned VR, const TargetRegisterClass *RC) {
  assert(IdxMulOpd == 1 || IdxMulOpd == 2);

  MachineInstr *MUL = MRI.getUniqueVRegDef(Root.getOperand(IdxMulOpd).getReg());
  unsigned ResultReg = Root.getOperand(0).getReg();
  unsigned SrcReg0 = MUL->getOperand(1).getReg();
  bool Src0IsKill = MUL->getOperand(1).isKill();
  unsigned SrcReg1 = MUL->getOperand(2).getReg();
  bool Src1IsKill = MUL->getOperand(2).isKill();

  if (TargetRegisterInfo::isVirtualRegister(ResultReg))
    MRI.constrainRegClass(ResultReg, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg0))
    MRI.constrainRegClass(SrcReg0, RC);
  if (TargetRegisterInfo::isVirtualRegister(SrcReg1))
    MRI.constrainRegClass(SrcReg1, RC);
  if (TargetRegisterInfo::isVirtualRegister(VR))
    MRI.constrainRegClass(VR, RC);

  MachineInstrBuilder MIB = BuildMI(MF, Root.getDebugLoc(), TII->get(MaddOpc),
                                    ResultReg)
                                .addReg(SrcReg0, getKillRegState(Src0IsKill))
                                .addReg(SrcReg1, getKillRegState(Src1IsKill))
                                .addReg(VR);
  // Insert the MADD.
  InsInstrs.push_back(MIB);
  return MUL;
}
/// When getMachineCombinerPatterns() finds potential patterns,
/// this function generates the instructions that could replace the
/// original code sequence
void AArch64InstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern::MC_PATTERN Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineBasicBlock &MBB = *Root.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();

  MachineInstr *MUL;
  const TargetRegisterClass *RC;
  unsigned Opc;
  switch (Pattern) {
  default:
    // signal error.
    break;
  case MachineCombinerPattern::MC_MULADDW_OP1:
  case MachineCombinerPattern::MC_MULADDX_OP1:
    // MUL I=A,B,0
    // ADD R,I,C
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP1) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 1, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDW_OP2:
  case MachineCombinerPattern::MC_MULADDX_OP2:
    // MUL I=A,B,0
    // ADD R,C,I
    // ==> MADD R,A,B,C
    // --- Create(MADD);
    if (Pattern == MachineCombinerPattern::MC_MULADDW_OP2) {
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULADDWI_OP1:
  case MachineCombinerPattern::MC_MULADDXI_OP1: {
    // MUL I=A,B,0
    // ADD R,I,Imm
    // ==> ORR  V, ZR, Imm
    // ==> MADD R,A,B,V
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULADDWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    uint64_t Imm = Root.getOperand(2).getImm();

    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP1:
  case MachineCombinerPattern::MC_MULSUBX_OP1: {
    // MUL I=A,B,0
    // SUB R,I, C
    // ==> SUB  V, 0, C
    // ==> MADD R,A,B,V // = -C + A*B
    // --- Create(MADD);
    const TargetRegisterClass *SubRC;
    unsigned SubOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP1) {
      SubOpc = AArch64::SUBWrr;
      SubRC = &AArch64::GPR32spRegClass;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      SubOpc = AArch64::SUBXrr;
      SubRC = &AArch64::GPR64spRegClass;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(SubRC);
    // SUB NewVR, 0, C
    MachineInstrBuilder MIB1 =
        BuildMI(MF, Root.getDebugLoc(), TII->get(SubOpc), NewVR)
            .addReg(ZeroReg)
            .addOperand(Root.getOperand(2));
    InsInstrs.push_back(MIB1);
    InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
    MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    break;
  }
  case MachineCombinerPattern::MC_MULSUBW_OP2:
  case MachineCombinerPattern::MC_MULSUBX_OP2:
    // MUL I=A,B,0
    // SUB R,C,I
    // ==> MSUB R,A,B,C (computes C - A*B)
    // --- Create(MSUB);
    if (Pattern == MachineCombinerPattern::MC_MULSUBW_OP2) {
      Opc = AArch64::MSUBWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      Opc = AArch64::MSUBXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    MUL = genMadd(MF, MRI, TII, Root, InsInstrs, 2, Opc, RC);
    break;
  case MachineCombinerPattern::MC_MULSUBWI_OP1:
  case MachineCombinerPattern::MC_MULSUBXI_OP1: {
    // MUL I=A,B,0
    // SUB R,I, Imm
    // ==> ORR  V, ZR, -Imm
    // ==> MADD R,A,B,V // = -Imm + A*B
    // --- Create(MADD);
    const TargetRegisterClass *OrrRC;
    unsigned BitSize, OrrOpc, ZeroReg;
    if (Pattern == MachineCombinerPattern::MC_MULSUBWI_OP1) {
      OrrOpc = AArch64::ORRWri;
      OrrRC = &AArch64::GPR32spRegClass;
      BitSize = 32;
      ZeroReg = AArch64::WZR;
      Opc = AArch64::MADDWrrr;
      RC = &AArch64::GPR32RegClass;
    } else {
      OrrOpc = AArch64::ORRXri;
      OrrRC = &AArch64::GPR64spRegClass;
      BitSize = 64;
      ZeroReg = AArch64::XZR;
      Opc = AArch64::MADDXrrr;
      RC = &AArch64::GPR64RegClass;
    }
    unsigned NewVR = MRI.createVirtualRegister(OrrRC);
    int Imm = Root.getOperand(2).getImm();
    if (Root.getOperand(3).isImm()) {
      unsigned Val = Root.getOperand(3).getImm();
      Imm = Imm << Val;
    }
    // Cast before shifting: left-shifting a negative int is undefined.
    uint64_t UImm = uint64_t(-Imm) << (64 - BitSize) >> (64 - BitSize);
    uint64_t Encoding;
    if (AArch64_AM::processLogicalImmediate(UImm, BitSize, Encoding)) {
      MachineInstrBuilder MIB1 =
          BuildMI(MF, Root.getDebugLoc(), TII->get(OrrOpc), NewVR)
              .addReg(ZeroReg)
              .addImm(Encoding);
      InsInstrs.push_back(MIB1);
      InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
      MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
    }
    break;
  }
  } // end switch (Pattern)
  // Record MUL and ADD/SUB for deletion.
  DelInstrs.push_back(MUL);
  DelInstrs.push_back(&Root);
}
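
// End-to-end example (illustrative) for MC_MULSUBW_OP2: the sequence
//   %vI = MADDWrrr %vA, %vB, %wzr   ; the "mul"
//   %vR = SUBWrr  %vC, %vI
// is replaced by
//   %vR = MSUBWrrr %vA, %vB, %vC    ; computes C - A*B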
/// \brief Replace a csinc-branch sequence by a simple conditional branch
///
/// Examples:
/// 1.
///   csinc w9, wzr, wzr, <condition code>
///   tbnz w9, #0, 0x44
/// to
///   b.<inverted condition code>
///
/// 2.
///   csinc w9, wzr, wzr, <condition code>
///   tbz w9, #0, 0x44
/// to
///   b.<condition code>
///
/// \param  MI Conditional Branch
/// \return True when the simple conditional branch is generated
///
bool AArch64InstrInfo::optimizeCondBranch(MachineInstr *MI) const {
  bool IsNegativeBranch = false;
  bool IsTestAndBranch = false;
  unsigned TargetBBInMI = 0;
  switch (MI->getOpcode()) {
  default:
    llvm_unreachable("Unknown branch instruction?");
  case AArch64::Bcc:
    return false;
  case AArch64::CBZW:
  case AArch64::CBZX:
    TargetBBInMI = 1;
    break;
  case AArch64::CBNZW:
  case AArch64::CBNZX:
    TargetBBInMI = 1;
    IsNegativeBranch = true;
    break;
  case AArch64::TBZW:
  case AArch64::TBZX:
    TargetBBInMI = 2;
    IsTestAndBranch = true;
    break;
  case AArch64::TBNZW:
  case AArch64::TBNZX:
    TargetBBInMI = 2;
    IsNegativeBranch = true;
    IsTestAndBranch = true;
    break;
  }
  // So we increment a zero register and test for bits other
  // than bit 0? Conservatively bail out in case the verifier
  // missed this case.
  if (IsTestAndBranch && MI->getOperand(1).getImm())
    return false;

  // Find Definition.
  assert(MI->getParent() && "Incomplete machine instruction\n");
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo *MRI = &MF->getRegInfo();
  unsigned VReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(VReg))
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(VReg);

  // Look for CSINC.
  if (!(DefMI->getOpcode() == AArch64::CSINCWr &&
        DefMI->getOperand(1).getReg() == AArch64::WZR &&
        DefMI->getOperand(2).getReg() == AArch64::WZR) &&
      !(DefMI->getOpcode() == AArch64::CSINCXr &&
        DefMI->getOperand(1).getReg() == AArch64::XZR &&
        DefMI->getOperand(2).getReg() == AArch64::XZR))
    return false;

  if (DefMI->findRegisterDefOperandIdx(AArch64::NZCV, true) != -1)
    return false;

  AArch64CC::CondCode CC = (AArch64CC::CondCode)DefMI->getOperand(3).getImm();
  bool CheckOnlyCCWrites = true;
  // Convert only when the condition code is not modified between
  // the CSINC and the branch. The CC may be used by other
  // instructions in between.
  if (modifiesConditionCode(DefMI, MI, CheckOnlyCCWrites, &getRegisterInfo()))
    return false;
  MachineBasicBlock &RefToMBB = *MBB;
  MachineBasicBlock *TBB = MI->getOperand(TargetBBInMI).getMBB();
  DebugLoc DL = MI->getDebugLoc();
  if (IsNegativeBranch)
    CC = AArch64CC::getInvertedCondCode(CC);
  BuildMI(RefToMBB, MI, DL, get(AArch64::Bcc)).addImm(CC).addMBB(TBB);
  MI->eraseFromParent();
  return true;
}