1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
48 // Map of register aliases registers via the .req directive.
49 StringMap<std::pair<bool, unsigned> > RegisterReqs;
51 AArch64TargetStreamer &getTargetStreamer() {
52 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53 return static_cast<AArch64TargetStreamer &>(TS);
56 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
58 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62 int tryParseRegister();
63 int tryMatchVectorRegister(StringRef &Kind, bool expected);
64 bool parseRegister(OperandVector &Operands);
65 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66 bool parseVectorList(OperandVector &Operands);
67 bool parseOperand(OperandVector &Operands, bool isCondCode,
70 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
71 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72 bool showMatchError(SMLoc Loc, unsigned ErrCode);
74 bool parseDirectiveWord(unsigned Size, SMLoc L);
75 bool parseDirectiveInst(SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
113 enum AArch64MatchResultTy {
114 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
118 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
119 const MCInstrInfo &MII, const MCTargetOptions &Options)
120 : MCTargetAsmParser(Options), STI(STI) {
121 MCAsmParserExtension::Initialize(Parser);
122 MCStreamer &S = getParser().getStreamer();
123 if (S.getTargetStreamer() == nullptr)
124 new AArch64TargetStreamer(S);
126 // Initialize the set of available features.
127 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
130 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131 SMLoc NameLoc, OperandVector &Operands) override;
132 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133 bool ParseDirective(AsmToken DirectiveID) override;
134 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135 unsigned Kind) override;
137 static bool classifySymbolRef(const MCExpr *Expr,
138 AArch64MCExpr::VariantKind &ELFRefKind,
139 MCSymbolRefExpr::VariantKind &DarwinRefKind,
142 } // end anonymous namespace
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
148 class AArch64Operand : public MCParsedAsmOperand {
166 SMLoc StartLoc, EndLoc;
171 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
179 struct VectorListOp {
182 unsigned NumElements;
183 unsigned ElementKind;
186 struct VectorIndexOp {
194 struct ShiftedImmOp {
196 unsigned ShiftAmount;
200 AArch64CC::CondCode Code;
204 unsigned Val; // Encoded 8-bit representation.
208 unsigned Val; // Not the enum since not all values have names.
218 uint32_t PStateField;
231 struct ShiftExtendOp {
232 AArch64_AM::ShiftExtendType Type;
234 bool HasExplicitAmount;
244 struct VectorListOp VectorList;
245 struct VectorIndexOp VectorIndex;
247 struct ShiftedImmOp ShiftedImm;
248 struct CondCodeOp CondCode;
249 struct FPImmOp FPImm;
250 struct BarrierOp Barrier;
251 struct SysRegOp SysReg;
252 struct SysCRImmOp SysCRImm;
253 struct PrefetchOp Prefetch;
254 struct ShiftExtendOp ShiftExtend;
257 // Keep the MCContext around as the MCExprs may need manipulated during
258 // the add<>Operands() calls.
262 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
264 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
266 StartLoc = o.StartLoc;
276 ShiftedImm = o.ShiftedImm;
279 CondCode = o.CondCode;
291 VectorList = o.VectorList;
294 VectorIndex = o.VectorIndex;
300 SysCRImm = o.SysCRImm;
303 Prefetch = o.Prefetch;
306 ShiftExtend = o.ShiftExtend;
311 /// getStartLoc - Get the location of the first token of this operand.
312 SMLoc getStartLoc() const override { return StartLoc; }
313 /// getEndLoc - Get the location of the last token of this operand.
314 SMLoc getEndLoc() const override { return EndLoc; }
316 StringRef getToken() const {
317 assert(Kind == k_Token && "Invalid access!");
318 return StringRef(Tok.Data, Tok.Length);
321 bool isTokenSuffix() const {
322 assert(Kind == k_Token && "Invalid access!");
326 const MCExpr *getImm() const {
327 assert(Kind == k_Immediate && "Invalid access!");
331 const MCExpr *getShiftedImmVal() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.Val;
336 unsigned getShiftedImmShift() const {
337 assert(Kind == k_ShiftedImm && "Invalid access!");
338 return ShiftedImm.ShiftAmount;
341 AArch64CC::CondCode getCondCode() const {
342 assert(Kind == k_CondCode && "Invalid access!");
343 return CondCode.Code;
346 unsigned getFPImm() const {
347 assert(Kind == k_FPImm && "Invalid access!");
351 unsigned getBarrier() const {
352 assert(Kind == k_Barrier && "Invalid access!");
356 StringRef getBarrierName() const {
357 assert(Kind == k_Barrier && "Invalid access!");
358 return StringRef(Barrier.Data, Barrier.Length);
361 unsigned getReg() const override {
362 assert(Kind == k_Register && "Invalid access!");
366 unsigned getVectorListStart() const {
367 assert(Kind == k_VectorList && "Invalid access!");
368 return VectorList.RegNum;
371 unsigned getVectorListCount() const {
372 assert(Kind == k_VectorList && "Invalid access!");
373 return VectorList.Count;
376 unsigned getVectorIndex() const {
377 assert(Kind == k_VectorIndex && "Invalid access!");
378 return VectorIndex.Val;
381 StringRef getSysReg() const {
382 assert(Kind == k_SysReg && "Invalid access!");
383 return StringRef(SysReg.Data, SysReg.Length);
386 unsigned getSysCR() const {
387 assert(Kind == k_SysCR && "Invalid access!");
391 unsigned getPrefetch() const {
392 assert(Kind == k_Prefetch && "Invalid access!");
396 StringRef getPrefetchName() const {
397 assert(Kind == k_Prefetch && "Invalid access!");
398 return StringRef(Prefetch.Data, Prefetch.Length);
401 AArch64_AM::ShiftExtendType getShiftExtendType() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.Type;
406 unsigned getShiftExtendAmount() const {
407 assert(Kind == k_ShiftExtend && "Invalid access!");
408 return ShiftExtend.Amount;
411 bool hasShiftExtendAmount() const {
412 assert(Kind == k_ShiftExtend && "Invalid access!");
413 return ShiftExtend.HasExplicitAmount;
416 bool isImm() const override { return Kind == k_Immediate; }
417 bool isMem() const override { return false; }
418 bool isSImm9() const {
421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424 int64_t Val = MCE->getValue();
425 return (Val >= -256 && Val < 256);
427 bool isSImm7s4() const {
430 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433 int64_t Val = MCE->getValue();
434 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
436 bool isSImm7s8() const {
439 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442 int64_t Val = MCE->getValue();
443 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
445 bool isSImm7s16() const {
448 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451 int64_t Val = MCE->getValue();
452 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
455 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
456 AArch64MCExpr::VariantKind ELFRefKind;
457 MCSymbolRefExpr::VariantKind DarwinRefKind;
459 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
461 // If we don't understand the expression, assume the best and
462 // let the fixup and relocation code deal with it.
466 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
467 ELFRefKind == AArch64MCExpr::VK_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
472 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
474 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
475 // Note that we don't range-check the addend. It's adjusted modulo page
476 // size when converted, so there is no "out of range" condition when using
478 return Addend >= 0 && (Addend % Scale) == 0;
479 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
480 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
481 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
488 template <int Scale> bool isUImm12Offset() const {
492 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
494 return isSymbolicUImm12Offset(getImm(), Scale);
496 int64_t Val = MCE->getValue();
497 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
500 bool isImm0_7() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= 0 && Val < 8);
509 bool isImm1_8() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val > 0 && Val < 9);
518 bool isImm0_15() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val >= 0 && Val < 16);
527 bool isImm1_16() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val > 0 && Val < 17);
536 bool isImm0_31() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 int64_t Val = MCE->getValue();
543 return (Val >= 0 && Val < 32);
545 bool isImm1_31() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 int64_t Val = MCE->getValue();
552 return (Val >= 1 && Val < 32);
554 bool isImm1_32() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 int64_t Val = MCE->getValue();
561 return (Val >= 1 && Val < 33);
563 bool isImm0_63() const {
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
569 int64_t Val = MCE->getValue();
570 return (Val >= 0 && Val < 64);
572 bool isImm1_63() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
579 return (Val >= 1 && Val < 64);
581 bool isImm1_64() const {
584 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
587 int64_t Val = MCE->getValue();
588 return (Val >= 1 && Val < 65);
590 bool isImm0_127() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 0 && Val < 128);
599 bool isImm0_255() const {
602 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 int64_t Val = MCE->getValue();
606 return (Val >= 0 && Val < 256);
608 bool isImm0_65535() const {
611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614 int64_t Val = MCE->getValue();
615 return (Val >= 0 && Val < 65536);
617 bool isImm32_63() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 int64_t Val = MCE->getValue();
624 return (Val >= 32 && Val < 64);
626 bool isLogicalImm32() const {
629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 int64_t Val = MCE->getValue();
633 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
636 return AArch64_AM::isLogicalImmediate(Val, 32);
638 bool isLogicalImm64() const {
641 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
644 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
646 bool isLogicalImm32Not() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
653 return AArch64_AM::isLogicalImmediate(Val, 32);
655 bool isLogicalImm64Not() const {
658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
661 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
663 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
664 bool isAddSubImm() const {
665 if (!isShiftedImm() && !isImm())
670 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
671 if (isShiftedImm()) {
672 unsigned Shift = ShiftedImm.ShiftAmount;
673 Expr = ShiftedImm.Val;
674 if (Shift != 0 && Shift != 12)
680 AArch64MCExpr::VariantKind ELFRefKind;
681 MCSymbolRefExpr::VariantKind DarwinRefKind;
683 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
684 DarwinRefKind, Addend)) {
685 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
686 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
687 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
688 || ELFRefKind == AArch64MCExpr::VK_LO12
689 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
690 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
691 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
692 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
693 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
694 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
695 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
698 // Otherwise it should be a real immediate in range:
699 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
700 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
702 bool isAddSubImmNeg() const {
703 if (!isShiftedImm() && !isImm())
708 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
709 if (isShiftedImm()) {
710 unsigned Shift = ShiftedImm.ShiftAmount;
711 Expr = ShiftedImm.Val;
712 if (Shift != 0 && Shift != 12)
717 // Otherwise it should be a real negative immediate in range:
718 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
719 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
721 bool isCondCode() const { return Kind == k_CondCode; }
722 bool isSIMDImmType10() const {
725 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
728 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
730 bool isBranchTarget26() const {
733 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
736 int64_t Val = MCE->getValue();
739 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
741 bool isPCRelLabel19() const {
744 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
747 int64_t Val = MCE->getValue();
750 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
752 bool isBranchTarget14() const {
755 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
758 int64_t Val = MCE->getValue();
761 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
765 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
769 AArch64MCExpr::VariantKind ELFRefKind;
770 MCSymbolRefExpr::VariantKind DarwinRefKind;
772 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
773 DarwinRefKind, Addend)) {
776 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
779 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
780 if (ELFRefKind == AllowedModifiers[i])
787 bool isMovZSymbolG3() const {
788 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
791 bool isMovZSymbolG2() const {
792 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
793 AArch64MCExpr::VK_TPREL_G2,
794 AArch64MCExpr::VK_DTPREL_G2});
797 bool isMovZSymbolG1() const {
798 return isMovWSymbol({
799 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
800 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
801 AArch64MCExpr::VK_DTPREL_G1,
805 bool isMovZSymbolG0() const {
806 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
807 AArch64MCExpr::VK_TPREL_G0,
808 AArch64MCExpr::VK_DTPREL_G0});
811 bool isMovKSymbolG3() const {
812 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
815 bool isMovKSymbolG2() const {
816 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
819 bool isMovKSymbolG1() const {
820 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
821 AArch64MCExpr::VK_TPREL_G1_NC,
822 AArch64MCExpr::VK_DTPREL_G1_NC});
825 bool isMovKSymbolG0() const {
827 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
828 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
831 template<int RegWidth, int Shift>
832 bool isMOVZMovAlias() const {
833 if (!isImm()) return false;
835 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836 if (!CE) return false;
837 uint64_t Value = CE->getValue();
840 Value &= 0xffffffffULL;
842 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
843 if (Value == 0 && Shift != 0)
846 return (Value & ~(0xffffULL << Shift)) == 0;
849 template<int RegWidth, int Shift>
850 bool isMOVNMovAlias() const {
851 if (!isImm()) return false;
853 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
854 if (!CE) return false;
855 uint64_t Value = CE->getValue();
857 // MOVZ takes precedence over MOVN.
858 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
859 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
864 Value &= 0xffffffffULL;
866 return (Value & ~(0xffffULL << Shift)) == 0;
869 bool isFPImm() const { return Kind == k_FPImm; }
870 bool isBarrier() const { return Kind == k_Barrier; }
871 bool isSysReg() const { return Kind == k_SysReg; }
872 bool isMRSSystemRegister() const {
873 if (!isSysReg()) return false;
875 return SysReg.MRSReg != -1U;
877 bool isMSRSystemRegister() const {
878 if (!isSysReg()) return false;
880 return SysReg.MSRReg != -1U;
882 bool isSystemPStateField() const {
883 if (!isSysReg()) return false;
885 return SysReg.PStateField != -1U;
887 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
888 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
889 bool isVectorRegLo() const {
890 return Kind == k_Register && Reg.isVector &&
891 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
894 bool isGPR32as64() const {
895 return Kind == k_Register && !Reg.isVector &&
896 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
898 bool isWSeqPair() const {
899 return Kind == k_Register && !Reg.isVector &&
900 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
903 bool isXSeqPair() const {
904 return Kind == k_Register && !Reg.isVector &&
905 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
909 bool isGPR64sp0() const {
910 return Kind == k_Register && !Reg.isVector &&
911 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
914 /// Is this a vector list with the type implicit (presumably attached to the
915 /// instruction itself)?
916 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
917 return Kind == k_VectorList && VectorList.Count == NumRegs &&
918 !VectorList.ElementKind;
921 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
922 bool isTypedVectorList() const {
923 if (Kind != k_VectorList)
925 if (VectorList.Count != NumRegs)
927 if (VectorList.ElementKind != ElementKind)
929 return VectorList.NumElements == NumElements;
932 bool isVectorIndex1() const {
933 return Kind == k_VectorIndex && VectorIndex.Val == 1;
935 bool isVectorIndexB() const {
936 return Kind == k_VectorIndex && VectorIndex.Val < 16;
938 bool isVectorIndexH() const {
939 return Kind == k_VectorIndex && VectorIndex.Val < 8;
941 bool isVectorIndexS() const {
942 return Kind == k_VectorIndex && VectorIndex.Val < 4;
944 bool isVectorIndexD() const {
945 return Kind == k_VectorIndex && VectorIndex.Val < 2;
947 bool isToken() const override { return Kind == k_Token; }
948 bool isTokenEqual(StringRef Str) const {
949 return Kind == k_Token && getToken() == Str;
951 bool isSysCR() const { return Kind == k_SysCR; }
952 bool isPrefetch() const { return Kind == k_Prefetch; }
953 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
954 bool isShifter() const {
955 if (!isShiftExtend())
958 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
959 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
960 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
961 ST == AArch64_AM::MSL);
963 bool isExtend() const {
964 if (!isShiftExtend())
967 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
968 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
969 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
970 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
971 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
972 ET == AArch64_AM::LSL) &&
973 getShiftExtendAmount() <= 4;
976 bool isExtend64() const {
979 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
980 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
981 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
983 bool isExtendLSL64() const {
986 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
987 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
988 ET == AArch64_AM::LSL) &&
989 getShiftExtendAmount() <= 4;
992 template<int Width> bool isMemXExtend() const {
995 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
996 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
997 (getShiftExtendAmount() == Log2_32(Width / 8) ||
998 getShiftExtendAmount() == 0);
1001 template<int Width> bool isMemWExtend() const {
1004 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1005 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1006 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1007 getShiftExtendAmount() == 0);
1010 template <unsigned width>
1011 bool isArithmeticShifter() const {
1015 // An arithmetic shifter is LSL, LSR, or ASR.
1016 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1017 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1018 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1021 template <unsigned width>
1022 bool isLogicalShifter() const {
1026 // A logical shifter is LSL, LSR, ASR or ROR.
1027 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1030 getShiftExtendAmount() < width;
1033 bool isMovImm32Shifter() const {
1037 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1038 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039 if (ST != AArch64_AM::LSL)
1041 uint64_t Val = getShiftExtendAmount();
1042 return (Val == 0 || Val == 16);
1045 bool isMovImm64Shifter() const {
1049 // A MOVi shifter is LSL of 0 or 16.
1050 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1051 if (ST != AArch64_AM::LSL)
1053 uint64_t Val = getShiftExtendAmount();
1054 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1057 bool isLogicalVecShifter() const {
1061 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1062 unsigned Shift = getShiftExtendAmount();
1063 return getShiftExtendType() == AArch64_AM::LSL &&
1064 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1067 bool isLogicalVecHalfWordShifter() const {
1068 if (!isLogicalVecShifter())
1071 // A logical vector shifter is a left shift by 0 or 8.
1072 unsigned Shift = getShiftExtendAmount();
1073 return getShiftExtendType() == AArch64_AM::LSL &&
1074 (Shift == 0 || Shift == 8);
1077 bool isMoveVecShifter() const {
1078 if (!isShiftExtend())
1081 // A logical vector shifter is a left shift by 8 or 16.
1082 unsigned Shift = getShiftExtendAmount();
1083 return getShiftExtendType() == AArch64_AM::MSL &&
1084 (Shift == 8 || Shift == 16);
1087 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1088 // to LDUR/STUR when the offset is not legal for the former but is for
1089 // the latter. As such, in addition to checking for being a legal unscaled
1090 // address, also check that it is not a legal scaled address. This avoids
1091 // ambiguity in the matcher.
1093 bool isSImm9OffsetFB() const {
1094 return isSImm9() && !isUImm12Offset<Width / 8>();
1097 bool isAdrpLabel() const {
1098 // Validation was handled during parsing, so we just sanity check that
1099 // something didn't go haywire.
1103 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1104 int64_t Val = CE->getValue();
1105 int64_t Min = - (4096 * (1LL << (21 - 1)));
1106 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1107 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1113 bool isAdrLabel() const {
1114 // Validation was handled during parsing, so we just sanity check that
1115 // something didn't go haywire.
1119 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1120 int64_t Val = CE->getValue();
1121 int64_t Min = - (1LL << (21 - 1));
1122 int64_t Max = ((1LL << (21 - 1)) - 1);
1123 return Val >= Min && Val <= Max;
1129 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1130 // Add as immediates when possible. Null MCExpr = 0.
1132 Inst.addOperand(MCOperand::createImm(0));
1133 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1134 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1136 Inst.addOperand(MCOperand::createExpr(Expr));
1139 void addRegOperands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::createReg(getReg()));
1144 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1147 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1149 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1150 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1151 RI->getEncodingValue(getReg()));
1153 Inst.addOperand(MCOperand::createReg(Reg));
1156 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
1159 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1160 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1163 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1164 assert(N == 1 && "Invalid number of operands!");
1166 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1167 Inst.addOperand(MCOperand::createReg(getReg()));
1170 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1171 assert(N == 1 && "Invalid number of operands!");
1172 Inst.addOperand(MCOperand::createReg(getReg()));
1175 template <unsigned NumRegs>
1176 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1177 assert(N == 1 && "Invalid number of operands!");
1178 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1179 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1180 unsigned FirstReg = FirstRegs[NumRegs - 1];
1183 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1186 template <unsigned NumRegs>
1187 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1190 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1191 unsigned FirstReg = FirstRegs[NumRegs - 1];
1194 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1197 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1198 assert(N == 1 && "Invalid number of operands!");
1199 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1202 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1203 assert(N == 1 && "Invalid number of operands!");
1204 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1207 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1208 assert(N == 1 && "Invalid number of operands!");
1209 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1212 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1213 assert(N == 1 && "Invalid number of operands!");
1214 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1217 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1222 void addImmOperands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 // If this is a pageoff symrefexpr with an addend, adjust the addend
1225 // to be only the page-offset portion. Otherwise, just add the expr
1227 addExpr(Inst, getImm());
1230 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1231 assert(N == 2 && "Invalid number of operands!");
1232 if (isShiftedImm()) {
1233 addExpr(Inst, getShiftedImmVal());
1234 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1236 addExpr(Inst, getImm());
1237 Inst.addOperand(MCOperand::createImm(0));
1241 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1242 assert(N == 2 && "Invalid number of operands!");
1244 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1245 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1246 int64_t Val = -CE->getValue();
1247 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1249 Inst.addOperand(MCOperand::createImm(Val));
1250 Inst.addOperand(MCOperand::createImm(ShiftAmt));
1253 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 Inst.addOperand(MCOperand::createImm(getCondCode()));
1258 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1262 addExpr(Inst, getImm());
1264 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1267 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1268 addImmOperands(Inst, N);
1272 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1277 Inst.addOperand(MCOperand::createExpr(getImm()));
1280 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
1283 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1286 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1289 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1290 assert(N == 1 && "Invalid number of operands!");
1291 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1292 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1295 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1296 assert(N == 1 && "Invalid number of operands!");
1297 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1298 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1301 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1302 assert(N == 1 && "Invalid number of operands!");
1303 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1304 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
1307 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1310 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1313 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1316 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1319 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1320 assert(N == 1 && "Invalid number of operands!");
1321 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1322 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1325 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1326 assert(N == 1 && "Invalid number of operands!");
1327 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1328 assert(MCE && "Invalid constant immediate operand!");
1329 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1332 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1333 assert(N == 1 && "Invalid number of operands!");
1334 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1335 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1338 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1341 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1344 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1345 assert(N == 1 && "Invalid number of operands!");
1346 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1347 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1350 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1351 assert(N == 1 && "Invalid number of operands!");
1352 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1353 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1356 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1359 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1362 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1363 assert(N == 1 && "Invalid number of operands!");
1364 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1365 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1368 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1371 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1374 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1375 assert(N == 1 && "Invalid number of operands!");
1376 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1377 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1380 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1383 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1386 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1387 assert(N == 1 && "Invalid number of operands!");
1388 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1389 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1392 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1393 assert(N == 1 && "Invalid number of operands!");
1394 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1396 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1397 Inst.addOperand(MCOperand::createImm(encoding));
1400 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1401 assert(N == 1 && "Invalid number of operands!");
1402 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1403 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1404 Inst.addOperand(MCOperand::createImm(encoding));
1407 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1408 assert(N == 1 && "Invalid number of operands!");
1409 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1410 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1411 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1412 Inst.addOperand(MCOperand::createImm(encoding));
1415 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1419 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1420 Inst.addOperand(MCOperand::createImm(encoding));
1423 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1424 assert(N == 1 && "Invalid number of operands!");
1425 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1426 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1427 Inst.addOperand(MCOperand::createImm(encoding));
1430 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1431 // Branch operands don't encode the low bits, so shift them off
1432 // here. If it's a label, however, just put it on directly as there's
1433 // not enough information now to do anything.
1434 assert(N == 1 && "Invalid number of operands!");
1435 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1437 addExpr(Inst, getImm());
1440 assert(MCE && "Invalid constant immediate operand!");
1441 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1444 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1445 // Branch operands don't encode the low bits, so shift them off
1446 // here. If it's a label, however, just put it on directly as there's
1447 // not enough information now to do anything.
1448 assert(N == 1 && "Invalid number of operands!");
1449 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1451 addExpr(Inst, getImm());
1454 assert(MCE && "Invalid constant immediate operand!");
1455 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1458 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1459 // Branch operands don't encode the low bits, so shift them off
1460 // here. If it's a label, however, just put it on directly as there's
1461 // not enough information now to do anything.
1462 assert(N == 1 && "Invalid number of operands!");
1463 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1465 addExpr(Inst, getImm());
1468 assert(MCE && "Invalid constant immediate operand!");
1469 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1472 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1473 assert(N == 1 && "Invalid number of operands!");
1474 Inst.addOperand(MCOperand::createImm(getFPImm()));
1477 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1478 assert(N == 1 && "Invalid number of operands!");
1479 Inst.addOperand(MCOperand::createImm(getBarrier()));
1482 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1483 assert(N == 1 && "Invalid number of operands!");
1485 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1488 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1489 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1494 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1497 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1500 void addSysCROperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && "Invalid number of operands!");
1502 Inst.addOperand(MCOperand::createImm(getSysCR()));
1505 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1506 assert(N == 1 && "Invalid number of operands!");
1507 Inst.addOperand(MCOperand::createImm(getPrefetch()));
1510 void addShifterOperands(MCInst &Inst, unsigned N) const {
1511 assert(N == 1 && "Invalid number of operands!");
1513 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1514 Inst.addOperand(MCOperand::createImm(Imm));
1517 void addExtendOperands(MCInst &Inst, unsigned N) const {
1518 assert(N == 1 && "Invalid number of operands!");
1519 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1520 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1521 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1522 Inst.addOperand(MCOperand::createImm(Imm));
1525 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1526 assert(N == 1 && "Invalid number of operands!");
1527 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1528 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1529 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1530 Inst.addOperand(MCOperand::createImm(Imm));
1533 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1534 assert(N == 2 && "Invalid number of operands!");
1535 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1536 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1537 Inst.addOperand(MCOperand::createImm(IsSigned));
1538 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1541 // For 8-bit load/store instructions with a register offset, both the
1542 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1543 // they're disambiguated by whether the shift was explicit or implicit rather
1545 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1546 assert(N == 2 && "Invalid number of operands!");
1547 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1548 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1549 Inst.addOperand(MCOperand::createImm(IsSigned));
1550 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
1554 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1555 assert(N == 1 && "Invalid number of operands!");
1557 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1558 uint64_t Value = CE->getValue();
1559 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1563 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1564 assert(N == 1 && "Invalid number of operands!");
1566 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1567 uint64_t Value = CE->getValue();
1568 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
1571 void print(raw_ostream &OS) const override;
1573 static std::unique_ptr<AArch64Operand>
1574 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1575 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1576 Op->Tok.Data = Str.data();
1577 Op->Tok.Length = Str.size();
1578 Op->Tok.IsSuffix = IsSuffix;
1584 static std::unique_ptr<AArch64Operand>
1585 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1586 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1587 Op->Reg.RegNum = RegNum;
1588 Op->Reg.isVector = isVector;
1594 static std::unique_ptr<AArch64Operand>
1595 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1596 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1597 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1598 Op->VectorList.RegNum = RegNum;
1599 Op->VectorList.Count = Count;
1600 Op->VectorList.NumElements = NumElements;
1601 Op->VectorList.ElementKind = ElementKind;
1607 static std::unique_ptr<AArch64Operand>
1608 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1609 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1610 Op->VectorIndex.Val = Idx;
1616 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1617 SMLoc E, MCContext &Ctx) {
1618 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1625 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1626 unsigned ShiftAmount,
1629 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1630 Op->ShiftedImm .Val = Val;
1631 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1637 static std::unique_ptr<AArch64Operand>
1638 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1639 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1640 Op->CondCode.Code = Code;
1646 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1648 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1649 Op->FPImm.Val = Val;
1655 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1659 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1660 Op->Barrier.Val = Val;
1661 Op->Barrier.Data = Str.data();
1662 Op->Barrier.Length = Str.size();
1668 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1671 uint32_t PStateField,
1673 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1674 Op->SysReg.Data = Str.data();
1675 Op->SysReg.Length = Str.size();
1676 Op->SysReg.MRSReg = MRSReg;
1677 Op->SysReg.MSRReg = MSRReg;
1678 Op->SysReg.PStateField = PStateField;
1684 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1685 SMLoc E, MCContext &Ctx) {
1686 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1687 Op->SysCRImm.Val = Val;
1693 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1697 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1698 Op->Prefetch.Val = Val;
1699 Op->Barrier.Data = Str.data();
1700 Op->Barrier.Length = Str.size();
1706 static std::unique_ptr<AArch64Operand>
1707 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1708 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1709 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1710 Op->ShiftExtend.Type = ShOp;
1711 Op->ShiftExtend.Amount = Val;
1712 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1719 } // end anonymous namespace.
1721 void AArch64Operand::print(raw_ostream &OS) const {
1724 OS << "<fpimm " << getFPImm() << "("
1725 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1728 StringRef Name = getBarrierName();
1730 OS << "<barrier " << Name << ">";
1732 OS << "<barrier invalid #" << getBarrier() << ">";
1738 case k_ShiftedImm: {
1739 unsigned Shift = getShiftedImmShift();
1740 OS << "<shiftedimm ";
1741 OS << *getShiftedImmVal();
1742 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1746 OS << "<condcode " << getCondCode() << ">";
1749 OS << "<register " << getReg() << ">";
1751 case k_VectorList: {
1752 OS << "<vectorlist ";
1753 unsigned Reg = getVectorListStart();
1754 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1755 OS << Reg + i << " ";
1760 OS << "<vectorindex " << getVectorIndex() << ">";
1763 OS << "<sysreg: " << getSysReg() << '>';
1766 OS << "'" << getToken() << "'";
1769 OS << "c" << getSysCR();
1772 StringRef Name = getPrefetchName();
1774 OS << "<prfop " << Name << ">";
1776 OS << "<prfop invalid #" << getPrefetch() << ">";
1779 case k_ShiftExtend: {
1780 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1781 << getShiftExtendAmount();
1782 if (!hasShiftExtendAmount())
1790 /// @name Auto-generated Match Functions
1793 static unsigned MatchRegisterName(StringRef Name);
1797 static unsigned matchVectorRegName(StringRef Name) {
1798 return StringSwitch<unsigned>(Name.lower())
1799 .Case("v0", AArch64::Q0)
1800 .Case("v1", AArch64::Q1)
1801 .Case("v2", AArch64::Q2)
1802 .Case("v3", AArch64::Q3)
1803 .Case("v4", AArch64::Q4)
1804 .Case("v5", AArch64::Q5)
1805 .Case("v6", AArch64::Q6)
1806 .Case("v7", AArch64::Q7)
1807 .Case("v8", AArch64::Q8)
1808 .Case("v9", AArch64::Q9)
1809 .Case("v10", AArch64::Q10)
1810 .Case("v11", AArch64::Q11)
1811 .Case("v12", AArch64::Q12)
1812 .Case("v13", AArch64::Q13)
1813 .Case("v14", AArch64::Q14)
1814 .Case("v15", AArch64::Q15)
1815 .Case("v16", AArch64::Q16)
1816 .Case("v17", AArch64::Q17)
1817 .Case("v18", AArch64::Q18)
1818 .Case("v19", AArch64::Q19)
1819 .Case("v20", AArch64::Q20)
1820 .Case("v21", AArch64::Q21)
1821 .Case("v22", AArch64::Q22)
1822 .Case("v23", AArch64::Q23)
1823 .Case("v24", AArch64::Q24)
1824 .Case("v25", AArch64::Q25)
1825 .Case("v26", AArch64::Q26)
1826 .Case("v27", AArch64::Q27)
1827 .Case("v28", AArch64::Q28)
1828 .Case("v29", AArch64::Q29)
1829 .Case("v30", AArch64::Q30)
1830 .Case("v31", AArch64::Q31)
1834 static bool isValidVectorKind(StringRef Name) {
1835 return StringSwitch<bool>(Name.lower())
1845 // Accept the width neutral ones, too, for verbose syntax. If those
1846 // aren't used in the right places, the token operand won't match so
1847 // all will work out.
1855 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1856 char &ElementKind) {
1857 assert(isValidVectorKind(Name));
1859 ElementKind = Name.lower()[Name.size() - 1];
1862 if (Name.size() == 2)
1865 // Parse the lane count
1866 Name = Name.drop_front();
1867 while (isdigit(Name.front())) {
1868 NumElements = 10 * NumElements + (Name.front() - '0');
1869 Name = Name.drop_front();
1873 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1875 StartLoc = getLoc();
1876 RegNo = tryParseRegister();
1877 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1878 return (RegNo == (unsigned)-1);
1881 // Matches a register name or register alias previously defined by '.req'
1882 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1884 unsigned RegNum = isVector ? matchVectorRegName(Name)
1885 : MatchRegisterName(Name);
1888 // Check for aliases registered via .req. Canonicalize to lower case.
1889 // That's more consistent since register names are case insensitive, and
1890 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1891 auto Entry = RegisterReqs.find(Name.lower());
1892 if (Entry == RegisterReqs.end())
1894 // set RegNum if the match is the right kind of register
1895 if (isVector == Entry->getValue().first)
1896 RegNum = Entry->getValue().second;
1901 /// tryParseRegister - Try to parse a register name. The token must be an
1902 /// Identifier when called, and if it is a register name the token is eaten and
1903 /// the register is added to the operand list.
1904 int AArch64AsmParser::tryParseRegister() {
1905 MCAsmParser &Parser = getParser();
1906 const AsmToken &Tok = Parser.getTok();
1907 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1909 std::string lowerCase = Tok.getString().lower();
1910 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1911 // Also handle a few aliases of registers.
1913 RegNum = StringSwitch<unsigned>(lowerCase)
1914 .Case("fp", AArch64::FP)
1915 .Case("lr", AArch64::LR)
1916 .Case("x31", AArch64::XZR)
1917 .Case("w31", AArch64::WZR)
1923 Parser.Lex(); // Eat identifier token.
1927 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1928 /// kind specifier. If it is a register specifier, eat the token and return it.
1929 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1930 MCAsmParser &Parser = getParser();
1931 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1932 TokError("vector register expected");
1936 StringRef Name = Parser.getTok().getString();
1937 // If there is a kind specifier, it's separated from the register name by
1939 size_t Start = 0, Next = Name.find('.');
1940 StringRef Head = Name.slice(Start, Next);
1941 unsigned RegNum = matchRegisterNameAlias(Head, true);
1944 if (Next != StringRef::npos) {
1945 Kind = Name.slice(Next, StringRef::npos);
1946 if (!isValidVectorKind(Kind)) {
1947 TokError("invalid vector kind qualifier");
1951 Parser.Lex(); // Eat the register token.
1956 TokError("vector register expected");
1960 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1961 AArch64AsmParser::OperandMatchResultTy
1962 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1963 MCAsmParser &Parser = getParser();
1966 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1967 Error(S, "Expected cN operand where 0 <= N <= 15");
1968 return MatchOperand_ParseFail;
1971 StringRef Tok = Parser.getTok().getIdentifier();
1972 if (Tok[0] != 'c' && Tok[0] != 'C') {
1973 Error(S, "Expected cN operand where 0 <= N <= 15");
1974 return MatchOperand_ParseFail;
1978 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1979 if (BadNum || CRNum > 15) {
1980 Error(S, "Expected cN operand where 0 <= N <= 15");
1981 return MatchOperand_ParseFail;
1984 Parser.Lex(); // Eat identifier token.
1986 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1987 return MatchOperand_Success;
1990 /// tryParsePrefetch - Try to parse a prefetch operand.
1991 AArch64AsmParser::OperandMatchResultTy
1992 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1993 MCAsmParser &Parser = getParser();
1995 const AsmToken &Tok = Parser.getTok();
1996 // Either an identifier for named values or a 5-bit immediate.
1997 bool Hash = Tok.is(AsmToken::Hash);
1998 if (Hash || Tok.is(AsmToken::Integer)) {
2000 Parser.Lex(); // Eat hash token.
2001 const MCExpr *ImmVal;
2002 if (getParser().parseExpression(ImmVal))
2003 return MatchOperand_ParseFail;
2005 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2007 TokError("immediate value expected for prefetch operand");
2008 return MatchOperand_ParseFail;
2010 unsigned prfop = MCE->getValue();
2012 TokError("prefetch operand out of range, [0,31] expected");
2013 return MatchOperand_ParseFail;
2017 auto Mapper = AArch64PRFM::PRFMMapper();
2019 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2020 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2022 return MatchOperand_Success;
2025 if (Tok.isNot(AsmToken::Identifier)) {
2026 TokError("pre-fetch hint expected");
2027 return MatchOperand_ParseFail;
2031 auto Mapper = AArch64PRFM::PRFMMapper();
2033 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2035 TokError("pre-fetch hint expected");
2036 return MatchOperand_ParseFail;
2039 Parser.Lex(); // Eat identifier token.
2040 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2042 return MatchOperand_Success;
2045 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2047 AArch64AsmParser::OperandMatchResultTy
2048 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2049 MCAsmParser &Parser = getParser();
2053 if (Parser.getTok().is(AsmToken::Hash)) {
2054 Parser.Lex(); // Eat hash token.
2057 if (parseSymbolicImmVal(Expr))
2058 return MatchOperand_ParseFail;
2060 AArch64MCExpr::VariantKind ELFRefKind;
2061 MCSymbolRefExpr::VariantKind DarwinRefKind;
2063 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2064 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2065 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2066 // No modifier was specified at all; this is the syntax for an ELF basic
2067 // ADRP relocation (unfortunately).
2069 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2070 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2071 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2073 Error(S, "gotpage label reference not allowed an addend");
2074 return MatchOperand_ParseFail;
2075 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2076 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2077 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2078 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2079 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2080 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2081 // The operand must be an @page or @gotpage qualified symbolref.
2082 Error(S, "page or gotpage label reference expected");
2083 return MatchOperand_ParseFail;
2087 // We have either a label reference possibly with addend or an immediate. The
2088 // addend is a raw value here. The linker will adjust it to only reference the
2090 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2091 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2093 return MatchOperand_Success;
2096 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2098 AArch64AsmParser::OperandMatchResultTy
2099 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2100 MCAsmParser &Parser = getParser();
2104 if (Parser.getTok().is(AsmToken::Hash)) {
2105 Parser.Lex(); // Eat hash token.
2108 if (getParser().parseExpression(Expr))
2109 return MatchOperand_ParseFail;
2111 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2112 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2114 return MatchOperand_Success;
2117 /// tryParseFPImm - A floating point immediate expression operand.
2118 AArch64AsmParser::OperandMatchResultTy
2119 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2120 MCAsmParser &Parser = getParser();
2124 if (Parser.getTok().is(AsmToken::Hash)) {
2125 Parser.Lex(); // Eat '#'
2129 // Handle negation, as that still comes through as a separate token.
2130 bool isNegative = false;
2131 if (Parser.getTok().is(AsmToken::Minus)) {
2135 const AsmToken &Tok = Parser.getTok();
2136 if (Tok.is(AsmToken::Real)) {
2137 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2139 RealVal.changeSign();
2141 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2142 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2143 Parser.Lex(); // Eat the token.
2144 // Check for out of range values. As an exception, we let Zero through,
2145 // as we handle that special case in post-processing before matching in
2146 // order to use the zero register for it.
2147 if (Val == -1 && !RealVal.isPosZero()) {
2148 TokError("expected compatible register or floating-point constant");
2149 return MatchOperand_ParseFail;
2151 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2152 return MatchOperand_Success;
2154 if (Tok.is(AsmToken::Integer)) {
2156 if (!isNegative && Tok.getString().startswith("0x")) {
2157 Val = Tok.getIntVal();
2158 if (Val > 255 || Val < 0) {
2159 TokError("encoded floating point value out of range");
2160 return MatchOperand_ParseFail;
2163 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2164 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2165 // If we had a '-' in front, toggle the sign bit.
2166 IntVal ^= (uint64_t)isNegative << 63;
2167 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2169 Parser.Lex(); // Eat the token.
2170 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2171 return MatchOperand_Success;
2175 return MatchOperand_NoMatch;
2177 TokError("invalid floating point immediate");
2178 return MatchOperand_ParseFail;
2181 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2182 AArch64AsmParser::OperandMatchResultTy
2183 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2184 MCAsmParser &Parser = getParser();
2187 if (Parser.getTok().is(AsmToken::Hash))
2188 Parser.Lex(); // Eat '#'
2189 else if (Parser.getTok().isNot(AsmToken::Integer))
2190 // Operand should start from # or should be integer, emit error otherwise.
2191 return MatchOperand_NoMatch;
2194 if (parseSymbolicImmVal(Imm))
2195 return MatchOperand_ParseFail;
2196 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2197 uint64_t ShiftAmount = 0;
2198 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2200 int64_t Val = MCE->getValue();
2201 if (Val > 0xfff && (Val & 0xfff) == 0) {
2202 Imm = MCConstantExpr::create(Val >> 12, getContext());
2206 SMLoc E = Parser.getTok().getLoc();
2207 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2209 return MatchOperand_Success;
2215 // The optional operand must be "lsl #N" where N is non-negative.
2216 if (!Parser.getTok().is(AsmToken::Identifier) ||
2217 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2218 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2219 return MatchOperand_ParseFail;
2225 if (Parser.getTok().is(AsmToken::Hash)) {
2229 if (Parser.getTok().isNot(AsmToken::Integer)) {
2230 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2231 return MatchOperand_ParseFail;
2234 int64_t ShiftAmount = Parser.getTok().getIntVal();
2236 if (ShiftAmount < 0) {
2237 Error(Parser.getTok().getLoc(), "positive shift amount required");
2238 return MatchOperand_ParseFail;
2240 Parser.Lex(); // Eat the number
2242 SMLoc E = Parser.getTok().getLoc();
2243 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2244 S, E, getContext()));
2245 return MatchOperand_Success;
2248 /// parseCondCodeString - Parse a Condition Code string.
2249 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2250 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2251 .Case("eq", AArch64CC::EQ)
2252 .Case("ne", AArch64CC::NE)
2253 .Case("cs", AArch64CC::HS)
2254 .Case("hs", AArch64CC::HS)
2255 .Case("cc", AArch64CC::LO)
2256 .Case("lo", AArch64CC::LO)
2257 .Case("mi", AArch64CC::MI)
2258 .Case("pl", AArch64CC::PL)
2259 .Case("vs", AArch64CC::VS)
2260 .Case("vc", AArch64CC::VC)
2261 .Case("hi", AArch64CC::HI)
2262 .Case("ls", AArch64CC::LS)
2263 .Case("ge", AArch64CC::GE)
2264 .Case("lt", AArch64CC::LT)
2265 .Case("gt", AArch64CC::GT)
2266 .Case("le", AArch64CC::LE)
2267 .Case("al", AArch64CC::AL)
2268 .Case("nv", AArch64CC::NV)
2269 .Default(AArch64CC::Invalid);
2273 /// parseCondCode - Parse a Condition Code operand.
2274 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2275 bool invertCondCode) {
2276 MCAsmParser &Parser = getParser();
2278 const AsmToken &Tok = Parser.getTok();
2279 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2281 StringRef Cond = Tok.getString();
2282 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2283 if (CC == AArch64CC::Invalid)
2284 return TokError("invalid condition code");
2285 Parser.Lex(); // Eat identifier token.
2287 if (invertCondCode) {
2288 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2289 return TokError("condition codes AL and NV are invalid for this instruction");
2290 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2294 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2298 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2299 /// them if present.
2300 AArch64AsmParser::OperandMatchResultTy
2301 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2302 MCAsmParser &Parser = getParser();
2303 const AsmToken &Tok = Parser.getTok();
2304 std::string LowerID = Tok.getString().lower();
2305 AArch64_AM::ShiftExtendType ShOp =
2306 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2307 .Case("lsl", AArch64_AM::LSL)
2308 .Case("lsr", AArch64_AM::LSR)
2309 .Case("asr", AArch64_AM::ASR)
2310 .Case("ror", AArch64_AM::ROR)
2311 .Case("msl", AArch64_AM::MSL)
2312 .Case("uxtb", AArch64_AM::UXTB)
2313 .Case("uxth", AArch64_AM::UXTH)
2314 .Case("uxtw", AArch64_AM::UXTW)
2315 .Case("uxtx", AArch64_AM::UXTX)
2316 .Case("sxtb", AArch64_AM::SXTB)
2317 .Case("sxth", AArch64_AM::SXTH)
2318 .Case("sxtw", AArch64_AM::SXTW)
2319 .Case("sxtx", AArch64_AM::SXTX)
2320 .Default(AArch64_AM::InvalidShiftExtend);
2322 if (ShOp == AArch64_AM::InvalidShiftExtend)
2323 return MatchOperand_NoMatch;
2325 SMLoc S = Tok.getLoc();
2328 bool Hash = getLexer().is(AsmToken::Hash);
2329 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2330 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2331 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2332 ShOp == AArch64_AM::MSL) {
2333 // We expect a number here.
2334 TokError("expected #imm after shift specifier");
2335 return MatchOperand_ParseFail;
2338 // "extend" type operatoins don't need an immediate, #0 is implicit.
2339 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2341 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2342 return MatchOperand_Success;
2346 Parser.Lex(); // Eat the '#'.
2348 // Make sure we do actually have a number or a parenthesized expression.
2349 SMLoc E = Parser.getTok().getLoc();
2350 if (!Parser.getTok().is(AsmToken::Integer) &&
2351 !Parser.getTok().is(AsmToken::LParen)) {
2352 Error(E, "expected integer shift amount");
2353 return MatchOperand_ParseFail;
2356 const MCExpr *ImmVal;
2357 if (getParser().parseExpression(ImmVal))
2358 return MatchOperand_ParseFail;
2360 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2362 Error(E, "expected constant '#imm' after shift specifier");
2363 return MatchOperand_ParseFail;
2366 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2367 Operands.push_back(AArch64Operand::CreateShiftExtend(
2368 ShOp, MCE->getValue(), true, S, E, getContext()));
2369 return MatchOperand_Success;
2372 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2373 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2374 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2375 OperandVector &Operands) {
2376 if (Name.find('.') != StringRef::npos)
2377 return TokError("invalid operand");
2381 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2383 MCAsmParser &Parser = getParser();
2384 const AsmToken &Tok = Parser.getTok();
2385 StringRef Op = Tok.getString();
2386 SMLoc S = Tok.getLoc();
2388 const MCExpr *Expr = nullptr;
2390 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2392 Expr = MCConstantExpr::create(op1, getContext()); \
2393 Operands.push_back( \
2394 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2395 Operands.push_back( \
2396 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2397 Operands.push_back( \
2398 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2399 Expr = MCConstantExpr::create(op2, getContext()); \
2400 Operands.push_back( \
2401 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2404 if (Mnemonic == "ic") {
2405 if (!Op.compare_lower("ialluis")) {
2406 // SYS #0, C7, C1, #0
2407 SYS_ALIAS(0, 7, 1, 0);
2408 } else if (!Op.compare_lower("iallu")) {
2409 // SYS #0, C7, C5, #0
2410 SYS_ALIAS(0, 7, 5, 0);
2411 } else if (!Op.compare_lower("ivau")) {
2412 // SYS #3, C7, C5, #1
2413 SYS_ALIAS(3, 7, 5, 1);
2415 return TokError("invalid operand for IC instruction");
2417 } else if (Mnemonic == "dc") {
2418 if (!Op.compare_lower("zva")) {
2419 // SYS #3, C7, C4, #1
2420 SYS_ALIAS(3, 7, 4, 1);
2421 } else if (!Op.compare_lower("ivac")) {
2422 // SYS #3, C7, C6, #1
2423 SYS_ALIAS(0, 7, 6, 1);
2424 } else if (!Op.compare_lower("isw")) {
2425 // SYS #0, C7, C6, #2
2426 SYS_ALIAS(0, 7, 6, 2);
2427 } else if (!Op.compare_lower("cvac")) {
2428 // SYS #3, C7, C10, #1
2429 SYS_ALIAS(3, 7, 10, 1);
2430 } else if (!Op.compare_lower("csw")) {
2431 // SYS #0, C7, C10, #2
2432 SYS_ALIAS(0, 7, 10, 2);
2433 } else if (!Op.compare_lower("cvau")) {
2434 // SYS #3, C7, C11, #1
2435 SYS_ALIAS(3, 7, 11, 1);
2436 } else if (!Op.compare_lower("civac")) {
2437 // SYS #3, C7, C14, #1
2438 SYS_ALIAS(3, 7, 14, 1);
2439 } else if (!Op.compare_lower("cisw")) {
2440 // SYS #0, C7, C14, #2
2441 SYS_ALIAS(0, 7, 14, 2);
2443 return TokError("invalid operand for DC instruction");
2445 } else if (Mnemonic == "at") {
2446 if (!Op.compare_lower("s1e1r")) {
2447 // SYS #0, C7, C8, #0
2448 SYS_ALIAS(0, 7, 8, 0);
2449 } else if (!Op.compare_lower("s1e2r")) {
2450 // SYS #4, C7, C8, #0
2451 SYS_ALIAS(4, 7, 8, 0);
2452 } else if (!Op.compare_lower("s1e3r")) {
2453 // SYS #6, C7, C8, #0
2454 SYS_ALIAS(6, 7, 8, 0);
2455 } else if (!Op.compare_lower("s1e1w")) {
2456 // SYS #0, C7, C8, #1
2457 SYS_ALIAS(0, 7, 8, 1);
2458 } else if (!Op.compare_lower("s1e2w")) {
2459 // SYS #4, C7, C8, #1
2460 SYS_ALIAS(4, 7, 8, 1);
2461 } else if (!Op.compare_lower("s1e3w")) {
2462 // SYS #6, C7, C8, #1
2463 SYS_ALIAS(6, 7, 8, 1);
2464 } else if (!Op.compare_lower("s1e0r")) {
2465 // SYS #0, C7, C8, #3
2466 SYS_ALIAS(0, 7, 8, 2);
2467 } else if (!Op.compare_lower("s1e0w")) {
2468 // SYS #0, C7, C8, #3
2469 SYS_ALIAS(0, 7, 8, 3);
2470 } else if (!Op.compare_lower("s12e1r")) {
2471 // SYS #4, C7, C8, #4
2472 SYS_ALIAS(4, 7, 8, 4);
2473 } else if (!Op.compare_lower("s12e1w")) {
2474 // SYS #4, C7, C8, #5
2475 SYS_ALIAS(4, 7, 8, 5);
2476 } else if (!Op.compare_lower("s12e0r")) {
2477 // SYS #4, C7, C8, #6
2478 SYS_ALIAS(4, 7, 8, 6);
2479 } else if (!Op.compare_lower("s12e0w")) {
2480 // SYS #4, C7, C8, #7
2481 SYS_ALIAS(4, 7, 8, 7);
2483 return TokError("invalid operand for AT instruction");
2485 } else if (Mnemonic == "tlbi") {
2486 if (!Op.compare_lower("vmalle1is")) {
2487 // SYS #0, C8, C3, #0
2488 SYS_ALIAS(0, 8, 3, 0);
2489 } else if (!Op.compare_lower("alle2is")) {
2490 // SYS #4, C8, C3, #0
2491 SYS_ALIAS(4, 8, 3, 0);
2492 } else if (!Op.compare_lower("alle3is")) {
2493 // SYS #6, C8, C3, #0
2494 SYS_ALIAS(6, 8, 3, 0);
2495 } else if (!Op.compare_lower("vae1is")) {
2496 // SYS #0, C8, C3, #1
2497 SYS_ALIAS(0, 8, 3, 1);
2498 } else if (!Op.compare_lower("vae2is")) {
2499 // SYS #4, C8, C3, #1
2500 SYS_ALIAS(4, 8, 3, 1);
2501 } else if (!Op.compare_lower("vae3is")) {
2502 // SYS #6, C8, C3, #1
2503 SYS_ALIAS(6, 8, 3, 1);
2504 } else if (!Op.compare_lower("aside1is")) {
2505 // SYS #0, C8, C3, #2
2506 SYS_ALIAS(0, 8, 3, 2);
2507 } else if (!Op.compare_lower("vaae1is")) {
2508 // SYS #0, C8, C3, #3
2509 SYS_ALIAS(0, 8, 3, 3);
2510 } else if (!Op.compare_lower("alle1is")) {
2511 // SYS #4, C8, C3, #4
2512 SYS_ALIAS(4, 8, 3, 4);
2513 } else if (!Op.compare_lower("vale1is")) {
2514 // SYS #0, C8, C3, #5
2515 SYS_ALIAS(0, 8, 3, 5);
2516 } else if (!Op.compare_lower("vaale1is")) {
2517 // SYS #0, C8, C3, #7
2518 SYS_ALIAS(0, 8, 3, 7);
2519 } else if (!Op.compare_lower("vmalle1")) {
2520 // SYS #0, C8, C7, #0
2521 SYS_ALIAS(0, 8, 7, 0);
2522 } else if (!Op.compare_lower("alle2")) {
2523 // SYS #4, C8, C7, #0
2524 SYS_ALIAS(4, 8, 7, 0);
2525 } else if (!Op.compare_lower("vale2is")) {
2526 // SYS #4, C8, C3, #5
2527 SYS_ALIAS(4, 8, 3, 5);
2528 } else if (!Op.compare_lower("vale3is")) {
2529 // SYS #6, C8, C3, #5
2530 SYS_ALIAS(6, 8, 3, 5);
2531 } else if (!Op.compare_lower("alle3")) {
2532 // SYS #6, C8, C7, #0
2533 SYS_ALIAS(6, 8, 7, 0);
2534 } else if (!Op.compare_lower("vae1")) {
2535 // SYS #0, C8, C7, #1
2536 SYS_ALIAS(0, 8, 7, 1);
2537 } else if (!Op.compare_lower("vae2")) {
2538 // SYS #4, C8, C7, #1
2539 SYS_ALIAS(4, 8, 7, 1);
2540 } else if (!Op.compare_lower("vae3")) {
2541 // SYS #6, C8, C7, #1
2542 SYS_ALIAS(6, 8, 7, 1);
2543 } else if (!Op.compare_lower("aside1")) {
2544 // SYS #0, C8, C7, #2
2545 SYS_ALIAS(0, 8, 7, 2);
2546 } else if (!Op.compare_lower("vaae1")) {
2547 // SYS #0, C8, C7, #3
2548 SYS_ALIAS(0, 8, 7, 3);
2549 } else if (!Op.compare_lower("alle1")) {
2550 // SYS #4, C8, C7, #4
2551 SYS_ALIAS(4, 8, 7, 4);
2552 } else if (!Op.compare_lower("vale1")) {
2553 // SYS #0, C8, C7, #5
2554 SYS_ALIAS(0, 8, 7, 5);
2555 } else if (!Op.compare_lower("vale2")) {
2556 // SYS #4, C8, C7, #5
2557 SYS_ALIAS(4, 8, 7, 5);
2558 } else if (!Op.compare_lower("vale3")) {
2559 // SYS #6, C8, C7, #5
2560 SYS_ALIAS(6, 8, 7, 5);
2561 } else if (!Op.compare_lower("vaale1")) {
2562 // SYS #0, C8, C7, #7
2563 SYS_ALIAS(0, 8, 7, 7);
2564 } else if (!Op.compare_lower("ipas2e1")) {
2565 // SYS #4, C8, C4, #1
2566 SYS_ALIAS(4, 8, 4, 1);
2567 } else if (!Op.compare_lower("ipas2le1")) {
2568 // SYS #4, C8, C4, #5
2569 SYS_ALIAS(4, 8, 4, 5);
2570 } else if (!Op.compare_lower("ipas2e1is")) {
2571 // SYS #4, C8, C4, #1
2572 SYS_ALIAS(4, 8, 0, 1);
2573 } else if (!Op.compare_lower("ipas2le1is")) {
2574 // SYS #4, C8, C4, #5
2575 SYS_ALIAS(4, 8, 0, 5);
2576 } else if (!Op.compare_lower("vmalls12e1")) {
2577 // SYS #4, C8, C7, #6
2578 SYS_ALIAS(4, 8, 7, 6);
2579 } else if (!Op.compare_lower("vmalls12e1is")) {
2580 // SYS #4, C8, C3, #6
2581 SYS_ALIAS(4, 8, 3, 6);
2583 return TokError("invalid operand for TLBI instruction");
2589 Parser.Lex(); // Eat operand.
2591 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2592 bool HasRegister = false;
2594 // Check for the optional register operand.
2595 if (getLexer().is(AsmToken::Comma)) {
2596 Parser.Lex(); // Eat comma.
2598 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2599 return TokError("expected register operand");
2604 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2605 Parser.eatToEndOfStatement();
2606 return TokError("unexpected token in argument list");
2609 if (ExpectRegister && !HasRegister) {
2610 return TokError("specified " + Mnemonic + " op requires a register");
2612 else if (!ExpectRegister && HasRegister) {
2613 return TokError("specified " + Mnemonic + " op does not use a register");
2616 Parser.Lex(); // Consume the EndOfStatement
2620 AArch64AsmParser::OperandMatchResultTy
2621 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2622 MCAsmParser &Parser = getParser();
2623 const AsmToken &Tok = Parser.getTok();
2625 // Can be either a #imm style literal or an option name
2626 bool Hash = Tok.is(AsmToken::Hash);
2627 if (Hash || Tok.is(AsmToken::Integer)) {
2628 // Immediate operand.
2630 Parser.Lex(); // Eat the '#'
2631 const MCExpr *ImmVal;
2632 SMLoc ExprLoc = getLoc();
2633 if (getParser().parseExpression(ImmVal))
2634 return MatchOperand_ParseFail;
2635 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2637 Error(ExprLoc, "immediate value expected for barrier operand");
2638 return MatchOperand_ParseFail;
2640 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2641 Error(ExprLoc, "barrier operand out of range");
2642 return MatchOperand_ParseFail;
2645 auto Mapper = AArch64DB::DBarrierMapper();
2647 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2648 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2649 ExprLoc, getContext()));
2650 return MatchOperand_Success;
2653 if (Tok.isNot(AsmToken::Identifier)) {
2654 TokError("invalid operand for instruction");
2655 return MatchOperand_ParseFail;
2659 auto Mapper = AArch64DB::DBarrierMapper();
2661 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2663 TokError("invalid barrier option name");
2664 return MatchOperand_ParseFail;
2667 // The only valid named option for ISB is 'sy'
2668 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2669 TokError("'sy' or #imm operand expected");
2670 return MatchOperand_ParseFail;
2673 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2674 getLoc(), getContext()));
2675 Parser.Lex(); // Consume the option
2677 return MatchOperand_Success;
2680 AArch64AsmParser::OperandMatchResultTy
2681 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2682 MCAsmParser &Parser = getParser();
2683 const AsmToken &Tok = Parser.getTok();
2685 if (Tok.isNot(AsmToken::Identifier))
2686 return MatchOperand_NoMatch;
2689 auto MRSMapper = AArch64SysReg::MRSMapper();
2690 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2692 assert(IsKnown == (MRSReg != -1U) &&
2693 "register should be -1 if and only if it's unknown");
2695 auto MSRMapper = AArch64SysReg::MSRMapper();
2696 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2698 assert(IsKnown == (MSRReg != -1U) &&
2699 "register should be -1 if and only if it's unknown");
2701 auto PStateMapper = AArch64PState::PStateMapper();
2702 uint32_t PStateField =
2703 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2704 assert(IsKnown == (PStateField != -1U) &&
2705 "register should be -1 if and only if it's unknown");
2707 Operands.push_back(AArch64Operand::CreateSysReg(
2708 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2709 Parser.Lex(); // Eat identifier
2711 return MatchOperand_Success;
2714 /// tryParseVectorRegister - Parse a vector register operand.
2715 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2716 MCAsmParser &Parser = getParser();
2717 if (Parser.getTok().isNot(AsmToken::Identifier))
2721 // Check for a vector register specifier first.
2723 int64_t Reg = tryMatchVectorRegister(Kind, false);
2727 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2728 // If there was an explicit qualifier, that goes on as a literal text
2732 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2734 // If there is an index specifier following the register, parse that too.
2735 if (Parser.getTok().is(AsmToken::LBrac)) {
2736 SMLoc SIdx = getLoc();
2737 Parser.Lex(); // Eat left bracket token.
2739 const MCExpr *ImmVal;
2740 if (getParser().parseExpression(ImmVal))
2742 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2744 TokError("immediate value expected for vector index");
2749 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2750 Error(E, "']' expected");
2754 Parser.Lex(); // Eat right bracket token.
2756 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2763 /// parseRegister - Parse a non-vector register operand.
2764 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2765 MCAsmParser &Parser = getParser();
2767 // Try for a vector register.
2768 if (!tryParseVectorRegister(Operands))
2771 // Try for a scalar register.
2772 int64_t Reg = tryParseRegister();
2776 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2778 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2779 // as a string token in the instruction itself.
2780 if (getLexer().getKind() == AsmToken::LBrac) {
2781 SMLoc LBracS = getLoc();
2783 const AsmToken &Tok = Parser.getTok();
2784 if (Tok.is(AsmToken::Integer)) {
2785 SMLoc IntS = getLoc();
2786 int64_t Val = Tok.getIntVal();
2789 if (getLexer().getKind() == AsmToken::RBrac) {
2790 SMLoc RBracS = getLoc();
2793 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2795 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2797 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
2807 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2808 MCAsmParser &Parser = getParser();
2809 bool HasELFModifier = false;
2810 AArch64MCExpr::VariantKind RefKind;
2812 if (Parser.getTok().is(AsmToken::Colon)) {
2813 Parser.Lex(); // Eat ':"
2814 HasELFModifier = true;
2816 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2817 Error(Parser.getTok().getLoc(),
2818 "expect relocation specifier in operand after ':'");
2822 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2823 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2824 .Case("lo12", AArch64MCExpr::VK_LO12)
2825 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2826 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2827 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2828 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2829 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2830 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2831 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2832 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2833 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2834 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2835 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2836 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2837 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2838 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2839 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2840 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2841 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2842 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2843 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2844 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2845 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2846 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2847 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2848 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2849 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2850 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2851 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2852 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2853 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2854 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2855 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2856 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2857 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2858 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2859 .Default(AArch64MCExpr::VK_INVALID);
2861 if (RefKind == AArch64MCExpr::VK_INVALID) {
2862 Error(Parser.getTok().getLoc(),
2863 "expect relocation specifier in operand after ':'");
2867 Parser.Lex(); // Eat identifier
2869 if (Parser.getTok().isNot(AsmToken::Colon)) {
2870 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2873 Parser.Lex(); // Eat ':'
2876 if (getParser().parseExpression(ImmVal))
2880 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2885 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
2886 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2887 MCAsmParser &Parser = getParser();
2888 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2890 Parser.Lex(); // Eat left bracket token.
2892 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2895 int64_t PrevReg = FirstReg;
2898 if (Parser.getTok().is(AsmToken::Minus)) {
2899 Parser.Lex(); // Eat the minus.
2901 SMLoc Loc = getLoc();
2903 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2906 // Any Kind suffices must match on all regs in the list.
2907 if (Kind != NextKind)
2908 return Error(Loc, "mismatched register size suffix");
2910 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2912 if (Space == 0 || Space > 3) {
2913 return Error(Loc, "invalid number of vectors");
2919 while (Parser.getTok().is(AsmToken::Comma)) {
2920 Parser.Lex(); // Eat the comma token.
2922 SMLoc Loc = getLoc();
2924 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2927 // Any Kind suffices must match on all regs in the list.
2928 if (Kind != NextKind)
2929 return Error(Loc, "mismatched register size suffix");
2931 // Registers must be incremental (with wraparound at 31)
2932 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2933 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2934 return Error(Loc, "registers must be sequential");
2941 if (Parser.getTok().isNot(AsmToken::RCurly))
2942 return Error(getLoc(), "'}' expected");
2943 Parser.Lex(); // Eat the '}' token.
2946 return Error(S, "invalid number of vectors");
2948 unsigned NumElements = 0;
2949 char ElementKind = 0;
2951 parseValidVectorKind(Kind, NumElements, ElementKind);
2953 Operands.push_back(AArch64Operand::CreateVectorList(
2954 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2956 // If there is an index specifier following the list, parse that too.
2957 if (Parser.getTok().is(AsmToken::LBrac)) {
2958 SMLoc SIdx = getLoc();
2959 Parser.Lex(); // Eat left bracket token.
2961 const MCExpr *ImmVal;
2962 if (getParser().parseExpression(ImmVal))
2964 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2966 TokError("immediate value expected for vector index");
2971 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2972 Error(E, "']' expected");
2976 Parser.Lex(); // Eat right bracket token.
2978 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2984 AArch64AsmParser::OperandMatchResultTy
2985 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2986 MCAsmParser &Parser = getParser();
2987 const AsmToken &Tok = Parser.getTok();
2988 if (!Tok.is(AsmToken::Identifier))
2989 return MatchOperand_NoMatch;
2991 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2993 MCContext &Ctx = getContext();
2994 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2995 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2996 return MatchOperand_NoMatch;
2999 Parser.Lex(); // Eat register
3001 if (Parser.getTok().isNot(AsmToken::Comma)) {
3003 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3004 return MatchOperand_Success;
3006 Parser.Lex(); // Eat comma.
3008 if (Parser.getTok().is(AsmToken::Hash))
3009 Parser.Lex(); // Eat hash
3011 if (Parser.getTok().isNot(AsmToken::Integer)) {
3012 Error(getLoc(), "index must be absent or #0");
3013 return MatchOperand_ParseFail;
3016 const MCExpr *ImmVal;
3017 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3018 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3019 Error(getLoc(), "index must be absent or #0");
3020 return MatchOperand_ParseFail;
3024 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3025 return MatchOperand_Success;
3028 /// parseOperand - Parse a arm instruction operand. For now this parses the
3029 /// operand regardless of the mnemonic.
3030 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3031 bool invertCondCode) {
3032 MCAsmParser &Parser = getParser();
3033 // Check if the current operand has a custom associated parser, if so, try to
3034 // custom parse the operand, or fallback to the general approach.
3035 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3036 if (ResTy == MatchOperand_Success)
3038 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3039 // there was a match, but an error occurred, in which case, just return that
3040 // the operand parsing failed.
3041 if (ResTy == MatchOperand_ParseFail)
3044 // Nothing custom, so do general case parsing.
3046 switch (getLexer().getKind()) {
3050 if (parseSymbolicImmVal(Expr))
3051 return Error(S, "invalid operand");
3053 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3054 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3057 case AsmToken::LBrac: {
3058 SMLoc Loc = Parser.getTok().getLoc();
3059 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3061 Parser.Lex(); // Eat '['
3063 // There's no comma after a '[', so we can parse the next operand
3065 return parseOperand(Operands, false, false);
3067 case AsmToken::LCurly:
3068 return parseVectorList(Operands);
3069 case AsmToken::Identifier: {
3070 // If we're expecting a Condition Code operand, then just parse that.
3072 return parseCondCode(Operands, invertCondCode);
3074 // If it's a register name, parse it.
3075 if (!parseRegister(Operands))
3078 // This could be an optional "shift" or "extend" operand.
3079 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3080 // We can only continue if no tokens were eaten.
3081 if (GotShift != MatchOperand_NoMatch)
3084 // This was not a register so parse other operands that start with an
3085 // identifier (like labels) as expressions and create them as immediates.
3086 const MCExpr *IdVal;
3088 if (getParser().parseExpression(IdVal))
3091 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3092 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3095 case AsmToken::Integer:
3096 case AsmToken::Real:
3097 case AsmToken::Hash: {
3098 // #42 -> immediate.
3100 if (getLexer().is(AsmToken::Hash))
3103 // Parse a negative sign
3104 bool isNegative = false;
3105 if (Parser.getTok().is(AsmToken::Minus)) {
3107 // We need to consume this token only when we have a Real, otherwise
3108 // we let parseSymbolicImmVal take care of it
3109 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3113 // The only Real that should come through here is a literal #0.0 for
3114 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3115 // so convert the value.
3116 const AsmToken &Tok = Parser.getTok();
3117 if (Tok.is(AsmToken::Real)) {
3118 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3119 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3120 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3121 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3122 Mnemonic != "fcmlt")
3123 return TokError("unexpected floating point literal");
3124 else if (IntVal != 0 || isNegative)
3125 return TokError("expected floating-point constant #0.0");
3126 Parser.Lex(); // Eat the token.
3129 AArch64Operand::CreateToken("#0", false, S, getContext()));
3131 AArch64Operand::CreateToken(".0", false, S, getContext()));
3135 const MCExpr *ImmVal;
3136 if (parseSymbolicImmVal(ImmVal))
3139 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3140 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3143 case AsmToken::Equal: {
3144 SMLoc Loc = Parser.getTok().getLoc();
3145 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3146 return Error(Loc, "unexpected token in operand");
3147 Parser.Lex(); // Eat '='
3148 const MCExpr *SubExprVal;
3149 if (getParser().parseExpression(SubExprVal))
3152 if (Operands.size() < 2 ||
3153 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3157 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3158 Operands[1]->getReg());
3160 MCContext& Ctx = getContext();
3161 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3162 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3163 if (isa<MCConstantExpr>(SubExprVal)) {
3164 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3165 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3166 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3170 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3171 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3172 Operands.push_back(AArch64Operand::CreateImm(
3173 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3175 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3176 ShiftAmt, true, S, E, Ctx));
3179 APInt Simm = APInt(64, Imm << ShiftAmt);
3180 // check if the immediate is an unsigned or signed 32-bit int for W regs
3181 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3182 return Error(Loc, "Immediate too large for register");
3184 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3185 const MCExpr *CPLoc =
3186 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3187 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3193 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
3195 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3196 StringRef Name, SMLoc NameLoc,
3197 OperandVector &Operands) {
3198 MCAsmParser &Parser = getParser();
3199 Name = StringSwitch<StringRef>(Name.lower())
3200 .Case("beq", "b.eq")
3201 .Case("bne", "b.ne")
3202 .Case("bhs", "b.hs")
3203 .Case("bcs", "b.cs")
3204 .Case("blo", "b.lo")
3205 .Case("bcc", "b.cc")
3206 .Case("bmi", "b.mi")
3207 .Case("bpl", "b.pl")
3208 .Case("bvs", "b.vs")
3209 .Case("bvc", "b.vc")
3210 .Case("bhi", "b.hi")
3211 .Case("bls", "b.ls")
3212 .Case("bge", "b.ge")
3213 .Case("blt", "b.lt")
3214 .Case("bgt", "b.gt")
3215 .Case("ble", "b.le")
3216 .Case("bal", "b.al")
3217 .Case("bnv", "b.nv")
3220 // First check for the AArch64-specific .req directive.
3221 if (Parser.getTok().is(AsmToken::Identifier) &&
3222 Parser.getTok().getIdentifier() == ".req") {
3223 parseDirectiveReq(Name, NameLoc);
3224 // We always return 'error' for this, as we're done with this
3225 // statement and don't need to match the 'instruction."
3229 // Create the leading tokens for the mnemonic, split by '.' characters.
3230 size_t Start = 0, Next = Name.find('.');
3231 StringRef Head = Name.slice(Start, Next);
3233 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3234 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3235 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3236 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3237 Parser.eatToEndOfStatement();
3242 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3245 // Handle condition codes for a branch mnemonic
3246 if (Head == "b" && Next != StringRef::npos) {
3248 Next = Name.find('.', Start + 1);
3249 Head = Name.slice(Start + 1, Next);
3251 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3252 (Head.data() - Name.data()));
3253 AArch64CC::CondCode CC = parseCondCodeString(Head);
3254 if (CC == AArch64CC::Invalid)
3255 return Error(SuffixLoc, "invalid condition code");
3257 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3259 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3262 // Add the remaining tokens in the mnemonic.
3263 while (Next != StringRef::npos) {
3265 Next = Name.find('.', Start + 1);
3266 Head = Name.slice(Start, Next);
3267 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3268 (Head.data() - Name.data()) + 1);
3270 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3273 // Conditional compare instructions have a Condition Code operand, which needs
3274 // to be parsed and an immediate operand created.
3275 bool condCodeFourthOperand =
3276 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3277 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3278 Head == "csinc" || Head == "csinv" || Head == "csneg");
3280 // These instructions are aliases to some of the conditional select
3281 // instructions. However, the condition code is inverted in the aliased
3284 // FIXME: Is this the correct way to handle these? Or should the parser
3285 // generate the aliased instructions directly?
3286 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3287 bool condCodeThirdOperand =
3288 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3290 // Read the remaining operands.
3291 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3292 // Read the first operand.
3293 if (parseOperand(Operands, false, false)) {
3294 Parser.eatToEndOfStatement();
3299 while (getLexer().is(AsmToken::Comma)) {
3300 Parser.Lex(); // Eat the comma.
3302 // Parse and remember the operand.
3303 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3304 (N == 3 && condCodeThirdOperand) ||
3305 (N == 2 && condCodeSecondOperand),
3306 condCodeSecondOperand || condCodeThirdOperand)) {
3307 Parser.eatToEndOfStatement();
3311 // After successfully parsing some operands there are two special cases to
3312 // consider (i.e. notional operands not separated by commas). Both are due
3313 // to memory specifiers:
3314 // + An RBrac will end an address for load/store/prefetch
3315 // + An '!' will indicate a pre-indexed operation.
3317 // It's someone else's responsibility to make sure these tokens are sane
3318 // in the given context!
3319 if (Parser.getTok().is(AsmToken::RBrac)) {
3320 SMLoc Loc = Parser.getTok().getLoc();
3321 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3326 if (Parser.getTok().is(AsmToken::Exclaim)) {
3327 SMLoc Loc = Parser.getTok().getLoc();
3328 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3337 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3338 SMLoc Loc = Parser.getTok().getLoc();
3339 Parser.eatToEndOfStatement();
3340 return Error(Loc, "unexpected token in argument list");
3343 Parser.Lex(); // Consume the EndOfStatement
3347 // FIXME: This entire function is a giant hack to provide us with decent
3348 // operand range validation/diagnostics until TableGen/MC can be extended
3349 // to support autogeneration of this kind of validation.
3350 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3351 SmallVectorImpl<SMLoc> &Loc) {
3352 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3353 // Check for indexed addressing modes w/ the base register being the
3354 // same as a destination/source register or pair load where
3355 // the Rt == Rt2. All of those are undefined behaviour.
3356 switch (Inst.getOpcode()) {
3357 case AArch64::LDPSWpre:
3358 case AArch64::LDPWpost:
3359 case AArch64::LDPWpre:
3360 case AArch64::LDPXpost:
3361 case AArch64::LDPXpre: {
3362 unsigned Rt = Inst.getOperand(1).getReg();
3363 unsigned Rt2 = Inst.getOperand(2).getReg();
3364 unsigned Rn = Inst.getOperand(3).getReg();
3365 if (RI->isSubRegisterEq(Rn, Rt))
3366 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3367 "is also a destination");
3368 if (RI->isSubRegisterEq(Rn, Rt2))
3369 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3370 "is also a destination");
3373 case AArch64::LDPDi:
3374 case AArch64::LDPQi:
3375 case AArch64::LDPSi:
3376 case AArch64::LDPSWi:
3377 case AArch64::LDPWi:
3378 case AArch64::LDPXi: {
3379 unsigned Rt = Inst.getOperand(0).getReg();
3380 unsigned Rt2 = Inst.getOperand(1).getReg();
3382 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3385 case AArch64::LDPDpost:
3386 case AArch64::LDPDpre:
3387 case AArch64::LDPQpost:
3388 case AArch64::LDPQpre:
3389 case AArch64::LDPSpost:
3390 case AArch64::LDPSpre:
3391 case AArch64::LDPSWpost: {
3392 unsigned Rt = Inst.getOperand(1).getReg();
3393 unsigned Rt2 = Inst.getOperand(2).getReg();
3395 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3398 case AArch64::STPDpost:
3399 case AArch64::STPDpre:
3400 case AArch64::STPQpost:
3401 case AArch64::STPQpre:
3402 case AArch64::STPSpost:
3403 case AArch64::STPSpre:
3404 case AArch64::STPWpost:
3405 case AArch64::STPWpre:
3406 case AArch64::STPXpost:
3407 case AArch64::STPXpre: {
3408 unsigned Rt = Inst.getOperand(1).getReg();
3409 unsigned Rt2 = Inst.getOperand(2).getReg();
3410 unsigned Rn = Inst.getOperand(3).getReg();
3411 if (RI->isSubRegisterEq(Rn, Rt))
3412 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3413 "is also a source");
3414 if (RI->isSubRegisterEq(Rn, Rt2))
3415 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3416 "is also a source");
3419 case AArch64::LDRBBpre:
3420 case AArch64::LDRBpre:
3421 case AArch64::LDRHHpre:
3422 case AArch64::LDRHpre:
3423 case AArch64::LDRSBWpre:
3424 case AArch64::LDRSBXpre:
3425 case AArch64::LDRSHWpre:
3426 case AArch64::LDRSHXpre:
3427 case AArch64::LDRSWpre:
3428 case AArch64::LDRWpre:
3429 case AArch64::LDRXpre:
3430 case AArch64::LDRBBpost:
3431 case AArch64::LDRBpost:
3432 case AArch64::LDRHHpost:
3433 case AArch64::LDRHpost:
3434 case AArch64::LDRSBWpost:
3435 case AArch64::LDRSBXpost:
3436 case AArch64::LDRSHWpost:
3437 case AArch64::LDRSHXpost:
3438 case AArch64::LDRSWpost:
3439 case AArch64::LDRWpost:
3440 case AArch64::LDRXpost: {
3441 unsigned Rt = Inst.getOperand(1).getReg();
3442 unsigned Rn = Inst.getOperand(2).getReg();
3443 if (RI->isSubRegisterEq(Rn, Rt))
3444 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3445 "is also a source");
3448 case AArch64::STRBBpost:
3449 case AArch64::STRBpost:
3450 case AArch64::STRHHpost:
3451 case AArch64::STRHpost:
3452 case AArch64::STRWpost:
3453 case AArch64::STRXpost:
3454 case AArch64::STRBBpre:
3455 case AArch64::STRBpre:
3456 case AArch64::STRHHpre:
3457 case AArch64::STRHpre:
3458 case AArch64::STRWpre:
3459 case AArch64::STRXpre: {
3460 unsigned Rt = Inst.getOperand(1).getReg();
3461 unsigned Rn = Inst.getOperand(2).getReg();
3462 if (RI->isSubRegisterEq(Rn, Rt))
3463 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3464 "is also a source");
3469 // Now check immediate ranges. Separate from the above as there is overlap
3470 // in the instructions being checked and this keeps the nested conditionals
3472 switch (Inst.getOpcode()) {
3473 case AArch64::ADDSWri:
3474 case AArch64::ADDSXri:
3475 case AArch64::ADDWri:
3476 case AArch64::ADDXri:
3477 case AArch64::SUBSWri:
3478 case AArch64::SUBSXri:
3479 case AArch64::SUBWri:
3480 case AArch64::SUBXri: {
3481 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3482 // some slight duplication here.
3483 if (Inst.getOperand(2).isExpr()) {
3484 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3485 AArch64MCExpr::VariantKind ELFRefKind;
3486 MCSymbolRefExpr::VariantKind DarwinRefKind;
3488 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3489 return Error(Loc[2], "invalid immediate expression");
3492 // Only allow these with ADDXri.
3493 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3494 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3495 Inst.getOpcode() == AArch64::ADDXri)
3498 // Only allow these with ADDXri/ADDWri
3499 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3500 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3501 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3502 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3503 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3504 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3505 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3506 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3507 (Inst.getOpcode() == AArch64::ADDXri ||
3508 Inst.getOpcode() == AArch64::ADDWri))
3511 // Don't allow expressions in the immediate field otherwise
3512 return Error(Loc[2], "invalid immediate expression");
3521 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3523 case Match_MissingFeature:
3525 "instruction requires a CPU feature not currently enabled");
3526 case Match_InvalidOperand:
3527 return Error(Loc, "invalid operand for instruction");
3528 case Match_InvalidSuffix:
3529 return Error(Loc, "invalid type suffix for instruction");
3530 case Match_InvalidCondCode:
3531 return Error(Loc, "expected AArch64 condition code");
3532 case Match_AddSubRegExtendSmall:
3534 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3535 case Match_AddSubRegExtendLarge:
3537 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3538 case Match_AddSubSecondSource:
3540 "expected compatible register, symbol or integer in range [0, 4095]");
3541 case Match_LogicalSecondSource:
3542 return Error(Loc, "expected compatible register or logical immediate");
3543 case Match_InvalidMovImm32Shift:
3544 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3545 case Match_InvalidMovImm64Shift:
3546 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3547 case Match_AddSubRegShift32:
3549 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3550 case Match_AddSubRegShift64:
3552 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3553 case Match_InvalidFPImm:
3555 "expected compatible register or floating-point constant");
3556 case Match_InvalidMemoryIndexedSImm9:
3557 return Error(Loc, "index must be an integer in range [-256, 255].");
3558 case Match_InvalidMemoryIndexed4SImm7:
3559 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3560 case Match_InvalidMemoryIndexed8SImm7:
3561 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3562 case Match_InvalidMemoryIndexed16SImm7:
3563 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3564 case Match_InvalidMemoryWExtend8:
3566 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3567 case Match_InvalidMemoryWExtend16:
3569 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3570 case Match_InvalidMemoryWExtend32:
3572 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3573 case Match_InvalidMemoryWExtend64:
3575 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3576 case Match_InvalidMemoryWExtend128:
3578 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3579 case Match_InvalidMemoryXExtend8:
3581 "expected 'lsl' or 'sxtx' with optional shift of #0");
3582 case Match_InvalidMemoryXExtend16:
3584 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3585 case Match_InvalidMemoryXExtend32:
3587 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3588 case Match_InvalidMemoryXExtend64:
3590 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3591 case Match_InvalidMemoryXExtend128:
3593 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3594 case Match_InvalidMemoryIndexed1:
3595 return Error(Loc, "index must be an integer in range [0, 4095].");
3596 case Match_InvalidMemoryIndexed2:
3597 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3598 case Match_InvalidMemoryIndexed4:
3599 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3600 case Match_InvalidMemoryIndexed8:
3601 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3602 case Match_InvalidMemoryIndexed16:
3603 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3604 case Match_InvalidImm0_7:
3605 return Error(Loc, "immediate must be an integer in range [0, 7].");
3606 case Match_InvalidImm0_15:
3607 return Error(Loc, "immediate must be an integer in range [0, 15].");
3608 case Match_InvalidImm0_31:
3609 return Error(Loc, "immediate must be an integer in range [0, 31].");
3610 case Match_InvalidImm0_63:
3611 return Error(Loc, "immediate must be an integer in range [0, 63].");
3612 case Match_InvalidImm0_127:
3613 return Error(Loc, "immediate must be an integer in range [0, 127].");
3614 case Match_InvalidImm0_65535:
3615 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3616 case Match_InvalidImm1_8:
3617 return Error(Loc, "immediate must be an integer in range [1, 8].");
3618 case Match_InvalidImm1_16:
3619 return Error(Loc, "immediate must be an integer in range [1, 16].");
3620 case Match_InvalidImm1_32:
3621 return Error(Loc, "immediate must be an integer in range [1, 32].");
3622 case Match_InvalidImm1_64:
3623 return Error(Loc, "immediate must be an integer in range [1, 64].");
3624 case Match_InvalidIndex1:
3625 return Error(Loc, "expected lane specifier '[1]'");
3626 case Match_InvalidIndexB:
3627 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3628 case Match_InvalidIndexH:
3629 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3630 case Match_InvalidIndexS:
3631 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3632 case Match_InvalidIndexD:
3633 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3634 case Match_InvalidLabel:
3635 return Error(Loc, "expected label or encodable integer pc offset");
3637 return Error(Loc, "expected readable system register");
3639 return Error(Loc, "expected writable system register or pstate");
3640 case Match_MnemonicFail:
3641 return Error(Loc, "unrecognized instruction mnemonic");
3643 llvm_unreachable("unexpected error code!");
3647 static const char *getSubtargetFeatureName(uint64_t Val);
3649 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3650 OperandVector &Operands,
3652 uint64_t &ErrorInfo,
3653 bool MatchingInlineAsm) {
3654 assert(!Operands.empty() && "Unexpect empty operand list!");
3655 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3656 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3658 StringRef Tok = Op.getToken();
3659 unsigned NumOperands = Operands.size();
3661 if (NumOperands == 4 && Tok == "lsl") {
3662 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3663 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3664 if (Op2.isReg() && Op3.isImm()) {
3665 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3667 uint64_t Op3Val = Op3CE->getValue();
3668 uint64_t NewOp3Val = 0;
3669 uint64_t NewOp4Val = 0;
3670 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3672 NewOp3Val = (32 - Op3Val) & 0x1f;
3673 NewOp4Val = 31 - Op3Val;
3675 NewOp3Val = (64 - Op3Val) & 0x3f;
3676 NewOp4Val = 63 - Op3Val;
3679 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3680 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3682 Operands[0] = AArch64Operand::CreateToken(
3683 "ubfm", false, Op.getStartLoc(), getContext());
3684 Operands.push_back(AArch64Operand::CreateImm(
3685 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3686 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3687 Op3.getEndLoc(), getContext());
3690 } else if (NumOperands == 4 && Tok == "bfc") {
3691 // FIXME: Horrible hack to handle BFC->BFM alias.
3692 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3693 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3694 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3696 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3697 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3698 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3700 if (LSBCE && WidthCE) {
3701 uint64_t LSB = LSBCE->getValue();
3702 uint64_t Width = WidthCE->getValue();
3704 uint64_t RegWidth = 0;
3705 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3711 if (LSB >= RegWidth)
3712 return Error(LSBOp.getStartLoc(),
3713 "expected integer in range [0, 31]");
3714 if (Width < 1 || Width > RegWidth)
3715 return Error(WidthOp.getStartLoc(),
3716 "expected integer in range [1, 32]");
3720 ImmR = (32 - LSB) & 0x1f;
3722 ImmR = (64 - LSB) & 0x3f;
3724 uint64_t ImmS = Width - 1;
3726 if (ImmR != 0 && ImmS >= ImmR)
3727 return Error(WidthOp.getStartLoc(),
3728 "requested insert overflows register");
3730 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3731 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3732 Operands[0] = AArch64Operand::CreateToken(
3733 "bfm", false, Op.getStartLoc(), getContext());
3734 Operands[2] = AArch64Operand::CreateReg(
3735 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3736 SMLoc(), getContext());
3737 Operands[3] = AArch64Operand::CreateImm(
3738 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3739 Operands.emplace_back(
3740 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3741 WidthOp.getEndLoc(), getContext()));
3744 } else if (NumOperands == 5) {
3745 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3746 // UBFIZ -> UBFM aliases.
3747 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3748 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3749 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3750 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3752 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3753 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3754 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3756 if (Op3CE && Op4CE) {
3757 uint64_t Op3Val = Op3CE->getValue();
3758 uint64_t Op4Val = Op4CE->getValue();
3760 uint64_t RegWidth = 0;
3761 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3767 if (Op3Val >= RegWidth)
3768 return Error(Op3.getStartLoc(),
3769 "expected integer in range [0, 31]");
3770 if (Op4Val < 1 || Op4Val > RegWidth)
3771 return Error(Op4.getStartLoc(),
3772 "expected integer in range [1, 32]");
3774 uint64_t NewOp3Val = 0;
3776 NewOp3Val = (32 - Op3Val) & 0x1f;
3778 NewOp3Val = (64 - Op3Val) & 0x3f;
3780 uint64_t NewOp4Val = Op4Val - 1;
3782 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3783 return Error(Op4.getStartLoc(),
3784 "requested insert overflows register");
3786 const MCExpr *NewOp3 =
3787 MCConstantExpr::create(NewOp3Val, getContext());
3788 const MCExpr *NewOp4 =
3789 MCConstantExpr::create(NewOp4Val, getContext());
3790 Operands[3] = AArch64Operand::CreateImm(
3791 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3792 Operands[4] = AArch64Operand::CreateImm(
3793 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3795 Operands[0] = AArch64Operand::CreateToken(
3796 "bfm", false, Op.getStartLoc(), getContext());
3797 else if (Tok == "sbfiz")
3798 Operands[0] = AArch64Operand::CreateToken(
3799 "sbfm", false, Op.getStartLoc(), getContext());
3800 else if (Tok == "ubfiz")
3801 Operands[0] = AArch64Operand::CreateToken(
3802 "ubfm", false, Op.getStartLoc(), getContext());
3804 llvm_unreachable("No valid mnemonic for alias?");
3808 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3809 // UBFX -> UBFM aliases.
3810 } else if (NumOperands == 5 &&
3811 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3812 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3813 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3814 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3816 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3817 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3818 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3820 if (Op3CE && Op4CE) {
3821 uint64_t Op3Val = Op3CE->getValue();
3822 uint64_t Op4Val = Op4CE->getValue();
3824 uint64_t RegWidth = 0;
3825 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3831 if (Op3Val >= RegWidth)
3832 return Error(Op3.getStartLoc(),
3833 "expected integer in range [0, 31]");
3834 if (Op4Val < 1 || Op4Val > RegWidth)
3835 return Error(Op4.getStartLoc(),
3836 "expected integer in range [1, 32]");
3838 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3840 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3841 return Error(Op4.getStartLoc(),
3842 "requested extract overflows register");
3844 const MCExpr *NewOp4 =
3845 MCConstantExpr::create(NewOp4Val, getContext());
3846 Operands[4] = AArch64Operand::CreateImm(
3847 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3849 Operands[0] = AArch64Operand::CreateToken(
3850 "bfm", false, Op.getStartLoc(), getContext());
3851 else if (Tok == "sbfx")
3852 Operands[0] = AArch64Operand::CreateToken(
3853 "sbfm", false, Op.getStartLoc(), getContext());
3854 else if (Tok == "ubfx")
3855 Operands[0] = AArch64Operand::CreateToken(
3856 "ubfm", false, Op.getStartLoc(), getContext());
3858 llvm_unreachable("No valid mnemonic for alias?");
3863 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3864 // InstAlias can't quite handle this since the reg classes aren't
3866 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3867 // The source register can be Wn here, but the matcher expects a
3868 // GPR64. Twiddle it here if necessary.
3869 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3871 unsigned Reg = getXRegFromWReg(Op.getReg());
3872 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3873 Op.getEndLoc(), getContext());
3876 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3877 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3878 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3880 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3882 // The source register can be Wn here, but the matcher expects a
3883 // GPR64. Twiddle it here if necessary.
3884 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3886 unsigned Reg = getXRegFromWReg(Op.getReg());
3887 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3888 Op.getEndLoc(), getContext());
3892 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3893 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3894 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3896 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3898 // The source register can be Wn here, but the matcher expects a
3899 // GPR32. Twiddle it here if necessary.
3900 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3902 unsigned Reg = getWRegFromXReg(Op.getReg());
3903 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3904 Op.getEndLoc(), getContext());
3909 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3910 if (NumOperands == 3 && Tok == "fmov") {
3911 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3912 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3913 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3915 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3919 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3920 Op.getEndLoc(), getContext());
3925 // First try to match against the secondary set of tables containing the
3926 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3927 unsigned MatchResult =
3928 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3930 // If that fails, try against the alternate table containing long-form NEON:
3931 // "fadd v0.2s, v1.2s, v2.2s"
3932 // But first, save the ErrorInfo: we can use it in case this try also fails.
3933 uint64_t ShortFormNEONErrorInfo = ErrorInfo;
3934 if (MatchResult != Match_Success)
3936 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3938 switch (MatchResult) {
3939 case Match_Success: {
3940 // Perform range checking and other semantic validations
3941 SmallVector<SMLoc, 8> OperandLocs;
3942 NumOperands = Operands.size();
3943 for (unsigned i = 1; i < NumOperands; ++i)
3944 OperandLocs.push_back(Operands[i]->getStartLoc());
3945 if (validateInstruction(Inst, OperandLocs))
3949 Out.EmitInstruction(Inst, STI);
3952 case Match_MissingFeature: {
3953 assert(ErrorInfo && "Unknown missing feature!");
3954 // Special case the error message for the very common case where only
3955 // a single subtarget feature is missing (neon, e.g.).
3956 std::string Msg = "instruction requires:";
3958 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3959 if (ErrorInfo & Mask) {
3961 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3965 return Error(IDLoc, Msg);
3967 case Match_MnemonicFail:
3968 return showMatchError(IDLoc, MatchResult);
3969 case Match_InvalidOperand: {
3970 SMLoc ErrorLoc = IDLoc;
3972 // If the long-form match failed on the mnemonic suffix token operand,
3973 // the short-form match failure is probably more relevant: use it instead.
3974 if (ErrorInfo == 1 &&
3975 ((AArch64Operand &)*Operands[1]).isToken() &&
3976 ((AArch64Operand &)*Operands[1]).isTokenSuffix())
3977 ErrorInfo = ShortFormNEONErrorInfo;
3979 if (ErrorInfo != ~0ULL) {
3980 if (ErrorInfo >= Operands.size())
3981 return Error(IDLoc, "too few operands for instruction");
3983 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3984 if (ErrorLoc == SMLoc())
3987 // If the match failed on a suffix token operand, tweak the diagnostic
3989 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3990 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3991 MatchResult = Match_InvalidSuffix;
3993 return showMatchError(ErrorLoc, MatchResult);
3995 case Match_InvalidMemoryIndexed1:
3996 case Match_InvalidMemoryIndexed2:
3997 case Match_InvalidMemoryIndexed4:
3998 case Match_InvalidMemoryIndexed8:
3999 case Match_InvalidMemoryIndexed16:
4000 case Match_InvalidCondCode:
4001 case Match_AddSubRegExtendSmall:
4002 case Match_AddSubRegExtendLarge:
4003 case Match_AddSubSecondSource:
4004 case Match_LogicalSecondSource:
4005 case Match_AddSubRegShift32:
4006 case Match_AddSubRegShift64:
4007 case Match_InvalidMovImm32Shift:
4008 case Match_InvalidMovImm64Shift:
4009 case Match_InvalidFPImm:
4010 case Match_InvalidMemoryWExtend8:
4011 case Match_InvalidMemoryWExtend16:
4012 case Match_InvalidMemoryWExtend32:
4013 case Match_InvalidMemoryWExtend64:
4014 case Match_InvalidMemoryWExtend128:
4015 case Match_InvalidMemoryXExtend8:
4016 case Match_InvalidMemoryXExtend16:
4017 case Match_InvalidMemoryXExtend32:
4018 case Match_InvalidMemoryXExtend64:
4019 case Match_InvalidMemoryXExtend128:
4020 case Match_InvalidMemoryIndexed4SImm7:
4021 case Match_InvalidMemoryIndexed8SImm7:
4022 case Match_InvalidMemoryIndexed16SImm7:
4023 case Match_InvalidMemoryIndexedSImm9:
4024 case Match_InvalidImm0_7:
4025 case Match_InvalidImm0_15:
4026 case Match_InvalidImm0_31:
4027 case Match_InvalidImm0_63:
4028 case Match_InvalidImm0_127:
4029 case Match_InvalidImm0_65535:
4030 case Match_InvalidImm1_8:
4031 case Match_InvalidImm1_16:
4032 case Match_InvalidImm1_32:
4033 case Match_InvalidImm1_64:
4034 case Match_InvalidIndex1:
4035 case Match_InvalidIndexB:
4036 case Match_InvalidIndexH:
4037 case Match_InvalidIndexS:
4038 case Match_InvalidIndexD:
4039 case Match_InvalidLabel:
4042 if (ErrorInfo >= Operands.size())
4043 return Error(IDLoc, "too few operands for instruction");
4044 // Any time we get here, there's nothing fancy to do. Just get the
4045 // operand SMLoc and display the diagnostic.
4046 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4047 if (ErrorLoc == SMLoc())
4049 return showMatchError(ErrorLoc, MatchResult);
4053 llvm_unreachable("Implement any new match types added!");
4056 /// ParseDirective parses the arm specific directives
4057 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4058 const MCObjectFileInfo::Environment Format =
4059 getContext().getObjectFileInfo()->getObjectFileType();
4060 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4061 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4063 StringRef IDVal = DirectiveID.getIdentifier();
4064 SMLoc Loc = DirectiveID.getLoc();
4065 if (IDVal == ".hword")
4066 return parseDirectiveWord(2, Loc);
4067 if (IDVal == ".word")
4068 return parseDirectiveWord(4, Loc);
4069 if (IDVal == ".xword")
4070 return parseDirectiveWord(8, Loc);
4071 if (IDVal == ".tlsdesccall")
4072 return parseDirectiveTLSDescCall(Loc);
4073 if (IDVal == ".ltorg" || IDVal == ".pool")
4074 return parseDirectiveLtorg(Loc);
4075 if (IDVal == ".unreq")
4076 return parseDirectiveUnreq(Loc);
4078 if (!IsMachO && !IsCOFF) {
4079 if (IDVal == ".inst")
4080 return parseDirectiveInst(Loc);
4083 return parseDirectiveLOH(IDVal, Loc);
4086 /// parseDirectiveWord
4087 /// ::= .word [ expression (, expression)* ]
4088 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4089 MCAsmParser &Parser = getParser();
4090 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4092 const MCExpr *Value;
4093 if (getParser().parseExpression(Value))
4096 getParser().getStreamer().EmitValue(Value, Size);
4098 if (getLexer().is(AsmToken::EndOfStatement))
4101 // FIXME: Improve diagnostic.
4102 if (getLexer().isNot(AsmToken::Comma))
4103 return Error(L, "unexpected token in directive");
4112 /// parseDirectiveInst
4113 /// ::= .inst opcode [, ...]
4114 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4115 MCAsmParser &Parser = getParser();
4116 if (getLexer().is(AsmToken::EndOfStatement)) {
4117 Parser.eatToEndOfStatement();
4118 Error(Loc, "expected expression following directive");
4125 if (getParser().parseExpression(Expr)) {
4126 Error(Loc, "expected expression");
4130 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4132 Error(Loc, "expected constant expression");
4136 getTargetStreamer().emitInst(Value->getValue());
4138 if (getLexer().is(AsmToken::EndOfStatement))
4141 if (getLexer().isNot(AsmToken::Comma)) {
4142 Error(Loc, "unexpected token in directive");
4146 Parser.Lex(); // Eat comma.
4153 // parseDirectiveTLSDescCall:
4154 // ::= .tlsdesccall symbol
4155 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4157 if (getParser().parseIdentifier(Name))
4158 return Error(L, "expected symbol after directive");
4160 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4161 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
4162 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4165 Inst.setOpcode(AArch64::TLSDESCCALL);
4166 Inst.addOperand(MCOperand::createExpr(Expr));
4168 getParser().getStreamer().EmitInstruction(Inst, STI);
4172 /// ::= .loh <lohName | lohId> label1, ..., labelN
4173 /// The number of arguments depends on the loh identifier.
4174 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4175 if (IDVal != MCLOHDirectiveName())
4178 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4179 if (getParser().getTok().isNot(AsmToken::Integer))
4180 return TokError("expected an identifier or a number in directive");
4181 // We successfully get a numeric value for the identifier.
4182 // Check if it is valid.
4183 int64_t Id = getParser().getTok().getIntVal();
4184 if (Id <= -1U && !isValidMCLOHType(Id))
4185 return TokError("invalid numeric identifier in directive");
4186 Kind = (MCLOHType)Id;
4188 StringRef Name = getTok().getIdentifier();
4189 // We successfully parse an identifier.
4190 // Check if it is a recognized one.
4191 int Id = MCLOHNameToId(Name);
4194 return TokError("invalid identifier in directive");
4195 Kind = (MCLOHType)Id;
4197 // Consume the identifier.
4199 // Get the number of arguments of this LOH.
4200 int NbArgs = MCLOHIdToNbArgs(Kind);
4202 assert(NbArgs != -1 && "Invalid number of arguments");
4204 SmallVector<MCSymbol *, 3> Args;
4205 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4207 if (getParser().parseIdentifier(Name))
4208 return TokError("expected identifier in directive");
4209 Args.push_back(getContext().getOrCreateSymbol(Name));
4211 if (Idx + 1 == NbArgs)
4213 if (getLexer().isNot(AsmToken::Comma))
4214 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4217 if (getLexer().isNot(AsmToken::EndOfStatement))
4218 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4220 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4224 /// parseDirectiveLtorg
4225 /// ::= .ltorg | .pool
4226 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4227 getTargetStreamer().emitCurrentConstantPool();
4231 /// parseDirectiveReq
4232 /// ::= name .req registername
4233 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4234 MCAsmParser &Parser = getParser();
4235 Parser.Lex(); // Eat the '.req' token.
4236 SMLoc SRegLoc = getLoc();
4237 unsigned RegNum = tryParseRegister();
4238 bool IsVector = false;
4240 if (RegNum == static_cast<unsigned>(-1)) {
4242 RegNum = tryMatchVectorRegister(Kind, false);
4243 if (!Kind.empty()) {
4244 Error(SRegLoc, "vector register without type specifier expected");
4250 if (RegNum == static_cast<unsigned>(-1)) {
4251 Parser.eatToEndOfStatement();
4252 Error(SRegLoc, "register name or alias expected");
4256 // Shouldn't be anything else.
4257 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4258 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4259 Parser.eatToEndOfStatement();
4263 Parser.Lex(); // Consume the EndOfStatement
4265 auto pair = std::make_pair(IsVector, RegNum);
4266 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4267 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4272 /// parseDirectiveUneq
4273 /// ::= .unreq registername
4274 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4275 MCAsmParser &Parser = getParser();
4276 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4277 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4278 Parser.eatToEndOfStatement();
4281 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4282 Parser.Lex(); // Eat the identifier.
4287 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4288 AArch64MCExpr::VariantKind &ELFRefKind,
4289 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4291 ELFRefKind = AArch64MCExpr::VK_INVALID;
4292 DarwinRefKind = MCSymbolRefExpr::VK_None;
4295 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4296 ELFRefKind = AE->getKind();
4297 Expr = AE->getSubExpr();
4300 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4302 // It's a simple symbol reference with no addend.
4303 DarwinRefKind = SE->getKind();
4307 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4311 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4314 DarwinRefKind = SE->getKind();
4316 if (BE->getOpcode() != MCBinaryExpr::Add &&
4317 BE->getOpcode() != MCBinaryExpr::Sub)
4320 // See if the addend is is a constant, otherwise there's more going
4321 // on here than we can deal with.
4322 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4326 Addend = AddendExpr->getValue();
4327 if (BE->getOpcode() == MCBinaryExpr::Sub)
4330 // It's some symbol reference + a constant addend, but really
4331 // shouldn't use both Darwin and ELF syntax.
4332 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4333 DarwinRefKind == MCSymbolRefExpr::VK_None;
4336 /// Force static initialization.
4337 extern "C" void LLVMInitializeAArch64AsmParser() {
4338 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4339 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4340 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4343 #define GET_REGISTER_MATCHER
4344 #define GET_SUBTARGET_FEATURE_NAME
4345 #define GET_MATCHER_IMPLEMENTATION
4346 #include "AArch64GenAsmMatcher.inc"
4348 // Define this matcher function after the auto-generated include so we
4349 // have the match class enum definitions.
4350 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4352 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4353 // If the kind is a token for a literal immediate, check if our asm
4354 // operand matches. This is for InstAliases which have a fixed-value
4355 // immediate in the syntax.
4356 int64_t ExpectedVal;
4359 return Match_InvalidOperand;
4401 return Match_InvalidOperand;
4402 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4404 return Match_InvalidOperand;
4405 if (CE->getValue() == ExpectedVal)
4406 return Match_Success;
4407 return Match_InvalidOperand;
4411 AArch64AsmParser::OperandMatchResultTy
4412 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
4416 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4417 Error(S, "expected register");
4418 return MatchOperand_ParseFail;
4421 int FirstReg = tryParseRegister();
4422 if (FirstReg == -1) {
4423 return MatchOperand_ParseFail;
4425 const MCRegisterClass &WRegClass =
4426 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4427 const MCRegisterClass &XRegClass =
4428 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
4430 bool isXReg = XRegClass.contains(FirstReg),
4431 isWReg = WRegClass.contains(FirstReg);
4432 if (!isXReg && !isWReg) {
4433 Error(S, "expected first even register of a "
4434 "consecutive same-size even/odd register pair");
4435 return MatchOperand_ParseFail;
4438 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4439 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4441 if (FirstEncoding & 0x1) {
4442 Error(S, "expected first even register of a "
4443 "consecutive same-size even/odd register pair");
4444 return MatchOperand_ParseFail;
4448 if (getParser().getTok().isNot(AsmToken::Comma)) {
4449 Error(M, "expected comma");
4450 return MatchOperand_ParseFail;
4456 int SecondReg = tryParseRegister();
4457 if (SecondReg ==-1) {
4458 return MatchOperand_ParseFail;
4461 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4462 (isXReg && !XRegClass.contains(SecondReg)) ||
4463 (isWReg && !WRegClass.contains(SecondReg))) {
4464 Error(E,"expected second odd register of a "
4465 "consecutive same-size even/odd register pair");
4466 return MatchOperand_ParseFail;
4471 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4472 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4474 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4475 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4478 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4481 return MatchOperand_Success;