1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "MCTargetDesc/AArch64TargetStreamer.h"
13 #include "Utils/AArch64BaseInfo.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCContext.h"
21 #include "llvm/MC/MCExpr.h"
22 #include "llvm/MC/MCInst.h"
23 #include "llvm/MC/MCObjectFileInfo.h"
24 #include "llvm/MC/MCParser/MCAsmLexer.h"
25 #include "llvm/MC/MCParser/MCAsmParser.h"
26 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
27 #include "llvm/MC/MCRegisterInfo.h"
28 #include "llvm/MC/MCStreamer.h"
29 #include "llvm/MC/MCSubtargetInfo.h"
30 #include "llvm/MC/MCSymbol.h"
31 #include "llvm/MC/MCTargetAsmParser.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/SourceMgr.h"
34 #include "llvm/Support/TargetRegistry.h"
35 #include "llvm/Support/raw_ostream.h"
43 class AArch64AsmParser : public MCTargetAsmParser {
45 StringRef Mnemonic; ///< Instruction mnemonic.
48 // Map of register aliases registered via the .req directive.
49 StringMap<std::pair<bool, unsigned> > RegisterReqs;
51 AArch64TargetStreamer &getTargetStreamer() {
52 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
53 return static_cast<AArch64TargetStreamer &>(TS);
// Convenience accessor: source location of the token currently under the lexer.
56 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
58 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
59 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
60 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
61 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
62 int tryParseRegister();
63 int tryMatchVectorRegister(StringRef &Kind, bool expected);
64 bool parseRegister(OperandVector &Operands);
65 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
66 bool parseVectorList(OperandVector &Operands);
67 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Thin wrappers that forward diagnostics to the underlying MCAsmParser.
70 void Warning(SMLoc L, const Twine &Msg) { getParser().Warning(L, Msg); }
// Returns true (failure) so callers can write "return Error(...)".
71 bool Error(SMLoc L, const Twine &Msg) { return getParser().Error(L, Msg); }
72 bool showMatchError(SMLoc Loc, unsigned ErrCode);
74 bool parseDirectiveWord(unsigned Size, SMLoc L);
75 bool parseDirectiveInst(SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
110 OperandMatchResultTy tryParseGPRSeqPair(OperandVector &Operands);
113 enum AArch64MatchResultTy {
114 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
115 #define GET_OPERAND_DIAGNOSTIC_TYPES
116 #include "AArch64GenAsmMatcher.inc"
118 AArch64AsmParser(MCSubtargetInfo &STI, MCAsmParser &Parser,
119 const MCInstrInfo &MII, const MCTargetOptions &Options)
120 : MCTargetAsmParser(Options), STI(STI) {
121 MCAsmParserExtension::Initialize(Parser);
122 MCStreamer &S = getParser().getStreamer();
123 if (S.getTargetStreamer() == nullptr)
124 new AArch64TargetStreamer(S);
126 // Initialize the set of available features.
127 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
130 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
131 SMLoc NameLoc, OperandVector &Operands) override;
132 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
133 bool ParseDirective(AsmToken DirectiveID) override;
134 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
135 unsigned Kind) override;
137 static bool classifySymbolRef(const MCExpr *Expr,
138 AArch64MCExpr::VariantKind &ELFRefKind,
139 MCSymbolRefExpr::VariantKind &DarwinRefKind,
142 } // end anonymous namespace
146 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine instruction operand.
148 class AArch64Operand : public MCParsedAsmOperand {
166 SMLoc StartLoc, EndLoc;
171 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
179 struct VectorListOp {
182 unsigned NumElements;
183 unsigned ElementKind;
186 struct VectorIndexOp {
194 struct ShiftedImmOp {
196 unsigned ShiftAmount;
200 AArch64CC::CondCode Code;
204 unsigned Val; // Encoded 8-bit representation.
208 unsigned Val; // Not the enum since not all values have names.
218 uint32_t PStateField;
231 struct ShiftExtendOp {
232 AArch64_AM::ShiftExtendType Type;
234 bool HasExplicitAmount;
244 struct VectorListOp VectorList;
245 struct VectorIndexOp VectorIndex;
247 struct ShiftedImmOp ShiftedImm;
248 struct CondCodeOp CondCode;
249 struct FPImmOp FPImm;
250 struct BarrierOp Barrier;
251 struct SysRegOp SysReg;
252 struct SysCRImmOp SysCRImm;
253 struct PrefetchOp Prefetch;
254 struct ShiftExtendOp ShiftExtend;
257 // Keep the MCContext around as the MCExprs may need manipulated during
258 // the add<>Operands() calls.
// Construct an operand of kind K; the creator fills in the matching union
// member (Tok/Reg/Imm/...) and the Start/End locations afterwards.
262 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
264 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
266 StartLoc = o.StartLoc;
276 ShiftedImm = o.ShiftedImm;
279 CondCode = o.CondCode;
291 VectorList = o.VectorList;
294 VectorIndex = o.VectorIndex;
300 SysCRImm = o.SysCRImm;
303 Prefetch = o.Prefetch;
306 ShiftExtend = o.ShiftExtend;
// Source-range accessors required by the MCParsedAsmOperand interface.
311 /// getStartLoc - Get the location of the first token of this operand.
312 SMLoc getStartLoc() const override { return StartLoc; }
313 /// getEndLoc - Get the location of the last token of this operand.
314 SMLoc getEndLoc() const override { return EndLoc; }
316 StringRef getToken() const {
317 assert(Kind == k_Token && "Invalid access!");
318 return StringRef(Tok.Data, Tok.Length);
321 bool isTokenSuffix() const {
322 assert(Kind == k_Token && "Invalid access!");
326 const MCExpr *getImm() const {
327 assert(Kind == k_Immediate && "Invalid access!");
331 const MCExpr *getShiftedImmVal() const {
332 assert(Kind == k_ShiftedImm && "Invalid access!");
333 return ShiftedImm.Val;
336 unsigned getShiftedImmShift() const {
337 assert(Kind == k_ShiftedImm && "Invalid access!");
338 return ShiftedImm.ShiftAmount;
341 AArch64CC::CondCode getCondCode() const {
342 assert(Kind == k_CondCode && "Invalid access!");
343 return CondCode.Code;
346 unsigned getFPImm() const {
347 assert(Kind == k_FPImm && "Invalid access!");
351 unsigned getBarrier() const {
352 assert(Kind == k_Barrier && "Invalid access!");
356 StringRef getBarrierName() const {
357 assert(Kind == k_Barrier && "Invalid access!");
358 return StringRef(Barrier.Data, Barrier.Length);
361 unsigned getReg() const override {
362 assert(Kind == k_Register && "Invalid access!");
366 unsigned getVectorListStart() const {
367 assert(Kind == k_VectorList && "Invalid access!");
368 return VectorList.RegNum;
371 unsigned getVectorListCount() const {
372 assert(Kind == k_VectorList && "Invalid access!");
373 return VectorList.Count;
376 unsigned getVectorIndex() const {
377 assert(Kind == k_VectorIndex && "Invalid access!");
378 return VectorIndex.Val;
381 StringRef getSysReg() const {
382 assert(Kind == k_SysReg && "Invalid access!");
383 return StringRef(SysReg.Data, SysReg.Length);
386 unsigned getSysCR() const {
387 assert(Kind == k_SysCR && "Invalid access!");
391 unsigned getPrefetch() const {
392 assert(Kind == k_Prefetch && "Invalid access!");
396 StringRef getPrefetchName() const {
397 assert(Kind == k_Prefetch && "Invalid access!");
398 return StringRef(Prefetch.Data, Prefetch.Length);
401 AArch64_AM::ShiftExtendType getShiftExtendType() const {
402 assert(Kind == k_ShiftExtend && "Invalid access!");
403 return ShiftExtend.Type;
406 unsigned getShiftExtendAmount() const {
407 assert(Kind == k_ShiftExtend && "Invalid access!");
408 return ShiftExtend.Amount;
411 bool hasShiftExtendAmount() const {
412 assert(Kind == k_ShiftExtend && "Invalid access!");
413 return ShiftExtend.HasExplicitAmount;
416 bool isImm() const override { return Kind == k_Immediate; }
// This parser never produces a generic "memory" operand; addressing is
// modelled with separate register/immediate/shift-extend operands instead.
417 bool isMem() const override { return false; }
418 bool isSImm9() const {
421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424 int64_t Val = MCE->getValue();
425 return (Val >= -256 && Val < 256);
427 bool isSImm7s4() const {
430 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433 int64_t Val = MCE->getValue();
434 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
436 bool isSImm7s8() const {
439 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442 int64_t Val = MCE->getValue();
443 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
445 bool isSImm7s16() const {
448 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451 int64_t Val = MCE->getValue();
452 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
455 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
456 AArch64MCExpr::VariantKind ELFRefKind;
457 MCSymbolRefExpr::VariantKind DarwinRefKind;
459 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
461 // If we don't understand the expression, assume the best and
462 // let the fixup and relocation code deal with it.
466 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
467 ELFRefKind == AArch64MCExpr::VK_LO12 ||
468 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
469 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
470 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
471 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
472 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
473 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
474 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
475 // Note that we don't range-check the addend. It's adjusted modulo page
476 // size when converted, so there is no "out of range" condition when using
478 return Addend >= 0 && (Addend % Scale) == 0;
479 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
480 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
481 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
488 template <int Scale> bool isUImm12Offset() const {
492 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
494 return isSymbolicUImm12Offset(getImm(), Scale);
496 int64_t Val = MCE->getValue();
497 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
500 bool isImm0_1() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= 0 && Val < 2);
509 bool isImm0_7() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val >= 0 && Val < 8);
518 bool isImm1_8() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val > 0 && Val < 9);
527 bool isImm0_15() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val >= 0 && Val < 16);
536 bool isImm1_16() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 int64_t Val = MCE->getValue();
543 return (Val > 0 && Val < 17);
545 bool isImm0_31() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 int64_t Val = MCE->getValue();
552 return (Val >= 0 && Val < 32);
554 bool isImm1_31() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 int64_t Val = MCE->getValue();
561 return (Val >= 1 && Val < 32);
563 bool isImm1_32() const {
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
569 int64_t Val = MCE->getValue();
570 return (Val >= 1 && Val < 33);
572 bool isImm0_63() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
579 return (Val >= 0 && Val < 64);
581 bool isImm1_63() const {
584 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
587 int64_t Val = MCE->getValue();
588 return (Val >= 1 && Val < 64);
590 bool isImm1_64() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 1 && Val < 65);
599 bool isImm0_127() const {
602 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 int64_t Val = MCE->getValue();
606 return (Val >= 0 && Val < 128);
608 bool isImm0_255() const {
611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614 int64_t Val = MCE->getValue();
615 return (Val >= 0 && Val < 256);
617 bool isImm0_65535() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 int64_t Val = MCE->getValue();
624 return (Val >= 0 && Val < 65536);
626 bool isImm32_63() const {
629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
632 int64_t Val = MCE->getValue();
633 return (Val >= 32 && Val < 64);
635 bool isLogicalImm32() const {
638 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
641 int64_t Val = MCE->getValue();
642 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
645 return AArch64_AM::isLogicalImmediate(Val, 32);
647 bool isLogicalImm64() const {
650 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
653 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
655 bool isLogicalImm32Not() const {
658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
661 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
662 return AArch64_AM::isLogicalImmediate(Val, 32);
664 bool isLogicalImm64Not() const {
667 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
670 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
// True if this operand is an immediate paired with an explicit shift amount.
672 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
673 bool isAddSubImm() const {
674 if (!isShiftedImm() && !isImm())
679 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
680 if (isShiftedImm()) {
681 unsigned Shift = ShiftedImm.ShiftAmount;
682 Expr = ShiftedImm.Val;
683 if (Shift != 0 && Shift != 12)
689 AArch64MCExpr::VariantKind ELFRefKind;
690 MCSymbolRefExpr::VariantKind DarwinRefKind;
692 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
693 DarwinRefKind, Addend)) {
694 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
695 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
696 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
697 || ELFRefKind == AArch64MCExpr::VK_LO12
698 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
699 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
700 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
701 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
702 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
703 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
704 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
707 // Otherwise it should be a real immediate in range:
708 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
709 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
711 bool isAddSubImmNeg() const {
712 if (!isShiftedImm() && !isImm())
717 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
718 if (isShiftedImm()) {
719 unsigned Shift = ShiftedImm.ShiftAmount;
720 Expr = ShiftedImm.Val;
721 if (Shift != 0 && Shift != 12)
726 // Otherwise it should be a real negative immediate in range:
727 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
728 return CE != nullptr && CE->getValue() < 0 && -CE->getValue() <= 0xfff;
// True if this operand is a parsed condition code (AArch64CC::CondCode).
730 bool isCondCode() const { return Kind == k_CondCode; }
731 bool isSIMDImmType10() const {
734 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
737 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
739 bool isBranchTarget26() const {
742 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
745 int64_t Val = MCE->getValue();
748 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
750 bool isPCRelLabel19() const {
753 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
756 int64_t Val = MCE->getValue();
759 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
761 bool isBranchTarget14() const {
764 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
767 int64_t Val = MCE->getValue();
770 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
774 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
778 AArch64MCExpr::VariantKind ELFRefKind;
779 MCSymbolRefExpr::VariantKind DarwinRefKind;
781 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
782 DarwinRefKind, Addend)) {
785 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
788 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
789 if (ELFRefKind == AllowedModifiers[i])
796 bool isMovZSymbolG3() const {
797 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
800 bool isMovZSymbolG2() const {
801 return isMovWSymbol({AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
802 AArch64MCExpr::VK_TPREL_G2,
803 AArch64MCExpr::VK_DTPREL_G2});
806 bool isMovZSymbolG1() const {
807 return isMovWSymbol({
808 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
809 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
810 AArch64MCExpr::VK_DTPREL_G1,
814 bool isMovZSymbolG0() const {
815 return isMovWSymbol({AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
816 AArch64MCExpr::VK_TPREL_G0,
817 AArch64MCExpr::VK_DTPREL_G0});
820 bool isMovKSymbolG3() const {
821 return isMovWSymbol(AArch64MCExpr::VK_ABS_G3);
824 bool isMovKSymbolG2() const {
825 return isMovWSymbol(AArch64MCExpr::VK_ABS_G2_NC);
828 bool isMovKSymbolG1() const {
829 return isMovWSymbol({AArch64MCExpr::VK_ABS_G1_NC,
830 AArch64MCExpr::VK_TPREL_G1_NC,
831 AArch64MCExpr::VK_DTPREL_G1_NC});
834 bool isMovKSymbolG0() const {
836 {AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
837 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC});
840 template<int RegWidth, int Shift>
841 bool isMOVZMovAlias() const {
842 if (!isImm()) return false;
844 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845 if (!CE) return false;
846 uint64_t Value = CE->getValue();
849 Value &= 0xffffffffULL;
851 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
852 if (Value == 0 && Shift != 0)
855 return (Value & ~(0xffffULL << Shift)) == 0;
858 template<int RegWidth, int Shift>
859 bool isMOVNMovAlias() const {
860 if (!isImm()) return false;
862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
863 if (!CE) return false;
864 uint64_t Value = CE->getValue();
866 // MOVZ takes precedence over MOVN.
867 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
868 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
873 Value &= 0xffffffffULL;
875 return (Value & ~(0xffffULL << Shift)) == 0;
// Simple operand-kind predicates queried by the auto-generated matcher.
878 bool isFPImm() const { return Kind == k_FPImm; }
879 bool isBarrier() const { return Kind == k_Barrier; }
880 bool isSysReg() const { return Kind == k_SysReg; }
881 bool isMRSSystemRegister() const {
882 if (!isSysReg()) return false;
884 return SysReg.MRSReg != -1U;
886 bool isMSRSystemRegister() const {
887 if (!isSysReg()) return false;
888 return SysReg.MSRReg != -1U;
890 bool isSystemPStateFieldWithImm0_1() const {
891 if (!isSysReg()) return false;
892 return SysReg.PStateField == AArch64PState::PAN;
894 bool isSystemPStateFieldWithImm0_15() const {
895 if (!isSysReg() || isSystemPStateFieldWithImm0_1()) return false;
896 return SysReg.PStateField != -1U;
// A scalar register: a k_Register operand not parsed with vector syntax.
898 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
// A vector register: a k_Register operand parsed with vector syntax.
899 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
900 bool isVectorRegLo() const {
901 return Kind == k_Register && Reg.isVector &&
902 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
905 bool isGPR32as64() const {
906 return Kind == k_Register && !Reg.isVector &&
907 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
909 bool isWSeqPair() const {
910 return Kind == k_Register && !Reg.isVector &&
911 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
914 bool isXSeqPair() const {
915 return Kind == k_Register && !Reg.isVector &&
916 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
920 bool isGPR64sp0() const {
921 return Kind == k_Register && !Reg.isVector &&
922 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
925 /// Is this a vector list with the type implicit (presumably attached to the
926 /// instruction itself)?
927 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
928 return Kind == k_VectorList && VectorList.Count == NumRegs &&
929 !VectorList.ElementKind;
932 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
933 bool isTypedVectorList() const {
934 if (Kind != k_VectorList)
936 if (VectorList.Count != NumRegs)
938 if (VectorList.ElementKind != ElementKind)
940 return VectorList.NumElements == NumElements;
943 bool isVectorIndex1() const {
944 return Kind == k_VectorIndex && VectorIndex.Val == 1;
946 bool isVectorIndexB() const {
947 return Kind == k_VectorIndex && VectorIndex.Val < 16;
949 bool isVectorIndexH() const {
950 return Kind == k_VectorIndex && VectorIndex.Val < 8;
952 bool isVectorIndexS() const {
953 return Kind == k_VectorIndex && VectorIndex.Val < 4;
955 bool isVectorIndexD() const {
956 return Kind == k_VectorIndex && VectorIndex.Val < 2;
// True if this operand is a raw token (mnemonic suffix, punctuation, etc.).
958 bool isToken() const override { return Kind == k_Token; }
959 bool isTokenEqual(StringRef Str) const {
960 return Kind == k_Token && getToken() == Str;
// Kind predicates for system control registers, prefetch hints, and
// shift/extend modifiers.
962 bool isSysCR() const { return Kind == k_SysCR; }
963 bool isPrefetch() const { return Kind == k_Prefetch; }
964 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
965 bool isShifter() const {
966 if (!isShiftExtend())
969 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
970 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
971 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
972 ST == AArch64_AM::MSL);
974 bool isExtend() const {
975 if (!isShiftExtend())
978 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
979 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
980 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
981 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
982 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
983 ET == AArch64_AM::LSL) &&
984 getShiftExtendAmount() <= 4;
987 bool isExtend64() const {
990 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
991 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
992 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
994 bool isExtendLSL64() const {
997 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
998 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
999 ET == AArch64_AM::LSL) &&
1000 getShiftExtendAmount() <= 4;
1003 template<int Width> bool isMemXExtend() const {
1006 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1007 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1008 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1009 getShiftExtendAmount() == 0);
1012 template<int Width> bool isMemWExtend() const {
1015 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1016 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1017 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1018 getShiftExtendAmount() == 0);
1021 template <unsigned width>
1022 bool isArithmeticShifter() const {
1026 // An arithmetic shifter is LSL, LSR, or ASR.
1027 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1028 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1029 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1032 template <unsigned width>
1033 bool isLogicalShifter() const {
1037 // A logical shifter is LSL, LSR, ASR or ROR.
1038 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1039 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1040 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1041 getShiftExtendAmount() < width;
1044 bool isMovImm32Shifter() const {
1048 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1049 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1050 if (ST != AArch64_AM::LSL)
1052 uint64_t Val = getShiftExtendAmount();
1053 return (Val == 0 || Val == 16);
1056 bool isMovImm64Shifter() const {
1060 // A MOVi shifter is LSL of 0 or 16.
1061 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1062 if (ST != AArch64_AM::LSL)
1064 uint64_t Val = getShiftExtendAmount();
1065 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1068 bool isLogicalVecShifter() const {
1072 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1073 unsigned Shift = getShiftExtendAmount();
1074 return getShiftExtendType() == AArch64_AM::LSL &&
1075 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1078 bool isLogicalVecHalfWordShifter() const {
1079 if (!isLogicalVecShifter())
1082 // A logical vector shifter is a left shift by 0 or 8.
1083 unsigned Shift = getShiftExtendAmount();
1084 return getShiftExtendType() == AArch64_AM::LSL &&
1085 (Shift == 0 || Shift == 8);
1088 bool isMoveVecShifter() const {
1089 if (!isShiftExtend())
1092 // A logical vector shifter is a left shift by 8 or 16.
1093 unsigned Shift = getShiftExtendAmount();
1094 return getShiftExtendType() == AArch64_AM::MSL &&
1095 (Shift == 8 || Shift == 16);
1098 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1099 // to LDUR/STUR when the offset is not legal for the former but is for
1100 // the latter. As such, in addition to checking for being a legal unscaled
1101 // address, also check that it is not a legal scaled address. This avoids
1102 // ambiguity in the matcher.
1104 bool isSImm9OffsetFB() const {
1105 return isSImm9() && !isUImm12Offset<Width / 8>();
1108 bool isAdrpLabel() const {
1109 // Validation was handled during parsing, so we just sanity check that
1110 // something didn't go haywire.
1114 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1115 int64_t Val = CE->getValue();
1116 int64_t Min = - (4096 * (1LL << (21 - 1)));
1117 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1118 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1124 bool isAdrLabel() const {
1125 // Validation was handled during parsing, so we just sanity check that
1126 // something didn't go haywire.
1130 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1131 int64_t Val = CE->getValue();
1132 int64_t Min = - (1LL << (21 - 1));
1133 int64_t Max = ((1LL << (21 - 1)) - 1);
1134 return Val >= Min && Val <= Max;
1140 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1141 // Add as immediates when possible. Null MCExpr = 0.
1143 Inst.addOperand(MCOperand::createImm(0));
1144 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1145 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1147 Inst.addOperand(MCOperand::createExpr(Expr));
1150 void addRegOperands(MCInst &Inst, unsigned N) const {
1151 assert(N == 1 && "Invalid number of operands!");
1152 Inst.addOperand(MCOperand::createReg(getReg()));
1155 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1156 assert(N == 1 && "Invalid number of operands!");
1158 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1160 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1161 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1162 RI->getEncodingValue(getReg()));
1164 Inst.addOperand(MCOperand::createReg(Reg));
1167 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1170 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1171 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1174 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1178 Inst.addOperand(MCOperand::createReg(getReg()));
1181 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::createReg(getReg()));
1186 template <unsigned NumRegs>
1187 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1190 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1191 unsigned FirstReg = FirstRegs[NumRegs - 1];
1194 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1197 template <unsigned NumRegs>
1198 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1199 assert(N == 1 && "Invalid number of operands!");
1200 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1201 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1202 unsigned FirstReg = FirstRegs[NumRegs - 1];
1205 MCOperand::createReg(FirstReg + getVectorListStart() - AArch64::Q0));
1208 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1209 assert(N == 1 && "Invalid number of operands!");
1210 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1213 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1214 assert(N == 1 && "Invalid number of operands!");
1215 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1218 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1219 assert(N == 1 && "Invalid number of operands!");
1220 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1223 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1224 assert(N == 1 && "Invalid number of operands!");
1225 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1228 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1229 assert(N == 1 && "Invalid number of operands!");
1230 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1233 void addImmOperands(MCInst &Inst, unsigned N) const {
1234 assert(N == 1 && "Invalid number of operands!");
1235 // If this is a pageoff symrefexpr with an addend, adjust the addend
1236 // to be only the page-offset portion. Otherwise, just add the expr
1238 addExpr(Inst, getImm());
1241 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1242 assert(N == 2 && "Invalid number of operands!");
1243 if (isShiftedImm()) {
1244 addExpr(Inst, getShiftedImmVal());
1245 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1247 addExpr(Inst, getImm());
1248 Inst.addOperand(MCOperand::createImm(0));
// Emit the negation of an ADD/SUB immediate (used when an alias such as
// "sub ... #-N" is matched as the opposite instruction). The cast asserts
// the immediate is a constant; symbolic values are not valid here.
1252 void addAddSubImmNegOperands(MCInst &Inst, unsigned N) const {
1253 assert(N == 2 && "Invalid number of operands!");
1255 const MCExpr *MCE = isShiftedImm() ? getShiftedImmVal() : getImm();
1256 const MCConstantExpr *CE = cast<MCConstantExpr>(MCE);
1257 int64_t Val = -CE->getValue();
1258 unsigned ShiftAmt = isShiftedImm() ? ShiftedImm.ShiftAmount : 0;
1260 Inst.addOperand(MCOperand::createImm(Val));
1261 Inst.addOperand(MCOperand::createImm(ShiftAmt));
// Emit the parsed condition code (AArch64CC enum value) as an immediate.
1264 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1265 assert(N == 1 && "Invalid number of operands!");
1266 Inst.addOperand(MCOperand::createImm(getCondCode()));
// Emit an ADRP target. Symbolic labels are added as expressions (relocated
// later); constant addresses are converted to a page number (>> 12).
1269 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1270 assert(N == 1 && "Invalid number of operands!");
1271 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1273 addExpr(Inst, getImm());
1275 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
// ADR targets need no page adjustment; reuse the generic immediate path.
1278 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1279 addImmOperands(Inst, N);
// Emit an unsigned scaled 12-bit load/store offset. Non-constant offsets
// are kept as expressions; constants are divided by the access-size Scale
// (declared on the elided template/parameter lines of this method).
1283 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1288 Inst.addOperand(MCOperand::createExpr(getImm()));
1291 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
// Signed-immediate adders. The s4/s8/s16 variants encode 7-bit immediates
// scaled by the access size (LDP/STP-style offsets), so the raw value is
// divided by 4/8/16 before emission; range/multiple checks happen in the
// matcher predicates, not here.
1294 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1295 assert(N == 1 && "Invalid number of operands!");
1296 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1297 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1300 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1301 assert(N == 1 && "Invalid number of operands!");
1302 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1303 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 4));
1306 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1307 assert(N == 1 && "Invalid number of operands!");
1308 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1309 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 8));
1312 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1313 assert(N == 1 && "Invalid number of operands!");
1314 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1315 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 16));
// Range-named immediate adders (imm0_1 ... imm32_63). All bodies are
// identical: the range named in the method is enforced by the corresponding
// isImmN_M predicate during matching; by the time we get here the value is
// known-good, so we simply emit the constant. The cast (vs dyn_cast)
// asserts the operand really is a constant expression.
1318 void addImm0_1Operands(MCInst &Inst, unsigned N) const {
1319 assert(N == 1 && "Invalid number of operands!");
1320 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1321 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1324 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1325 assert(N == 1 && "Invalid number of operands!");
1326 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1327 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1330 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1331 assert(N == 1 && "Invalid number of operands!");
1332 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1333 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1336 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1337 assert(N == 1 && "Invalid number of operands!");
1338 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1339 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1342 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1343 assert(N == 1 && "Invalid number of operands!");
1344 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1345 assert(MCE && "Invalid constant immediate operand!");
1346 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1349 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1350 assert(N == 1 && "Invalid number of operands!");
1351 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1352 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1355 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1356 assert(N == 1 && "Invalid number of operands!");
1357 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1358 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1361 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1362 assert(N == 1 && "Invalid number of operands!");
1363 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1364 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1367 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1368 assert(N == 1 && "Invalid number of operands!");
1369 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1370 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1373 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1376 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1379 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1380 assert(N == 1 && "Invalid number of operands!");
1381 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1382 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1385 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1386 assert(N == 1 && "Invalid number of operands!");
1387 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1388 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1391 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1392 assert(N == 1 && "Invalid number of operands!");
1393 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1394 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1397 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1398 assert(N == 1 && "Invalid number of operands!");
1399 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1400 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
1403 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1404 assert(N == 1 && "Invalid number of operands!");
1405 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1406 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
// Logical-immediate adders: convert a validated constant into the N:immr:imms
// bitmask-immediate encoding used by AND/ORR/EOR. The 32-bit variants mask
// to the low 32 bits before encoding; the "Not" variants encode the bitwise
// complement (for aliases like BIC-with-immediate).
1409 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1410 assert(N == 1 && "Invalid number of operands!");
1411 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1413 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1414 Inst.addOperand(MCOperand::createImm(encoding));
1417 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1418 assert(N == 1 && "Invalid number of operands!");
1419 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1420 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1421 Inst.addOperand(MCOperand::createImm(encoding));
1424 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1425 assert(N == 1 && "Invalid number of operands!");
1426 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1427 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1428 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1429 Inst.addOperand(MCOperand::createImm(encoding));
1432 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1433 assert(N == 1 && "Invalid number of operands!");
1434 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1436 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1437 Inst.addOperand(MCOperand::createImm(encoding));
// Encode a 64-bit floating-point pattern as the 8-bit "a:b:c:d:e:f:g:h"
// AdvSIMD modified-immediate (MOVI type 10) encoding.
1440 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1441 assert(N == 1 && "Invalid number of operands!");
1442 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1443 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1444 Inst.addOperand(MCOperand::createImm(encoding));
// Branch/label-target adders. A symbolic target is emitted as an expression
// for the fixup machinery; a constant target is emitted with its two
// always-zero low bits shifted off (A64 branch offsets are word-scaled).
1447 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1448 // Branch operands don't encode the low bits, so shift them off
1449 // here. If it's a label, however, just put it on directly as there's
1450 // not enough information now to do anything.
1451 assert(N == 1 && "Invalid number of operands!");
1452 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1454 addExpr(Inst, getImm());
1457 assert(MCE && "Invalid constant immediate operand!");
1458 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1461 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1462 // Branch operands don't encode the low bits, so shift them off
1463 // here. If it's a label, however, just put it on directly as there's
1464 // not enough information now to do anything.
1465 assert(N == 1 && "Invalid number of operands!");
1466 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1468 addExpr(Inst, getImm());
1471 assert(MCE && "Invalid constant immediate operand!");
1472 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
1475 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1476 // Branch operands don't encode the low bits, so shift them off
1477 // here. If it's a label, however, just put it on directly as there's
1478 // not enough information now to do anything.
1479 assert(N == 1 && "Invalid number of operands!");
1480 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1482 addExpr(Inst, getImm());
1485 assert(MCE && "Invalid constant immediate operand!");
1486 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
// Simple pass-through adders: each emits a value already resolved during
// parsing (FP8 immediate encoding, barrier option, MRS/MSR system register
// encoding, PSTATE field, CRn/CRm index, prefetch operation).
1489 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1490 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::createImm(getFPImm()));
1494 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1496 Inst.addOperand(MCOperand::createImm(getBarrier()));
1499 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1500 assert(N == 1 && "Invalid number of operands!");
1502 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
1505 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1506 assert(N == 1 && "Invalid number of operands!");
1508 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
1511 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
1512 assert(N == 1 && "Invalid number of operands!");
1514 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1517 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
1518 assert(N == 1 && "Invalid number of operands!");
1520 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
1523 void addSysCROperands(MCInst &Inst, unsigned N) const {
1524 assert(N == 1 && "Invalid number of operands!");
1525 Inst.addOperand(MCOperand::createImm(getSysCR()));
1528 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1529 assert(N == 1 && "Invalid number of operands!");
1530 Inst.addOperand(MCOperand::createImm(getPrefetch()));
// Shift/extend adders. addShifterOperands packs type+amount into one
// immediate; the extend variants rewrite plain LSL to the canonical
// UXTW/UXTX form expected by 32-/64-bit arithmetic extends; the mem-extend
// variants emit the (sign-extend?, shifted?) flag pair used by register
// offset addressing modes.
1533 void addShifterOperands(MCInst &Inst, unsigned N) const {
1534 assert(N == 1 && "Invalid number of operands!");
1536 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1537 Inst.addOperand(MCOperand::createImm(Imm));
1540 void addExtendOperands(MCInst &Inst, unsigned N) const {
1541 assert(N == 1 && "Invalid number of operands!");
1542 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1543 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1544 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1545 Inst.addOperand(MCOperand::createImm(Imm));
1548 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1549 assert(N == 1 && "Invalid number of operands!");
1550 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1551 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1552 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1553 Inst.addOperand(MCOperand::createImm(Imm));
1556 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1557 assert(N == 2 && "Invalid number of operands!");
1558 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1559 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1560 Inst.addOperand(MCOperand::createImm(IsSigned));
1561 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
1564 // For 8-bit load/store instructions with a register offset, both the
1565 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1566 // they're disambiguated by whether the shift was explicit or implicit rather
// than by amount, so the second operand tests hasShiftExtendAmount() instead
// of the amount itself.
1568 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1569 assert(N == 2 && "Invalid number of operands!");
1570 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1571 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1572 Inst.addOperand(MCOperand::createImm(IsSigned));
1573 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
// MOV-alias adders: extract the 16-bit chunk of the constant selected by
// Shift (declared on an elided template/parameter line). MOVN takes the
// chunk from the complemented value, since MOVN materializes ~imm.
1577 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1578 assert(N == 1 && "Invalid number of operands!");
1580 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1581 uint64_t Value = CE->getValue();
1582 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
1586 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1587 assert(N == 1 && "Invalid number of operands!");
1589 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1590 uint64_t Value = CE->getValue();
1591 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
// Debug dump of this operand; defined out-of-line below.
1594 void print(raw_ostream &OS) const override;
// Static factory functions: each builds an AArch64Operand of the matching
// kind, fills the corresponding union member, and (on elided lines) records
// the start/end source locations before returning the owning unique_ptr.
1596 static std::unique_ptr<AArch64Operand>
1597 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1598 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1599 Op->Tok.Data = Str.data();
1600 Op->Tok.Length = Str.size();
1601 Op->Tok.IsSuffix = IsSuffix;
1607 static std::unique_ptr<AArch64Operand>
1608 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1609 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1610 Op->Reg.RegNum = RegNum;
1611 Op->Reg.isVector = isVector;
1617 static std::unique_ptr<AArch64Operand>
1618 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1619 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1620 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1621 Op->VectorList.RegNum = RegNum;
1622 Op->VectorList.Count = Count;
1623 Op->VectorList.NumElements = NumElements;
1624 Op->VectorList.ElementKind = ElementKind;
1630 static std::unique_ptr<AArch64Operand>
1631 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1632 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1633 Op->VectorIndex.Val = Idx;
1639 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1640 SMLoc E, MCContext &Ctx) {
1641 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1648 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1649 unsigned ShiftAmount,
// NOTE(review): stray space in "ShiftedImm .Val" below — harmless to the
// compiler but worth normalizing in a code-change pass.
1652 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1653 Op->ShiftedImm .Val = Val;
1654 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1660 static std::unique_ptr<AArch64Operand>
1661 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1662 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1663 Op->CondCode.Code = Code;
1669 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1671 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1672 Op->FPImm.Val = Val;
1678 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
1682 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1683 Op->Barrier.Val = Val;
1684 Op->Barrier.Data = Str.data();
1685 Op->Barrier.Length = Str.size();
1691 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
1694 uint32_t PStateField,
1696 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1697 Op->SysReg.Data = Str.data();
1698 Op->SysReg.Length = Str.size();
1699 Op->SysReg.MRSReg = MRSReg;
1700 Op->SysReg.MSRReg = MSRReg;
1701 Op->SysReg.PStateField = PStateField;
1707 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1708 SMLoc E, MCContext &Ctx) {
1709 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1710 Op->SysCRImm.Val = Val;
1716 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
1720 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1721 Op->Prefetch.Val = Val;
// NOTE(review): a k_Prefetch operand writes its name through the Barrier
// union member — this relies on Prefetch/Barrier having identical Data/Length
// layout. Confirm intent; Op->Prefetch.Data/.Length would be clearer.
1722 Op->Barrier.Data = Str.data();
1723 Op->Barrier.Length = Str.size();
1729 static std::unique_ptr<AArch64Operand>
1730 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1731 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1732 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1733 Op->ShiftExtend.Type = ShOp;
1734 Op->ShiftExtend.Amount = Val;
1735 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1742 } // end anonymous namespace.
// Human-readable dump of an operand, switching (on elided lines) over the
// operand Kind. Each case prints a bracketed tag plus the stored value.
1744 void AArch64Operand::print(raw_ostream &OS) const {
1747 OS << "<fpimm " << getFPImm() << "("
1748 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1751 StringRef Name = getBarrierName();
1753 OS << "<barrier " << Name << ">";
1755 OS << "<barrier invalid #" << getBarrier() << ">";
1761 case k_ShiftedImm: {
1762 unsigned Shift = getShiftedImmShift();
1763 OS << "<shiftedimm ";
1764 OS << *getShiftedImmVal();
1765 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1769 OS << "<condcode " << getCondCode() << ">";
1772 OS << "<register " << getReg() << ">";
1774 case k_VectorList: {
1775 OS << "<vectorlist ";
1776 unsigned Reg = getVectorListStart();
1777 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1778 OS << Reg + i << " ";
1783 OS << "<vectorindex " << getVectorIndex() << ">";
1786 OS << "<sysreg: " << getSysReg() << '>';
1789 OS << "'" << getToken() << "'";
1792 OS << "c" << getSysCR();
1795 StringRef Name = getPrefetchName();
1797 OS << "<prfop " << Name << ">";
1799 OS << "<prfop invalid #" << getPrefetch() << ">";
1802 case k_ShiftExtend: {
1803 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1804 << getShiftExtendAmount();
1805 if (!hasShiftExtendAmount())
1813 /// @name Auto-generated Match Functions
1816 static unsigned MatchRegisterName(StringRef Name);
// Map a textual V-register name ("v0".."v31", case-insensitive via lower())
// to the corresponding Q-register enum value; the elided .Default returns 0
// (no match is representable as a register number here).
1820 static unsigned matchVectorRegName(StringRef Name) {
1821 return StringSwitch<unsigned>(Name.lower())
1822 .Case("v0", AArch64::Q0)
1823 .Case("v1", AArch64::Q1)
1824 .Case("v2", AArch64::Q2)
1825 .Case("v3", AArch64::Q3)
1826 .Case("v4", AArch64::Q4)
1827 .Case("v5", AArch64::Q5)
1828 .Case("v6", AArch64::Q6)
1829 .Case("v7", AArch64::Q7)
1830 .Case("v8", AArch64::Q8)
1831 .Case("v9", AArch64::Q9)
1832 .Case("v10", AArch64::Q10)
1833 .Case("v11", AArch64::Q11)
1834 .Case("v12", AArch64::Q12)
1835 .Case("v13", AArch64::Q13)
1836 .Case("v14", AArch64::Q14)
1837 .Case("v15", AArch64::Q15)
1838 .Case("v16", AArch64::Q16)
1839 .Case("v17", AArch64::Q17)
1840 .Case("v18", AArch64::Q18)
1841 .Case("v19", AArch64::Q19)
1842 .Case("v20", AArch64::Q20)
1843 .Case("v21", AArch64::Q21)
1844 .Case("v22", AArch64::Q22)
1845 .Case("v23", AArch64::Q23)
1846 .Case("v24", AArch64::Q24)
1847 .Case("v25", AArch64::Q25)
1848 .Case("v26", AArch64::Q26)
1849 .Case("v27", AArch64::Q27)
1850 .Case("v28", AArch64::Q28)
1851 .Case("v29", AArch64::Q29)
1852 .Case("v30", AArch64::Q30)
1853 .Case("v31", AArch64::Q31)
// Whether a ".<lanes><type>" suffix (e.g. ".8b", ".2d") is a legal vector
// kind; the Case list is elided from this view.
1857 static bool isValidVectorKind(StringRef Name) {
1858 return StringSwitch<bool>(Name.lower())
1868 // Accept the width neutral ones, too, for verbose syntax. If those
1869 // aren't used in the right places, the token operand won't match so
1870 // all will work out.
// Decompose an already-validated vector kind (e.g. ".16b") into its lane
// count and element-kind character. A two-character kind (".b" etc.) has no
// lane count and returns early on the elided line after the size check.
1878 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1879 char &ElementKind) {
1880 assert(isValidVectorKind(Name));
1882 ElementKind = Name.lower()[Name.size() - 1];
1885 if (Name.size() == 2)
1888 // Parse the lane count
1889 Name = Name.drop_front();
1890 while (isdigit(Name.front())) {
1891 NumElements = 10 * NumElements + (Name.front() - '0');
1892 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when tryParseRegister yields the -1 sentinel.
1896 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1898 StartLoc = getLoc();
1899 RegNo = tryParseRegister();
1900 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1901 return (RegNo == (unsigned)-1);
1904 // Matches a register name or register alias previously defined by '.req'
// First tries the real (scalar or vector) register tables, then falls back
// to the .req alias map; an alias only applies if its scalar/vector flavor
// matches the requested one.
1905 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1907 unsigned RegNum = isVector ? matchVectorRegName(Name)
1908 : MatchRegisterName(Name);
1911 // Check for aliases registered via .req. Canonicalize to lower case.
1912 // That's more consistent since register names are case insensitive, and
1913 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1914 auto Entry = RegisterReqs.find(Name.lower())
1915 if (Entry == RegisterReqs.end())
1917 // set RegNum if the match is the right kind of register
1918 if (isVector == Entry->getValue().first)
1919 RegNum = Entry->getValue().second;
1924 /// tryParseRegister - Try to parse a register name. The token must be an
1925 /// Identifier when called, and if it is a register name the token is eaten and
1926 /// the register is added to the operand list.
1927 int AArch64AsmParser::tryParseRegister() {
1928 MCAsmParser &Parser = getParser();
1929 const AsmToken &Tok = Parser.getTok();
1930 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1932 std::string lowerCase = Tok.getString().lower();
1933 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1934 // Also handle a few aliases of registers.
// fp/lr are ABI names; x31/w31 map to the zero registers (the elided
// .Default keeps the prior RegNum).
1936 RegNum = StringSwitch<unsigned>(lowerCase)
1937 .Case("fp", AArch64::FP)
1938 .Case("lr", AArch64::LR)
1939 .Case("x31", AArch64::XZR)
1940 .Case("w31", AArch64::WZR)
1946 Parser.Lex(); // Eat identifier token.
1950 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1951 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success Kind receives the ".<lanes><type>" suffix (including the dot);
// on any failure path the elided lines return the -1 sentinel.
1952 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1953 MCAsmParser &Parser = getParser();
1954 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1955 TokError("vector register expected");
1959 StringRef Name = Parser.getTok().getString();
1960 // If there is a kind specifier, it's separated from the register name by
1962 size_t Start = 0, Next = Name.find('.');
1963 StringRef Head = Name.slice(Start, Next);
1964 unsigned RegNum = matchRegisterNameAlias(Head, true);
1967 if (Next != StringRef::npos) {
1968 Kind = Name.slice(Next, StringRef::npos);
1969 if (!isValidVectorKind(Kind)) {
1970 TokError("invalid vector kind qualifier");
1974 Parser.Lex(); // Eat the register token.
1979 TokError("vector register expected");
1983 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts "cN"/"CN" with 0 <= N <= 15 and pushes a SysCR operand.
1984 AArch64AsmParser::OperandMatchResultTy
1985 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1986 MCAsmParser &Parser = getParser();
1989 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1990 Error(S, "Expected cN operand where 0 <= N <= 15");
1991 return MatchOperand_ParseFail;
1994 StringRef Tok = Parser.getTok().getIdentifier();
1995 if (Tok[0] != 'c' && Tok[0] != 'C') {
1996 Error(S, "Expected cN operand where 0 <= N <= 15");
1997 return MatchOperand_ParseFail;
// Parse the digits after the leading 'c' in base 10.
2001 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
2002 if (BadNum || CRNum > 15) {
2003 Error(S, "Expected cN operand where 0 <= N <= 15");
2004 return MatchOperand_ParseFail;
2007 Parser.Lex(); // Eat identifier token.
2009 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2010 return MatchOperand_Success;
2013 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either a 5-bit immediate (optionally '#'-prefixed) or a named
// PRFM hint; both forms produce a Prefetch operand carrying the raw value
// plus its printable name (looked up via the PRFM mapper).
2014 AArch64AsmParser::OperandMatchResultTy
2015 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2016 MCAsmParser &Parser = getParser();
2018 const AsmToken &Tok = Parser.getTok();
2019 // Either an identifier for named values or a 5-bit immediate.
2020 bool Hash = Tok.is(AsmToken::Hash);
2021 if (Hash || Tok.is(AsmToken::Integer)) {
2023 Parser.Lex(); // Eat hash token.
2024 const MCExpr *ImmVal;
2025 if (getParser().parseExpression(ImmVal))
2026 return MatchOperand_ParseFail;
2028 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2030 TokError("immediate value expected for prefetch operand");
2031 return MatchOperand_ParseFail;
2033 unsigned prfop = MCE->getValue();
2035 TokError("prefetch operand out of range, [0,31] expected");
2036 return MatchOperand_ParseFail;
// Recover the canonical name for the numeric hint, if one exists.
2040 auto Mapper = AArch64PRFM::PRFMMapper();
2042 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2043 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Name,
2045 return MatchOperand_Success;
2048 if (Tok.isNot(AsmToken::Identifier)) {
2049 TokError("pre-fetch hint expected");
2050 return MatchOperand_ParseFail;
// Named form: translate the identifier to its encoding.
2054 auto Mapper = AArch64PRFM::PRFMMapper();
2056 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2058 TokError("pre-fetch hint expected");
2059 return MatchOperand_ParseFail;
2062 Parser.Lex(); // Eat identifier token.
2063 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, Tok.getString(),
2065 return MatchOperand_Success;
2068 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// Parses a symbolic immediate and checks its relocation specifier is one
// ADRP can take (:got:, gotpage, tlvppage, etc.); a bare symbol is wrapped
// in VK_ABS_PAGE for the default ELF ADRP relocation.
2070 AArch64AsmParser::OperandMatchResultTy
2071 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2072 MCAsmParser &Parser = getParser();
2076 if (Parser.getTok().is(AsmToken::Hash)) {
2077 Parser.Lex(); // Eat hash token.
2080 if (parseSymbolicImmVal(Expr))
2081 return MatchOperand_ParseFail;
2083 AArch64MCExpr::VariantKind ELFRefKind;
2084 MCSymbolRefExpr::VariantKind DarwinRefKind;
2086 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2087 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2088 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2089 // No modifier was specified at all; this is the syntax for an ELF basic
2090 // ADRP relocation (unfortunately).
2092 AArch64MCExpr::create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2093 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2094 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2096 Error(S, "gotpage label reference not allowed an addend");
2097 return MatchOperand_ParseFail;
2098 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2099 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2100 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2101 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2102 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2103 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2104 // The operand must be an @page or @gotpage qualified symbolref.
2105 Error(S, "page or gotpage label reference expected");
2106 return MatchOperand_ParseFail;
2110 // We have either a label reference possibly with addend or an immediate. The
2111 // addend is a raw value here. The linker will adjust it to only reference the
2113 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2114 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2116 return MatchOperand_Success;
2119 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// Unlike ADRP, ADR accepts any expression unmodified — no page relocation
// classification is needed.
2121 AArch64AsmParser::OperandMatchResultTy
2122 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2123 MCAsmParser &Parser = getParser();
2127 if (Parser.getTok().is(AsmToken::Hash)) {
2128 Parser.Lex(); // Eat hash token.
2131 if (getParser().parseExpression(Expr))
2132 return MatchOperand_ParseFail;
2134 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2135 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2137 return MatchOperand_Success;
2140 /// tryParseFPImm - A floating point immediate expression operand.
// Handles both a real literal (encoded via getFP64Imm into the 8-bit FMOV
// immediate, -1 meaning "not representable") and an integer form — either a
// pre-encoded 0x.. byte or a decimal reinterpreted as a double pattern.
2141 AArch64AsmParser::OperandMatchResultTy
2142 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2143 MCAsmParser &Parser = getParser();
2147 if (Parser.getTok().is(AsmToken::Hash)) {
2148 Parser.Lex(); // Eat '#'
2152 // Handle negation, as that still comes through as a separate token.
2153 bool isNegative = false;
2154 if (Parser.getTok().is(AsmToken::Minus)) {
2158 const AsmToken &Tok = Parser.getTok();
2159 if (Tok.is(AsmToken::Real)) {
2160 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2162 RealVal.changeSign();
2164 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2165 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2166 Parser.Lex(); // Eat the token.
2167 // Check for out of range values. As an exception, we let Zero through,
2168 // as we handle that special case in post-processing before matching in
2169 // order to use the zero register for it.
2170 if (Val == -1 && !RealVal.isPosZero()) {
2171 TokError("expected compatible register or floating-point constant");
2172 return MatchOperand_ParseFail;
2174 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2175 return MatchOperand_Success;
2177 if (Tok.is(AsmToken::Integer)) {
// Hex form is taken as the literal 8-bit encoding; must fit in [0,255].
2179 if (!isNegative && Tok.getString().startswith("0x")) {
2180 Val = Tok.getIntVal();
2181 if (Val > 255 || Val < 0) {
2182 TokError("encoded floating point value out of range");
2183 return MatchOperand_ParseFail;
2186 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2187 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2188 // If we had a '-' in front, toggle the sign bit.
2189 IntVal ^= (uint64_t)isNegative << 63;
2190 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2192 Parser.Lex(); // Eat the token.
2193 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2194 return MatchOperand_Success;
2198 return MatchOperand_NoMatch;
2200 TokError("invalid floating point immediate");
2201 return MatchOperand_ParseFail;
2204 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
// Parses "#imm" or "#imm, lsl #N". A plain constant that is a multiple of
// 0x1000 and too big for 12 bits is canonicalized to (imm >> 12, lsl #12).
2205 AArch64AsmParser::OperandMatchResultTy
2206 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2207 MCAsmParser &Parser = getParser();
2210 if (Parser.getTok().is(AsmToken::Hash))
2211 Parser.Lex(); // Eat '#'
2212 else if (Parser.getTok().isNot(AsmToken::Integer))
2213 // Operand should start from # or should be integer, emit error otherwise.
2214 return MatchOperand_NoMatch;
2217 if (parseSymbolicImmVal(Imm))
2218 return MatchOperand_ParseFail;
2219 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2220 uint64_t ShiftAmount = 0;
2221 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2223 int64_t Val = MCE->getValue();
2224 if (Val > 0xfff && (Val & 0xfff) == 0) {
2225 Imm = MCConstantExpr::create(Val >> 12, getContext());
2229 SMLoc E = Parser.getTok().getLoc();
2230 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2232 return MatchOperand_Success;
2238 // The optional operand must be "lsl #N" where N is non-negative.
2239 if (!Parser.getTok().is(AsmToken::Identifier) ||
2240 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2241 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2242 return MatchOperand_ParseFail;
// The '#' before the shift amount is optional.
2248 if (Parser.getTok().is(AsmToken::Hash)) {
2252 if (Parser.getTok().isNot(AsmToken::Integer)) {
2253 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2254 return MatchOperand_ParseFail;
2257 int64_t ShiftAmount = Parser.getTok().getIntVal();
2259 if (ShiftAmount < 0) {
2260 Error(Parser.getTok().getLoc(), "positive shift amount required");
2261 return MatchOperand_ParseFail;
2263 Parser.Lex(); // Eat the number
2265 SMLoc E = Parser.getTok().getLoc();
2266 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2267 S, E, getContext()));
2268 return MatchOperand_Success;
2271 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of the A64 condition mnemonics; cs/hs and cc/lo
// are architectural synonyms and deliberately share an enum value.
2272 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2273 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2274 .Case("eq", AArch64CC::EQ)
2275 .Case("ne", AArch64CC::NE)
2276 .Case("cs", AArch64CC::HS)
2277 .Case("hs", AArch64CC::HS)
2278 .Case("cc", AArch64CC::LO)
2279 .Case("lo", AArch64CC::LO)
2280 .Case("mi", AArch64CC::MI)
2281 .Case("pl", AArch64CC::PL)
2282 .Case("vs", AArch64CC::VS)
2283 .Case("vc", AArch64CC::VC)
2284 .Case("hi", AArch64CC::HI)
2285 .Case("ls", AArch64CC::LS)
2286 .Case("ge", AArch64CC::GE)
2287 .Case("lt", AArch64CC::LT)
2288 .Case("gt", AArch64CC::GT)
2289 .Case("le", AArch64CC::LE)
2290 .Case("al", AArch64CC::AL)
2291 .Case("nv", AArch64CC::NV)
2292 .Default(AArch64CC::Invalid);
2296 /// parseCondCode - Parse a Condition Code operand.
///
/// Consumes the identifier token and pushes a CondCode operand. When
/// \p invertCondCode is set (used for aliases such as cset/cinc — see
/// ParseInstruction's condCodeSecondOperand/condCodeThirdOperand), the
/// parsed code is inverted before being recorded; AL and NV cannot be
/// inverted meaningfully for those aliases and are rejected.
/// Returns true on error (via TokError), false on success.
2297 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2298 bool invertCondCode) {
2299 MCAsmParser &Parser = getParser();
2301 const AsmToken &Tok = Parser.getTok();
2302 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2304 StringRef Cond = Tok.getString();
2305 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2306 if (CC == AArch64CC::Invalid)
2307 return TokError("invalid condition code");
2308 Parser.Lex(); // Eat identifier token.
2310 if (invertCondCode) {
2311 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2312 return TokError("condition codes AL and NV are invalid for this instruction");
2313 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2317 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2321 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2322 /// them if present.
///
/// Recognizes both shift operators (lsl/lsr/asr/ror/msl) and extend
/// operators (uxtb/uxth/uxtw/uxtx/sxtb/sxth/sxtw/sxtx). Shifts require an
/// immediate amount; extends may omit it, in which case #0 is implied.
/// Returns MatchOperand_NoMatch without consuming tokens if the current
/// identifier is not a shift/extend keyword.
2323 AArch64AsmParser::OperandMatchResultTy
2324 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2325 MCAsmParser &Parser = getParser();
2326 const AsmToken &Tok = Parser.getTok();
2327 std::string LowerID = Tok.getString().lower();
2328 AArch64_AM::ShiftExtendType ShOp =
2329 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2330 .Case("lsl", AArch64_AM::LSL)
2331 .Case("lsr", AArch64_AM::LSR)
2332 .Case("asr", AArch64_AM::ASR)
2333 .Case("ror", AArch64_AM::ROR)
2334 .Case("msl", AArch64_AM::MSL)
2335 .Case("uxtb", AArch64_AM::UXTB)
2336 .Case("uxth", AArch64_AM::UXTH)
2337 .Case("uxtw", AArch64_AM::UXTW)
2338 .Case("uxtx", AArch64_AM::UXTX)
2339 .Case("sxtb", AArch64_AM::SXTB)
2340 .Case("sxth", AArch64_AM::SXTH)
2341 .Case("sxtw", AArch64_AM::SXTW)
2342 .Case("sxtx", AArch64_AM::SXTX)
2343 .Default(AArch64_AM::InvalidShiftExtend);
2345 if (ShOp == AArch64_AM::InvalidShiftExtend)
2346 return MatchOperand_NoMatch;
2348 SMLoc S = Tok.getLoc();
// The immediate may be written with or without a leading '#'.
2351 bool Hash = getLexer().is(AsmToken::Hash);
2352 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2353 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2354 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2355 ShOp == AArch64_AM::MSL) {
2356 // We expect a number here.
2357 TokError("expected #imm after shift specifier");
2358 return MatchOperand_ParseFail;
2361 // "extend" type operations don't need an immediate, #0 is implicit.
2362 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2364 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2365 return MatchOperand_Success;
2369 Parser.Lex(); // Eat the '#'.
2371 // Make sure we do actually have a number or a parenthesized expression.
2372 SMLoc E = Parser.getTok().getLoc();
2373 if (!Parser.getTok().is(AsmToken::Integer) &&
2374 !Parser.getTok().is(AsmToken::LParen)) {
2375 Error(E, "expected integer shift amount");
2376 return MatchOperand_ParseFail;
2379 const MCExpr *ImmVal;
2380 if (getParser().parseExpression(ImmVal))
2381 return MatchOperand_ParseFail;
// Only constant shift amounts are representable in the encoding.
2383 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2385 Error(E, "expected constant '#imm' after shift specifier");
2386 return MatchOperand_ParseFail;
2389 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2390 Operands.push_back(AArch64Operand::CreateShiftExtend(
2391 ShOp, MCE->getValue(), true, S, E, getContext()));
2392 return MatchOperand_Success;
2395 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2396 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// The alias mnemonic is rewritten to a "sys" token plus the four SYS
/// operands (op1, Cn, Cm, op2) dictated by the alias name; a trailing
/// register operand is then parsed if present. Aliases whose name contains
/// "all" operate on everything and take no register; all others require one.
/// Returns true on error (via TokError), false on success.
2397 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2398 OperandVector &Operands) {
2399 if (Name.find('.') != StringRef::npos)
2400 return TokError("invalid operand");
2404 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2406 MCAsmParser &Parser = getParser();
2407 const AsmToken &Tok = Parser.getTok();
2408 StringRef Op = Tok.getString();
2409 SMLoc S = Tok.getLoc();
2411 const MCExpr *Expr = nullptr;
// Emits the four SYS operands: #op1, Cn, Cm, #op2.
2413 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2415 Expr = MCConstantExpr::create(op1, getContext()); \
2416 Operands.push_back( \
2417 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2418 Operands.push_back( \
2419 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2420 Operands.push_back( \
2421 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2422 Expr = MCConstantExpr::create(op2, getContext()); \
2423 Operands.push_back( \
2424 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
// The encodings below are the SYS alias tables from the ARMv8 ARM
// (instruction cache, data cache, address translation, TLB maintenance).
2427 if (Mnemonic == "ic") {
2428 if (!Op.compare_lower("ialluis")) {
2429 // SYS #0, C7, C1, #0
2430 SYS_ALIAS(0, 7, 1, 0);
2431 } else if (!Op.compare_lower("iallu")) {
2432 // SYS #0, C7, C5, #0
2433 SYS_ALIAS(0, 7, 5, 0);
2434 } else if (!Op.compare_lower("ivau")) {
2435 // SYS #3, C7, C5, #1
2436 SYS_ALIAS(3, 7, 5, 1);
2438 return TokError("invalid operand for IC instruction");
2440 } else if (Mnemonic == "dc") {
2441 if (!Op.compare_lower("zva")) {
2442 // SYS #3, C7, C4, #1
2443 SYS_ALIAS(3, 7, 4, 1);
2444 } else if (!Op.compare_lower("ivac")) {
2445 // SYS #0, C7, C6, #1
2446 SYS_ALIAS(0, 7, 6, 1);
2447 } else if (!Op.compare_lower("isw")) {
2448 // SYS #0, C7, C6, #2
2449 SYS_ALIAS(0, 7, 6, 2);
2450 } else if (!Op.compare_lower("cvac")) {
2451 // SYS #3, C7, C10, #1
2452 SYS_ALIAS(3, 7, 10, 1);
2453 } else if (!Op.compare_lower("csw")) {
2454 // SYS #0, C7, C10, #2
2455 SYS_ALIAS(0, 7, 10, 2);
2456 } else if (!Op.compare_lower("cvau")) {
2457 // SYS #3, C7, C11, #1
2458 SYS_ALIAS(3, 7, 11, 1);
2459 } else if (!Op.compare_lower("civac")) {
2460 // SYS #3, C7, C14, #1
2461 SYS_ALIAS(3, 7, 14, 1);
2462 } else if (!Op.compare_lower("cisw")) {
2463 // SYS #0, C7, C14, #2
2464 SYS_ALIAS(0, 7, 14, 2);
2466 return TokError("invalid operand for DC instruction");
2468 } else if (Mnemonic == "at") {
2469 if (!Op.compare_lower("s1e1r")) {
2470 // SYS #0, C7, C8, #0
2471 SYS_ALIAS(0, 7, 8, 0);
2472 } else if (!Op.compare_lower("s1e2r")) {
2473 // SYS #4, C7, C8, #0
2474 SYS_ALIAS(4, 7, 8, 0);
2475 } else if (!Op.compare_lower("s1e3r")) {
2476 // SYS #6, C7, C8, #0
2477 SYS_ALIAS(6, 7, 8, 0);
2478 } else if (!Op.compare_lower("s1e1w")) {
2479 // SYS #0, C7, C8, #1
2480 SYS_ALIAS(0, 7, 8, 1);
2481 } else if (!Op.compare_lower("s1e2w")) {
2482 // SYS #4, C7, C8, #1
2483 SYS_ALIAS(4, 7, 8, 1);
2484 } else if (!Op.compare_lower("s1e3w")) {
2485 // SYS #6, C7, C8, #1
2486 SYS_ALIAS(6, 7, 8, 1);
2487 } else if (!Op.compare_lower("s1e0r")) {
2488 // SYS #0, C7, C8, #2
2489 SYS_ALIAS(0, 7, 8, 2);
2490 } else if (!Op.compare_lower("s1e0w")) {
2491 // SYS #0, C7, C8, #3
2492 SYS_ALIAS(0, 7, 8, 3);
2493 } else if (!Op.compare_lower("s12e1r")) {
2494 // SYS #4, C7, C8, #4
2495 SYS_ALIAS(4, 7, 8, 4);
2496 } else if (!Op.compare_lower("s12e1w")) {
2497 // SYS #4, C7, C8, #5
2498 SYS_ALIAS(4, 7, 8, 5);
2499 } else if (!Op.compare_lower("s12e0r")) {
2500 // SYS #4, C7, C8, #6
2501 SYS_ALIAS(4, 7, 8, 6);
2502 } else if (!Op.compare_lower("s12e0w")) {
2503 // SYS #4, C7, C8, #7
2504 SYS_ALIAS(4, 7, 8, 7);
2506 return TokError("invalid operand for AT instruction");
2508 } else if (Mnemonic == "tlbi") {
2509 if (!Op.compare_lower("vmalle1is")) {
2510 // SYS #0, C8, C3, #0
2511 SYS_ALIAS(0, 8, 3, 0);
2512 } else if (!Op.compare_lower("alle2is")) {
2513 // SYS #4, C8, C3, #0
2514 SYS_ALIAS(4, 8, 3, 0);
2515 } else if (!Op.compare_lower("alle3is")) {
2516 // SYS #6, C8, C3, #0
2517 SYS_ALIAS(6, 8, 3, 0);
2518 } else if (!Op.compare_lower("vae1is")) {
2519 // SYS #0, C8, C3, #1
2520 SYS_ALIAS(0, 8, 3, 1);
2521 } else if (!Op.compare_lower("vae2is")) {
2522 // SYS #4, C8, C3, #1
2523 SYS_ALIAS(4, 8, 3, 1);
2524 } else if (!Op.compare_lower("vae3is")) {
2525 // SYS #6, C8, C3, #1
2526 SYS_ALIAS(6, 8, 3, 1);
2527 } else if (!Op.compare_lower("aside1is")) {
2528 // SYS #0, C8, C3, #2
2529 SYS_ALIAS(0, 8, 3, 2);
2530 } else if (!Op.compare_lower("vaae1is")) {
2531 // SYS #0, C8, C3, #3
2532 SYS_ALIAS(0, 8, 3, 3);
2533 } else if (!Op.compare_lower("alle1is")) {
2534 // SYS #4, C8, C3, #4
2535 SYS_ALIAS(4, 8, 3, 4);
2536 } else if (!Op.compare_lower("vale1is")) {
2537 // SYS #0, C8, C3, #5
2538 SYS_ALIAS(0, 8, 3, 5);
2539 } else if (!Op.compare_lower("vaale1is")) {
2540 // SYS #0, C8, C3, #7
2541 SYS_ALIAS(0, 8, 3, 7);
2542 } else if (!Op.compare_lower("vmalle1")) {
2543 // SYS #0, C8, C7, #0
2544 SYS_ALIAS(0, 8, 7, 0);
2545 } else if (!Op.compare_lower("alle2")) {
2546 // SYS #4, C8, C7, #0
2547 SYS_ALIAS(4, 8, 7, 0);
2548 } else if (!Op.compare_lower("vale2is")) {
2549 // SYS #4, C8, C3, #5
2550 SYS_ALIAS(4, 8, 3, 5);
2551 } else if (!Op.compare_lower("vale3is")) {
2552 // SYS #6, C8, C3, #5
2553 SYS_ALIAS(6, 8, 3, 5);
2554 } else if (!Op.compare_lower("alle3")) {
2555 // SYS #6, C8, C7, #0
2556 SYS_ALIAS(6, 8, 7, 0);
2557 } else if (!Op.compare_lower("vae1")) {
2558 // SYS #0, C8, C7, #1
2559 SYS_ALIAS(0, 8, 7, 1);
2560 } else if (!Op.compare_lower("vae2")) {
2561 // SYS #4, C8, C7, #1
2562 SYS_ALIAS(4, 8, 7, 1);
2563 } else if (!Op.compare_lower("vae3")) {
2564 // SYS #6, C8, C7, #1
2565 SYS_ALIAS(6, 8, 7, 1);
2566 } else if (!Op.compare_lower("aside1")) {
2567 // SYS #0, C8, C7, #2
2568 SYS_ALIAS(0, 8, 7, 2);
2569 } else if (!Op.compare_lower("vaae1")) {
2570 // SYS #0, C8, C7, #3
2571 SYS_ALIAS(0, 8, 7, 3);
2572 } else if (!Op.compare_lower("alle1")) {
2573 // SYS #4, C8, C7, #4
2574 SYS_ALIAS(4, 8, 7, 4);
2575 } else if (!Op.compare_lower("vale1")) {
2576 // SYS #0, C8, C7, #5
2577 SYS_ALIAS(0, 8, 7, 5);
2578 } else if (!Op.compare_lower("vale2")) {
2579 // SYS #4, C8, C7, #5
2580 SYS_ALIAS(4, 8, 7, 5);
2581 } else if (!Op.compare_lower("vale3")) {
2582 // SYS #6, C8, C7, #5
2583 SYS_ALIAS(6, 8, 7, 5);
2584 } else if (!Op.compare_lower("vaale1")) {
2585 // SYS #0, C8, C7, #7
2586 SYS_ALIAS(0, 8, 7, 7);
2587 } else if (!Op.compare_lower("ipas2e1")) {
2588 // SYS #4, C8, C4, #1
2589 SYS_ALIAS(4, 8, 4, 1);
2590 } else if (!Op.compare_lower("ipas2le1")) {
2591 // SYS #4, C8, C4, #5
2592 SYS_ALIAS(4, 8, 4, 5);
2593 } else if (!Op.compare_lower("ipas2e1is")) {
2594 // SYS #4, C8, C0, #1
2595 SYS_ALIAS(4, 8, 0, 1);
2596 } else if (!Op.compare_lower("ipas2le1is")) {
2597 // SYS #4, C8, C0, #5
2598 SYS_ALIAS(4, 8, 0, 5);
2599 } else if (!Op.compare_lower("vmalls12e1")) {
2600 // SYS #4, C8, C7, #6
2601 SYS_ALIAS(4, 8, 7, 6);
2602 } else if (!Op.compare_lower("vmalls12e1is")) {
2603 // SYS #4, C8, C3, #6
2604 SYS_ALIAS(4, 8, 3, 6);
2606 return TokError("invalid operand for TLBI instruction");
2612 Parser.Lex(); // Eat operand.
// Ops whose name contains "all" act on everything and so take no register.
2614 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2615 bool HasRegister = false;
2617 // Check for the optional register operand.
2618 if (getLexer().is(AsmToken::Comma)) {
2619 Parser.Lex(); // Eat comma.
2621 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2622 return TokError("expected register operand");
2627 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2628 Parser.eatToEndOfStatement();
2629 return TokError("unexpected token in argument list");
// Diagnose a missing or spurious register against the op's requirement.
2632 if (ExpectRegister && !HasRegister) {
2633 return TokError("specified " + Mnemonic + " op requires a register");
2635 else if (!ExpectRegister && HasRegister) {
2636 return TokError("specified " + Mnemonic + " op does not use a register");
2639 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier.
/// Accepts either a constant immediate in the range [0, 15] (with optional
/// leading '#') or a named barrier option looked up via DBarrierMapper.
/// For ISB, the only accepted named option is 'sy'.
2643 AArch64AsmParser::OperandMatchResultTy
2644 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2645 MCAsmParser &Parser = getParser();
2646 const AsmToken &Tok = Parser.getTok();
2648 // Can be either a #imm style literal or an option name
2649 bool Hash = Tok.is(AsmToken::Hash);
2650 if (Hash || Tok.is(AsmToken::Integer)) {
2651 // Immediate operand.
2653 Parser.Lex(); // Eat the '#'
2654 const MCExpr *ImmVal;
2655 SMLoc ExprLoc = getLoc();
2656 if (getParser().parseExpression(ImmVal))
2657 return MatchOperand_ParseFail;
2658 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2660 Error(ExprLoc, "immediate value expected for barrier operand");
2661 return MatchOperand_ParseFail;
// The CRm field of the barrier instruction is 4 bits wide.
2663 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2664 Error(ExprLoc, "barrier operand out of range");
2665 return MatchOperand_ParseFail;
// Recover the canonical name (if any) for this immediate so the operand
// prints symbolically.
2668 auto Mapper = AArch64DB::DBarrierMapper();
2670 Mapper.toString(MCE->getValue(), STI.getFeatureBits(), Valid);
2671 Operands.push_back( AArch64Operand::CreateBarrier(MCE->getValue(), Name,
2672 ExprLoc, getContext()));
2673 return MatchOperand_Success;
2676 if (Tok.isNot(AsmToken::Identifier)) {
2677 TokError("invalid operand for instruction");
2678 return MatchOperand_ParseFail;
// Named option: map the identifier to its barrier encoding.
2682 auto Mapper = AArch64DB::DBarrierMapper();
2684 Mapper.fromString(Tok.getString(), STI.getFeatureBits(), Valid);
2686 TokError("invalid barrier option name");
2687 return MatchOperand_ParseFail;
2690 // The only valid named option for ISB is 'sy'
2691 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2692 TokError("'sy' or #imm operand expected");
2693 return MatchOperand_ParseFail;
2696 Operands.push_back( AArch64Operand::CreateBarrier(Opt, Tok.getString(),
2697 getLoc(), getContext()));
2698 Parser.Lex(); // Consume the option
2700 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register operand (for MRS/MSR and
/// MSR-to-pstate forms). The identifier is looked up in all three name
/// spaces (MRS-readable, MSR-writable, PState fields); each lookup yields
/// -1U when unknown, and the single SysReg operand records all three so
/// the matcher can pick the appropriate one.
2703 AArch64AsmParser::OperandMatchResultTy
2704 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2705 MCAsmParser &Parser = getParser();
2706 const AsmToken &Tok = Parser.getTok();
2708 if (Tok.isNot(AsmToken::Identifier))
2709 return MatchOperand_NoMatch;
2712 auto MRSMapper = AArch64SysReg::MRSMapper();
2713 uint32_t MRSReg = MRSMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2715 assert(IsKnown == (MRSReg != -1U) &&
2716 "register should be -1 if and only if it's unknown");
2718 auto MSRMapper = AArch64SysReg::MSRMapper();
2719 uint32_t MSRReg = MSRMapper.fromString(Tok.getString(), STI.getFeatureBits(),
2721 assert(IsKnown == (MSRReg != -1U) &&
2722 "register should be -1 if and only if it's unknown");
2724 auto PStateMapper = AArch64PState::PStateMapper();
2725 uint32_t PStateField =
2726 PStateMapper.fromString(Tok.getString(), STI.getFeatureBits(), IsKnown);
2727 assert(IsKnown == (PStateField != -1U) &&
2728 "register should be -1 if and only if it's unknown");
2730 Operands.push_back(AArch64Operand::CreateSysReg(
2731 Tok.getString(), getLoc(), MRSReg, MSRReg, PStateField, getContext()));
2732 Parser.Lex(); // Eat identifier
2734 return MatchOperand_Success;
2737 /// tryParseVectorRegister - Parse a vector register operand.
///
/// Parses a vector register (e.g. "v0.8b"), pushing the register operand,
/// a token for any explicit arrangement qualifier, and — if a '[' follows —
/// a constant vector-index operand. Returns true on failure.
2738 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2739 MCAsmParser &Parser = getParser();
2740 if (Parser.getTok().isNot(AsmToken::Identifier))
2744 // Check for a vector register specifier first.
2746 int64_t Reg = tryMatchVectorRegister(Kind, false);
2750 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2751 // If there was an explicit qualifier, that goes on as a literal text
2755 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2757 // If there is an index specifier following the register, parse that too.
2758 if (Parser.getTok().is(AsmToken::LBrac)) {
2759 SMLoc SIdx = getLoc();
2760 Parser.Lex(); // Eat left bracket token.
2762 const MCExpr *ImmVal;
2763 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2765 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2767 TokError("immediate value expected for vector index");
2772 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2773 Error(E, "']' expected");
2777 Parser.Lex(); // Eat right bracket token.
2779 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2786 /// parseRegister - Parse a non-vector register operand.
///
/// Tries a vector register first, then a scalar one. Returns true on
/// failure. For the scalar case, a literal "[1]" suffix is tokenized
/// verbatim for the few instructions that spell it in their syntax.
2787 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2788 MCAsmParser &Parser = getParser();
2790 // Try for a vector register.
2791 if (!tryParseVectorRegister(Operands))
2794 // Try for a scalar register.
2795 int64_t Reg = tryParseRegister();
2799 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2801 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2802 // as a string token in the instruction itself.
2803 if (getLexer().getKind() == AsmToken::LBrac) {
2804 SMLoc LBracS = getLoc();
2806 const AsmToken &Tok = Parser.getTok();
2807 if (Tok.is(AsmToken::Integer)) {
2808 SMLoc IntS = getLoc();
2809 int64_t Val = Tok.getIntVal();
2812 if (getLexer().getKind() == AsmToken::RBrac) {
2813 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as separate tokens to match the instruction syntax.
2816 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2818 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2820 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ELF relocation specifier of the form ":spec:expr" (e.g.
/// ":lo12:sym"). On success ImmVal holds the expression, wrapped in an
/// AArch64MCExpr carrying the relocation kind when a specifier was present.
/// Returns true on failure.
2830 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2831 MCAsmParser &Parser = getParser();
2832 bool HasELFModifier = false;
2833 AArch64MCExpr::VariantKind RefKind;
2835 if (Parser.getTok().is(AsmToken::Colon)) {
2836 Parser.Lex(); // Eat ':'
2837 HasELFModifier = true;
2839 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2840 Error(Parser.getTok().getLoc(),
2841 "expect relocation specifier in operand after ':'");
2845 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2846 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2847 .Case("lo12", AArch64MCExpr::VK_LO12)
2848 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2849 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2850 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2851 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2852 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2853 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2854 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2855 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2856 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2857 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2858 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2859 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2860 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2861 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2862 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2863 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2864 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2865 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2866 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2867 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2868 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2869 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2870 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2871 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2872 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2873 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2874 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2875 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2876 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2877 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2878 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2879 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2880 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2881 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2882 .Default(AArch64MCExpr::VK_INVALID);
2884 if (RefKind == AArch64MCExpr::VK_INVALID) {
2885 Error(Parser.getTok().getLoc(),
2886 "expect relocation specifier in operand after ':'");
2890 Parser.Lex(); // Eat identifier
// A second ':' must terminate the specifier before the expression proper.
2892 if (Parser.getTok().isNot(AsmToken::Colon)) {
2893 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2896 Parser.Lex(); // Eat ':'
2899 if (getParser().parseExpression(ImmVal))
2903 ImmVal = AArch64MCExpr::create(ImmVal, RefKind, getContext());
2908 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Handles both the range form "{ v0.8b - v3.8b }" and the comma form
/// "{ v0.8b, v1.8b, ... }". All registers in a list must share the same
/// arrangement suffix; in the comma form they must also be consecutive
/// (mod 32). An optional trailing "[index]" is parsed as a constant
/// vector-index operand. Returns true on failure.
2909 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2910 MCAsmParser &Parser = getParser();
2911 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2913 Parser.Lex(); // Eat left bracket token.
2915 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2918 int64_t PrevReg = FirstReg;
// Range form: "{ vA.k - vB.k }".
2921 if (Parser.getTok().is(AsmToken::Minus)) {
2922 Parser.Lex(); // Eat the minus.
2924 SMLoc Loc = getLoc();
2926 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2929 // Any Kind suffixes must match on all regs in the list.
2930 if (Kind != NextKind)
2931 return Error(Loc, "mismatched register size suffix");
// Register numbers wrap around at 32 (v31 -> v0).
2933 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2935 if (Space == 0 || Space > 3) {
2936 return Error(Loc, "invalid number of vectors");
// Comma form: "{ vA.k, vB.k, ... }".
2942 while (Parser.getTok().is(AsmToken::Comma)) {
2943 Parser.Lex(); // Eat the comma token.
2945 SMLoc Loc = getLoc();
2947 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2950 // Any Kind suffixes must match on all regs in the list.
2951 if (Kind != NextKind)
2952 return Error(Loc, "mismatched register size suffix");
2954 // Registers must be incremental (with wraparound at 31)
2955 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2956 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2957 return Error(Loc, "registers must be sequential");
2964 if (Parser.getTok().isNot(AsmToken::RCurly))
2965 return Error(getLoc(), "'}' expected");
2966 Parser.Lex(); // Eat the '}' token.
2969 return Error(S, "invalid number of vectors");
2971 unsigned NumElements = 0;
2972 char ElementKind = 0;
2974 parseValidVectorKind(Kind, NumElements, ElementKind);
2976 Operands.push_back(AArch64Operand::CreateVectorList(
2977 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2979 // If there is an index specifier following the list, parse that too.
2980 if (Parser.getTok().is(AsmToken::LBrac)) {
2981 SMLoc SIdx = getLoc();
2982 Parser.Lex(); // Eat left bracket token.
2984 const MCExpr *ImmVal;
2985 if (getParser().parseExpression(ImmVal))
2987 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2989 TokError("immediate value expected for vector index");
2994 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2995 Error(E, "']' expected");
2999 Parser.Lex(); // Eat right bracket token.
3001 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed
/// by ", #0" (the only index value accepted). The register must be in the
/// GPR64sp register class; anything else is NoMatch so other parsers can
/// try. A present index that is not the constant 0 is a parse failure.
3008 AArch64AsmParser::OperandMatchResultTy
3009 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
3010 MCAsmParser &Parser = getParser();
3011 const AsmToken &Tok = Parser.getTok();
3012 if (!Tok.is(AsmToken::Identifier))
3013 return MatchOperand_NoMatch;
3014 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
3016 MCContext &Ctx = getContext();
3017 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
3018 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
3019 return MatchOperand_NoMatch;
3022 Parser.Lex(); // Eat register
3024 if (Parser.getTok().isNot(AsmToken::Comma)) {
3026 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3027 return MatchOperand_Success;
3029 Parser.Lex(); // Eat comma.
3031 if (Parser.getTok().is(AsmToken::Hash))
3032 Parser.Lex(); // Eat hash
3034 if (Parser.getTok().isNot(AsmToken::Integer)) {
3035 Error(getLoc(), "index must be absent or #0")
3036 return MatchOperand_ParseFail;
// The index expression must evaluate to the constant 0.
3039 const MCExpr *ImmVal;
3040 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3041 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3042 Error(getLoc(), "index must be absent or #0");
3043 return MatchOperand_ParseFail;
3047 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
3048 return MatchOperand_Success;
3051 /// parseOperand - Parse a arm instruction operand. For now this parses the
3052 /// operand regardless of the mnemonic.
///
/// Dispatches on the current token kind: custom target parsers first, then
/// symbolic immediates, '[' memory openers, '{' vector lists, identifiers
/// (condition codes, registers, shifts/extends, labels), numeric literals
/// (including the special "#0.0" for fcmp-family), and the "ldr reg, =val"
/// pseudo. Returns true on error.
3053 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3054 bool invertCondCode) {
3055 MCAsmParser &Parser = getParser();
3056 // Check if the current operand has a custom associated parser, if so, try to
3057 // custom parse the operand, or fallback to the general approach.
3058 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3059 if (ResTy == MatchOperand_Success)
3061 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3062 // there was a match, but an error occurred, in which case, just return that
3063 // the operand parsing failed.
3064 if (ResTy == MatchOperand_ParseFail)
3067 // Nothing custom, so do general case parsing.
3069 switch (getLexer().getKind()) {
3073 if (parseSymbolicImmVal(Expr))
3074 return Error(S, "invalid operand");
3076 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3077 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3080 case AsmToken::LBrac: {
3081 SMLoc Loc = Parser.getTok().getLoc();
3082 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
3084 Parser.Lex(); // Eat '['
3086 // There's no comma after a '[', so we can parse the next operand
3088 return parseOperand(Operands, false, false);
3090 case AsmToken::LCurly:
3091 return parseVectorList(Operands);
3092 case AsmToken::Identifier: {
3093 // If we're expecting a Condition Code operand, then just parse that.
3095 return parseCondCode(Operands, invertCondCode);
3097 // If it's a register name, parse it.
3098 if (!parseRegister(Operands))
3101 // This could be an optional "shift" or "extend" operand.
3102 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
3103 // We can only continue if no tokens were eaten.
3104 if (GotShift != MatchOperand_NoMatch)
3107 // This was not a register so parse other operands that start with an
3108 // identifier (like labels) as expressions and create them as immediates.
3109 const MCExpr *IdVal;
3111 if (getParser().parseExpression(IdVal))
3114 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3115 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3118 case AsmToken::Integer:
3119 case AsmToken::Real:
3120 case AsmToken::Hash: {
3121 // #42 -> immediate.
3123 if (getLexer().is(AsmToken::Hash))
3126 // Parse a negative sign
3127 bool isNegative = false;
3128 if (Parser.getTok().is(AsmToken::Minus)) {
3130 // We need to consume this token only when we have a Real, otherwise
3131 // we let parseSymbolicImmVal take care of it
3132 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3136 // The only Real that should come through here is a literal #0.0 for
3137 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3138 // so convert the value.
3139 const AsmToken &Tok = Parser.getTok();
3140 if (Tok.is(AsmToken::Real)) {
3141 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3142 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3143 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3144 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3145 Mnemonic != "fcmlt")
3146 return TokError("unexpected floating point literal");
3147 else if (IntVal != 0 || isNegative)
3148 return TokError("expected floating-point constant #0.0")
3149 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens, matching the instruction syntax.
3152 AArch64Operand::CreateToken("#0", false, S, getContext()));
3154 AArch64Operand::CreateToken(".0", false, S, getContext()));
3158 const MCExpr *ImmVal;
3159 if (parseSymbolicImmVal(ImmVal))
3162 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3163 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3166 case AsmToken::Equal: {
3167 SMLoc Loc = Parser.getTok().getLoc();
3168 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3169 return Error(Loc, "unexpected token in operand");
3170 Parser.Lex(); // Eat '='
3171 const MCExpr *SubExprVal;
3172 if (getParser().parseExpression(SubExprVal))
3175 if (Operands.size() < 2 ||
3176 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3180 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3181 Operands[1]->getReg());
3183 MCContext& Ctx = getContext();
3184 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3185 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3186 if (isa<MCConstantExpr>(SubExprVal)) {
3187 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// Shift out trailing zero 16-bit chunks so Imm fits a movz immediate.
3188 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3189 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3193 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3194 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3195 Operands.push_back(AArch64Operand::CreateImm(
3196 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
3198 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3199 ShiftAmt, true, S, E, Ctx));
3202 APInt Simm = APInt(64, Imm << ShiftAmt);
3203 // check if the immediate is an unsigned or signed 32-bit int for W regs
3204 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3205 return Error(Loc, "Immediate too large for register");
3207 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3208 const MCExpr *CPLoc =
3209 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3210 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3216 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
///
/// Normalizes the fused branch mnemonics (beq -> b.eq, ...), handles the
/// .req directive, routes IC/DC/AT/TLBI through parseSysAlias, splits the
/// mnemonic on '.' into tokens, and then parses comma-separated operands,
/// flagging the operand positions that are condition codes for the
/// conditional-compare/select families. Returns true on error.
3218 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3219 StringRef Name, SMLoc NameLoc,
3220 OperandVector &Operands) {
3221 MCAsmParser &Parser = getParser();
3222 Name = StringSwitch<StringRef>(Name.lower())
3223 .Case("beq", "b.eq")
3224 .Case("bne", "b.ne")
3225 .Case("bhs", "b.hs")
3226 .Case("bcs", "b.cs")
3227 .Case("blo", "b.lo")
3228 .Case("bcc", "b.cc")
3229 .Case("bmi", "b.mi")
3230 .Case("bpl", "b.pl")
3231 .Case("bvs", "b.vs")
3232 .Case("bvc", "b.vc")
3233 .Case("bhi", "b.hi")
3234 .Case("bls", "b.ls")
3235 .Case("bge", "b.ge")
3236 .Case("blt", "b.lt")
3237 .Case("bgt", "b.gt")
3238 .Case("ble", "b.le")
3239 .Case("bal", "b.al")
3240 .Case("bnv", "b.nv")
3243 // First check for the AArch64-specific .req directive.
3244 if (Parser.getTok().is(AsmToken::Identifier) &&
3245 Parser.getTok().getIdentifier() == ".req") {
3246 parseDirectiveReq(Name, NameLoc);
3247 // We always return 'error' for this, as we're done with this
3248 // statement and don't need to match the 'instruction'.
3252 // Create the leading tokens for the mnemonic, split by '.' characters.
3253 size_t Start = 0, Next = Name.find('.');
3254 StringRef Head = Name.slice(Start, Next);
3256 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3257 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3258 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3259 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3260 Parser.eatToEndOfStatement();
3265 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3268 // Handle condition codes for a branch mnemonic
3269 if (Head == "b" && Next != StringRef::npos) {
3271 Next = Name.find('.', Start + 1);
3272 Head = Name.slice(Start + 1, Next);
3274 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3275 (Head.data() - Name.data()));
3276 AArch64CC::CondCode CC = parseCondCodeString(Head);
3277 if (CC == AArch64CC::Invalid)
3278 return Error(SuffixLoc, "invalid condition code");
3280 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3282 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3285 // Add the remaining tokens in the mnemonic.
3286 while (Next != StringRef::npos) {
3288 Next = Name.find('.', Start + 1);
3289 Head = Name.slice(Start, Next);
3290 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3291 (Head.data() - Name.data()) + 1);
3293 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3296 // Conditional compare instructions have a Condition Code operand, which needs
3297 // to be parsed and an immediate operand created.
3298 bool condCodeFourthOperand =
3299 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3300 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3301 Head == "csinc" || Head == "csinv" || Head == "csneg");
3303 // These instructions are aliases to some of the conditional select
3304 // instructions. However, the condition code is inverted in the aliased
3307 // FIXME: Is this the correct way to handle these? Or should the parser
3308 // generate the aliased instructions directly?
3309 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3310 bool condCodeThirdOperand =
3311 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3313 // Read the remaining operands.
3314 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3315 // Read the first operand.
3316 if (parseOperand(Operands, false, false)) {
3317 Parser.eatToEndOfStatement();
3322 while (getLexer().is(AsmToken::Comma)) {
3323 Parser.Lex(); // Eat the comma.
3325 // Parse and remember the operand.
3326 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3327 (N == 3 && condCodeThirdOperand) ||
3328 (N == 2 && condCodeSecondOperand),
3329 condCodeSecondOperand || condCodeThirdOperand)) {
3330 Parser.eatToEndOfStatement();
3334 // After successfully parsing some operands there are two special cases to
3335 // consider (i.e. notional operands not separated by commas). Both are due
3336 // to memory specifiers:
3337 // + An RBrac will end an address for load/store/prefetch
3338 // + An '!' will indicate a pre-indexed operation.
3340 // It's someone else's responsibility to make sure these tokens are sane
3341 // in the given context!
3342 if (Parser.getTok().is(AsmToken::RBrac)) {
3343 SMLoc Loc = Parser.getTok().getLoc();
3344 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3349 if (Parser.getTok().is(AsmToken::Exclaim)) {
3350 SMLoc Loc = Parser.getTok().getLoc();
3351 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3360 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3361 SMLoc Loc = Parser.getTok().getLoc();
3362 Parser.eatToEndOfStatement();
3363 return Error(Loc, "unexpected token in argument list");
3366 Parser.Lex(); // Consume the EndOfStatement
3370 // FIXME: This entire function is a giant hack to provide us with decent
3371 // operand range validation/diagnostics until TableGen/MC can be extended
3372 // to support autogeneration of this kind of validation.
// Performs post-match semantic validation of an already-encoded MCInst.
// Returns true (and emits a diagnostic at the relevant operand location in
// Loc) when the instruction is unpredictable or has an invalid immediate
// expression; returns false when the instruction is acceptable.
// NOTE(review): this file appears to have elided lines (closing braces and
// break/return statements are not all visible); code is kept byte-identical,
// comments only.
3373 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3374 SmallVectorImpl<SMLoc> &Loc) {
3375 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3376 // Check for indexed addressing modes w/ the base register being the
3377 // same as a destination/source register or pair load where
3378 // the Rt == Rt2. All of those are undefined behaviour.
3379 switch (Inst.getOpcode()) {
// Pre/post-indexed LDP (W/X/SW forms): operand 0 is the writeback result,
// so Rt/Rt2/Rn live in operands 1-3. The writeback base must not overlap
// either destination register (isSubRegisterEq also catches Wn vs Xn
// overlap).
3380 case AArch64::LDPSWpre:
3381 case AArch64::LDPWpost:
3382 case AArch64::LDPWpre:
3383 case AArch64::LDPXpost:
3384 case AArch64::LDPXpre: {
3385 unsigned Rt = Inst.getOperand(1).getReg();
3386 unsigned Rt2 = Inst.getOperand(2).getReg();
3387 unsigned Rn = Inst.getOperand(3).getReg();
3388 if (RI->isSubRegisterEq(Rn, Rt))
3389 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3390 "is also a destination");
3391 if (RI->isSubRegisterEq(Rn, Rt2))
3392 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3393 "is also a destination");
// Non-writeback LDP: no base-overlap hazard, but loading both destinations
// into the same register (Rt == Rt2) is unpredictable.
3396 case AArch64::LDPDi:
3397 case AArch64::LDPQi:
3398 case AArch64::LDPSi:
3399 case AArch64::LDPSWi:
3400 case AArch64::LDPWi:
3401 case AArch64::LDPXi: {
3402 unsigned Rt = Inst.getOperand(0).getReg();
3403 unsigned Rt2 = Inst.getOperand(1).getReg();
3405 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback FP/SIMD LDP: FP registers can't alias the GPR base, so only the
// Rt == Rt2 check applies (operands shifted by one for the writeback def).
3408 case AArch64::LDPDpost:
3409 case AArch64::LDPDpre:
3410 case AArch64::LDPQpost:
3411 case AArch64::LDPQpre:
3412 case AArch64::LDPSpost:
3413 case AArch64::LDPSpre:
3414 case AArch64::LDPSWpost: {
3415 unsigned Rt = Inst.getOperand(1).getReg();
3416 unsigned Rt2 = Inst.getOperand(2).getReg();
3418 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback STP: the base register must not overlap either source register.
3421 case AArch64::STPDpost:
3422 case AArch64::STPDpre:
3423 case AArch64::STPQpost:
3424 case AArch64::STPQpre:
3425 case AArch64::STPSpost:
3426 case AArch64::STPSpre:
3427 case AArch64::STPWpost:
3428 case AArch64::STPWpre:
3429 case AArch64::STPXpost:
3430 case AArch64::STPXpre: {
3431 unsigned Rt = Inst.getOperand(1).getReg();
3432 unsigned Rt2 = Inst.getOperand(2).getReg();
3433 unsigned Rn = Inst.getOperand(3).getReg();
3434 if (RI->isSubRegisterEq(Rn, Rt))
3435 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3436 "is also a source");
3437 if (RI->isSubRegisterEq(Rn, Rt2))
3438 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3439 "is also a source");
// Writeback single-register LDR: base must not overlap the destination.
3442 case AArch64::LDRBBpre:
3443 case AArch64::LDRBpre:
3444 case AArch64::LDRHHpre:
3445 case AArch64::LDRHpre:
3446 case AArch64::LDRSBWpre:
3447 case AArch64::LDRSBXpre:
3448 case AArch64::LDRSHWpre:
3449 case AArch64::LDRSHXpre:
3450 case AArch64::LDRSWpre:
3451 case AArch64::LDRWpre:
3452 case AArch64::LDRXpre:
3453 case AArch64::LDRBBpost:
3454 case AArch64::LDRBpost:
3455 case AArch64::LDRHHpost:
3456 case AArch64::LDRHpost:
3457 case AArch64::LDRSBWpost:
3458 case AArch64::LDRSBXpost:
3459 case AArch64::LDRSHWpost:
3460 case AArch64::LDRSHXpost:
3461 case AArch64::LDRSWpost:
3462 case AArch64::LDRWpost:
3463 case AArch64::LDRXpost: {
3464 unsigned Rt = Inst.getOperand(1).getReg();
3465 unsigned Rn = Inst.getOperand(2).getReg();
3466 if (RI->isSubRegisterEq(Rn, Rt))
3467 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3468 "is also a source");
// Writeback single-register STR: base must not overlap the source.
3471 case AArch64::STRBBpost:
3472 case AArch64::STRBpost:
3473 case AArch64::STRHHpost:
3474 case AArch64::STRHpost:
3475 case AArch64::STRWpost:
3476 case AArch64::STRXpost:
3477 case AArch64::STRBBpre:
3478 case AArch64::STRBpre:
3479 case AArch64::STRHHpre:
3480 case AArch64::STRHpre:
3481 case AArch64::STRWpre:
3482 case AArch64::STRXpre: {
3483 unsigned Rt = Inst.getOperand(1).getReg();
3484 unsigned Rn = Inst.getOperand(2).getReg();
3485 if (RI->isSubRegisterEq(Rn, Rt))
3486 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3487 "is also a source");
3492 // Now check immediate ranges. Separate from the above as there is overlap
3493 // in the instructions being checked and this keeps the nested conditionals
3495 switch (Inst.getOpcode()) {
// ADD/SUB (immediate): symbolic immediates are only legal for a small set of
// relocation kinds, and only on the ADD forms listed below.
3496 case AArch64::ADDSWri:
3497 case AArch64::ADDSXri:
3498 case AArch64::ADDWri:
3499 case AArch64::ADDXri:
3500 case AArch64::SUBSWri:
3501 case AArch64::SUBSXri:
3502 case AArch64::SUBWri:
3503 case AArch64::SUBXri: {
3504 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3505 // some slight duplication here.
3506 if (Inst.getOperand(2).isExpr()) {
3507 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3508 AArch64MCExpr::VariantKind ELFRefKind;
3509 MCSymbolRefExpr::VariantKind DarwinRefKind;
3511 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3512 return Error(Loc[2], "invalid immediate expression");
3515 // Only allow these with ADDXri.
3516 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3517 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3518 Inst.getOpcode() == AArch64::ADDXri)
3521 // Only allow these with ADDXri/ADDWri
3522 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3523 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3524 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3525 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3526 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3527 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3528 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3529 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3530 (Inst.getOpcode() == AArch64::ADDXri ||
3531 Inst.getOpcode() == AArch64::ADDWri))
3534 // Don't allow expressions in the immediate field otherwise
3535 return Error(Loc[2], "invalid immediate expression");
// Maps a matcher error code (Match_*) to a human-readable diagnostic emitted
// at Loc. Always returns true (the Error() convention for "parse failed").
// NOTE(review): the switch head and some case labels appear elided in this
// file; code is kept byte-identical, comments only.
3544 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3546 case Match_MissingFeature:
3548 "instruction requires a CPU feature not currently enabled");
3549 case Match_InvalidOperand:
3550 return Error(Loc, "invalid operand for instruction");
3551 case Match_InvalidSuffix:
3552 return Error(Loc, "invalid type suffix for instruction");
3553 case Match_InvalidCondCode:
3554 return Error(Loc, "expected AArch64 condition code");
3555 case Match_AddSubRegExtendSmall:
3557 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3558 case Match_AddSubRegExtendLarge:
3560 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3561 case Match_AddSubSecondSource:
3563 "expected compatible register, symbol or integer in range [0, 4095]");
3564 case Match_LogicalSecondSource:
3565 return Error(Loc, "expected compatible register or logical immediate");
3566 case Match_InvalidMovImm32Shift:
3567 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3568 case Match_InvalidMovImm64Shift:
3569 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3570 case Match_AddSubRegShift32:
3572 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3573 case Match_AddSubRegShift64:
3575 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3576 case Match_InvalidFPImm:
3578 "expected compatible register or floating-point constant");
// Memory-offset diagnostics: scaled/unscaled immediate ranges per access
// size.
3579 case Match_InvalidMemoryIndexedSImm9:
3580 return Error(Loc, "index must be an integer in range [-256, 255].");
3581 case Match_InvalidMemoryIndexed4SImm7:
3582 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3583 case Match_InvalidMemoryIndexed8SImm7:
3584 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3585 case Match_InvalidMemoryIndexed16SImm7:
3586 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Register-offset extend diagnostics, by access size (W then X forms).
3587 case Match_InvalidMemoryWExtend8:
3589 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3590 case Match_InvalidMemoryWExtend16:
3592 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3593 case Match_InvalidMemoryWExtend32:
3595 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3596 case Match_InvalidMemoryWExtend64:
3598 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3599 case Match_InvalidMemoryWExtend128:
3601 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3602 case Match_InvalidMemoryXExtend8:
3604 "expected 'lsl' or 'sxtx' with optional shift of #0");
3605 case Match_InvalidMemoryXExtend16:
3607 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3608 case Match_InvalidMemoryXExtend32:
3610 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3611 case Match_InvalidMemoryXExtend64:
3613 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3614 case Match_InvalidMemoryXExtend128:
3616 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
// Unsigned scaled-offset diagnostics, by access size.
3617 case Match_InvalidMemoryIndexed1:
3618 return Error(Loc, "index must be an integer in range [0, 4095].");
3619 case Match_InvalidMemoryIndexed2:
3620 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3621 case Match_InvalidMemoryIndexed4:
3622 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3623 case Match_InvalidMemoryIndexed8:
3624 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3625 case Match_InvalidMemoryIndexed16:
3626 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Plain immediate-range diagnostics.
3627 case Match_InvalidImm0_1:
3628 return Error(Loc, "immediate must be an integer in range [0, 1].");
3629 case Match_InvalidImm0_7:
3630 return Error(Loc, "immediate must be an integer in range [0, 7].");
3631 case Match_InvalidImm0_15:
3632 return Error(Loc, "immediate must be an integer in range [0, 15].");
3633 case Match_InvalidImm0_31:
3634 return Error(Loc, "immediate must be an integer in range [0, 31].");
3635 case Match_InvalidImm0_63:
3636 return Error(Loc, "immediate must be an integer in range [0, 63].");
3637 case Match_InvalidImm0_127:
3638 return Error(Loc, "immediate must be an integer in range [0, 127].");
3639 case Match_InvalidImm0_65535:
3640 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3641 case Match_InvalidImm1_8:
3642 return Error(Loc, "immediate must be an integer in range [1, 8].");
3643 case Match_InvalidImm1_16:
3644 return Error(Loc, "immediate must be an integer in range [1, 16].");
3645 case Match_InvalidImm1_32:
3646 return Error(Loc, "immediate must be an integer in range [1, 32].");
3647 case Match_InvalidImm1_64:
3648 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane-index diagnostics, by element type.
3649 case Match_InvalidIndex1:
3650 return Error(Loc, "expected lane specifier '[1]'");
3651 case Match_InvalidIndexB:
3652 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3653 case Match_InvalidIndexH:
3654 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3655 case Match_InvalidIndexS:
3656 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3657 case Match_InvalidIndexD:
3658 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3659 case Match_InvalidLabel:
3660 return Error(Loc, "expected label or encodable integer pc offset");
3662 return Error(Loc, "expected readable system register");
3664 return Error(Loc, "expected writable system register or pstate");
3665 case Match_MnemonicFail:
3666 return Error(Loc, "unrecognized instruction mnemonic");
// Every known error code is handled above; anything else is a matcher bug.
3668 llvm_unreachable("unexpected error code!");
3672 static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit hook called once per parsed instruction. First
// rewrites several aliases the TableGen matcher cannot express (lsl->ubfm,
// bfc->bfm, bfi/sbfiz/ubfiz->*bfm, bfxil/sbfx/ubfx->*bfm, sxt*/uxt*
// register-width fixups, fmov #0.0), then runs the generated matcher
// (short-form NEON table first, long-form second), validates the result, and
// emits it or reports a diagnostic.
// NOTE(review): lines appear elided in this file (some braces/returns are not
// visible); code kept byte-identical, comments only.
3674 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3675 OperandVector &Operands,
3677 uint64_t &ErrorInfo,
3678 bool MatchingInlineAsm) {
3679 assert(!Operands.empty() && "Unexpect empty operand list!");
3680 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3681 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3683 StringRef Tok = Op.getToken();
3684 unsigned NumOperands = Operands.size();
// LSL (immediate) is an alias of UBFM; rewrite the operands here because the
// immediate transformation (32/64 - shift) can't be expressed by InstAlias.
3686 if (NumOperands == 4 && Tok == "lsl") {
3687 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3688 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3689 if (Op2.isReg() && Op3.isImm()) {
3690 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3692 uint64_t Op3Val = Op3CE->getValue();
3693 uint64_t NewOp3Val = 0;
3694 uint64_t NewOp4Val = 0;
// Register width decides the immediate encoding: W regs use mod-32,
// X regs mod-64.
3695 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3697 NewOp3Val = (32 - Op3Val) & 0x1f;
3698 NewOp4Val = 31 - Op3Val;
3700 NewOp3Val = (64 - Op3Val) & 0x3f;
3701 NewOp4Val = 63 - Op3Val;
3704 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
3705 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
3707 Operands[0] = AArch64Operand::CreateToken(
3708 "ubfm", false, Op.getStartLoc(), getContext());
3709 Operands.push_back(AArch64Operand::CreateImm(
3710 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3711 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3712 Op3.getEndLoc(), getContext());
3715 } else if (NumOperands == 4 && Tok == "bfc") {
3716 // FIXME: Horrible hack to handle BFC->BFM alias.
3717 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
// NOTE(review): LSBOp and WidthOp are copied by value here (unlike Op1 just
// above, which binds a reference) — presumably unintended; confirm whether
// AArch64Operand is cheap/safe to copy or a reference was meant.
3718 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
3719 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
3721 if (Op1.isReg() && LSBOp.isImm() && WidthOp.isImm()) {
3722 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
3723 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
3725 if (LSBCE && WidthCE) {
3726 uint64_t LSB = LSBCE->getValue();
3727 uint64_t Width = WidthCE->getValue();
3729 uint64_t RegWidth = 0;
3730 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
// Range-check lsb/width against the destination register's width before
// computing the BFM immr/imms encoding.
3736 if (LSB >= RegWidth)
3737 return Error(LSBOp.getStartLoc(),
3738 "expected integer in range [0, 31]");
3739 if (Width < 1 || Width > RegWidth)
3740 return Error(WidthOp.getStartLoc(),
3741 "expected integer in range [1, 32]");
3745 ImmR = (32 - LSB) & 0x1f;
3747 ImmR = (64 - LSB) & 0x3f;
3749 uint64_t ImmS = Width - 1;
3751 if (ImmR != 0 && ImmS >= ImmR)
3752 return Error(WidthOp.getStartLoc(),
3753 "requested insert overflows register");
3755 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
3756 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
3757 Operands[0] = AArch64Operand::CreateToken(
3758 "bfm", false, Op.getStartLoc(), getContext());
// BFC has no source register; BFM uses the zero register of matching width.
3759 Operands[2] = AArch64Operand::CreateReg(
3760 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, false, SMLoc(),
3761 SMLoc(), getContext());
3762 Operands[3] = AArch64Operand::CreateImm(
3763 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
3764 Operands.emplace_back(
3765 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
3766 WidthOp.getEndLoc(), getContext()));
3769 } else if (NumOperands == 5) {
3770 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3771 // UBFIZ -> UBFM aliases.
3772 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3773 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3774 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3775 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3777 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3778 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3779 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3781 if (Op3CE && Op4CE) {
3782 uint64_t Op3Val = Op3CE->getValue();
3783 uint64_t Op4Val = Op4CE->getValue();
3785 uint64_t RegWidth = 0;
3786 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3792 if (Op3Val >= RegWidth)
3793 return Error(Op3.getStartLoc(),
3794 "expected integer in range [0, 31]");
3795 if (Op4Val < 1 || Op4Val > RegWidth)
3796 return Error(Op4.getStartLoc(),
3797 "expected integer in range [1, 32]");
// immr = (regwidth - lsb) mod regwidth; imms = width - 1, as for BFC above.
3799 uint64_t NewOp3Val = 0;
3801 NewOp3Val = (32 - Op3Val) & 0x1f;
3803 NewOp3Val = (64 - Op3Val) & 0x3f;
3805 uint64_t NewOp4Val = Op4Val - 1;
3807 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3808 return Error(Op4.getStartLoc(),
3809 "requested insert overflows register");
3811 const MCExpr *NewOp3 =
3812 MCConstantExpr::create(NewOp3Val, getContext());
3813 const MCExpr *NewOp4 =
3814 MCConstantExpr::create(NewOp4Val, getContext());
3815 Operands[3] = AArch64Operand::CreateImm(
3816 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3817 Operands[4] = AArch64Operand::CreateImm(
3818 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
// Pick the *BFM mnemonic matching the parsed alias.
3820 Operands[0] = AArch64Operand::CreateToken(
3821 "bfm", false, Op.getStartLoc(), getContext());
3822 else if (Tok == "sbfiz")
3823 Operands[0] = AArch64Operand::CreateToken(
3824 "sbfm", false, Op.getStartLoc(), getContext());
3825 else if (Tok == "ubfiz")
3826 Operands[0] = AArch64Operand::CreateToken(
3827 "ubfm", false, Op.getStartLoc(), getContext());
3829 llvm_unreachable("No valid mnemonic for alias?");
3833 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3834 // UBFX -> UBFM aliases.
3835 } else if (NumOperands == 5 &&
3836 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3837 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3838 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3839 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3841 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3842 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3843 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3845 if (Op3CE && Op4CE) {
3846 uint64_t Op3Val = Op3CE->getValue();
3847 uint64_t Op4Val = Op4CE->getValue();
3849 uint64_t RegWidth = 0;
3850 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3856 if (Op3Val >= RegWidth)
3857 return Error(Op3.getStartLoc(),
3858 "expected integer in range [0, 31]");
3859 if (Op4Val < 1 || Op4Val > RegWidth)
3860 return Error(Op4.getStartLoc(),
3861 "expected integer in range [1, 32]");
// Extract form: imms = lsb + width - 1 (immr stays the lsb).
3863 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3865 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3866 return Error(Op4.getStartLoc(),
3867 "requested extract overflows register");
3869 const MCExpr *NewOp4 =
3870 MCConstantExpr::create(NewOp4Val, getContext());
3871 Operands[4] = AArch64Operand::CreateImm(
3872 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3874 Operands[0] = AArch64Operand::CreateToken(
3875 "bfm", false, Op.getStartLoc(), getContext());
3876 else if (Tok == "sbfx")
3877 Operands[0] = AArch64Operand::CreateToken(
3878 "sbfm", false, Op.getStartLoc(), getContext());
3879 else if (Tok == "ubfx")
3880 Operands[0] = AArch64Operand::CreateToken(
3881 "ubfm", false, Op.getStartLoc(), getContext());
3883 llvm_unreachable("No valid mnemonic for alias?");
3888 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3889 // InstAlias can't quite handle this since the reg classes aren't
3891 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3892 // The source register can be Wn here, but the matcher expects a
3893 // GPR64. Twiddle it here if necessary.
3894 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3896 unsigned Reg = getXRegFromWReg(Op.getReg());
3897 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3898 Op.getEndLoc(), getContext());
3901 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3902 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3903 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3905 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3907 // The source register can be Wn here, but the matcher expects a
3908 // GPR64. Twiddle it here if necessary.
3909 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3911 unsigned Reg = getXRegFromWReg(Op.getReg());
3912 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3913 Op.getEndLoc(), getContext());
3917 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3918 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3919 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3921 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3923 // The source register can be Wn here, but the matcher expects a
3924 // GPR32. Twiddle it here if necessary.
3925 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3927 unsigned Reg = getWRegFromXReg(Op.getReg());
3928 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3929 Op.getEndLoc(), getContext());
3934 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3935 if (NumOperands == 3 && Tok == "fmov") {
3936 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3937 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
// getFPImm() == (unsigned)-1 is the sentinel the operand parser uses for
// the #0.0 literal; replace it with the zero register of matching width.
3938 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3940 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3944 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3945 Op.getEndLoc(), getContext());
3950 // First try to match against the secondary set of tables containing the
3951 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3952 unsigned MatchResult =
3953 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3955 // If that fails, try against the alternate table containing long-form NEON:
3956 // "fadd v0.2s, v1.2s, v2.2s"
3957 if (MatchResult != Match_Success) {
3958 // But first, save the short-form match result: we can use it in case the
3959 // long-form match also fails.
3960 auto ShortFormNEONErrorInfo = ErrorInfo;
3961 auto ShortFormNEONMatchResult = MatchResult;
3964 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3966 // Now, both matches failed, and the long-form match failed on the mnemonic
3967 // suffix token operand. The short-form match failure is probably more
3968 // relevant: use it instead.
3969 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
3970 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
3971 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
3972 MatchResult = ShortFormNEONMatchResult;
3973 ErrorInfo = ShortFormNEONErrorInfo;
3978 switch (MatchResult) {
3979 case Match_Success: {
3980 // Perform range checking and other semantic validations
3981 SmallVector<SMLoc, 8> OperandLocs;
3982 NumOperands = Operands.size();
// Operand 0 is the mnemonic token, so start collecting locations at 1;
// validateInstruction indexes these as Loc[0..].
3983 for (unsigned i = 1; i < NumOperands; ++i)
3984 OperandLocs.push_back(Operands[i]->getStartLoc());
3985 if (validateInstruction(Inst, OperandLocs))
3989 Out.EmitInstruction(Inst, STI);
3992 case Match_MissingFeature: {
3993 assert(ErrorInfo && "Unknown missing feature!");
3994 // Special case the error message for the very common case where only
3995 // a single subtarget feature is missing (neon, e.g.).
3996 std::string Msg = "instruction requires:";
// Walk the feature bitmask and append each missing feature's name.
3998 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3999 if (ErrorInfo & Mask) {
4001 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4005 return Error(IDLoc, Msg);
4007 case Match_MnemonicFail:
4008 return showMatchError(IDLoc, MatchResult);
4009 case Match_InvalidOperand: {
4010 SMLoc ErrorLoc = IDLoc;
// ErrorInfo == ~0ULL means the matcher couldn't attribute the failure to a
// specific operand; otherwise it's the failing operand's index.
4012 if (ErrorInfo != ~0ULL) {
4013 if (ErrorInfo >= Operands.size())
4014 return Error(IDLoc, "too few operands for instruction");
4016 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4017 if (ErrorLoc == SMLoc())
4020 // If the match failed on a suffix token operand, tweak the diagnostic
4022 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
4023 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
4024 MatchResult = Match_InvalidSuffix;
4026 return showMatchError(ErrorLoc, MatchResult);
// All remaining range/format diagnostics share the same handling: point at
// the failing operand (if known) and defer to showMatchError.
4028 case Match_InvalidMemoryIndexed1:
4029 case Match_InvalidMemoryIndexed2:
4030 case Match_InvalidMemoryIndexed4:
4031 case Match_InvalidMemoryIndexed8:
4032 case Match_InvalidMemoryIndexed16:
4033 case Match_InvalidCondCode:
4034 case Match_AddSubRegExtendSmall:
4035 case Match_AddSubRegExtendLarge:
4036 case Match_AddSubSecondSource:
4037 case Match_LogicalSecondSource:
4038 case Match_AddSubRegShift32:
4039 case Match_AddSubRegShift64:
4040 case Match_InvalidMovImm32Shift:
4041 case Match_InvalidMovImm64Shift:
4042 case Match_InvalidFPImm:
4043 case Match_InvalidMemoryWExtend8:
4044 case Match_InvalidMemoryWExtend16:
4045 case Match_InvalidMemoryWExtend32:
4046 case Match_InvalidMemoryWExtend64:
4047 case Match_InvalidMemoryWExtend128:
4048 case Match_InvalidMemoryXExtend8:
4049 case Match_InvalidMemoryXExtend16:
4050 case Match_InvalidMemoryXExtend32:
4051 case Match_InvalidMemoryXExtend64:
4052 case Match_InvalidMemoryXExtend128:
4053 case Match_InvalidMemoryIndexed4SImm7:
4054 case Match_InvalidMemoryIndexed8SImm7:
4055 case Match_InvalidMemoryIndexed16SImm7:
4056 case Match_InvalidMemoryIndexedSImm9:
4057 case Match_InvalidImm0_1:
4058 case Match_InvalidImm0_7:
4059 case Match_InvalidImm0_15:
4060 case Match_InvalidImm0_31:
4061 case Match_InvalidImm0_63:
4062 case Match_InvalidImm0_127:
4063 case Match_InvalidImm0_65535:
4064 case Match_InvalidImm1_8:
4065 case Match_InvalidImm1_16:
4066 case Match_InvalidImm1_32:
4067 case Match_InvalidImm1_64:
4068 case Match_InvalidIndex1:
4069 case Match_InvalidIndexB:
4070 case Match_InvalidIndexH:
4071 case Match_InvalidIndexS:
4072 case Match_InvalidIndexD:
4073 case Match_InvalidLabel:
4076 if (ErrorInfo >= Operands.size())
4077 return Error(IDLoc, "too few operands for instruction");
4078 // Any time we get here, there's nothing fancy to do. Just get the
4079 // operand SMLoc and display the diagnostic.
4080 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
4081 if (ErrorLoc == SMLoc())
4083 return showMatchError(ErrorLoc, MatchResult);
4087 llvm_unreachable("Implement any new match types added!");
4090 /// ParseDirective parses the arm specific directives
/// Dispatches on the directive identifier to the matching parseDirective*
/// helper. Object-file-format-specific directives (.inst, .loh) are gated on
/// the container format; unknown directives fall through to parseDirectiveLOH.
/// NOTE(review): some lines appear elided in this file; code kept
/// byte-identical, comments only.
4091 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
4092 const MCObjectFileInfo::Environment Format =
4093 getContext().getObjectFileInfo()->getObjectFileType();
4094 bool IsMachO = Format == MCObjectFileInfo::IsMachO;
4095 bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
4097 StringRef IDVal = DirectiveID.getIdentifier();
4098 SMLoc Loc = DirectiveID.getLoc();
// Data directives: .hword/.word/.xword emit 2/4/8-byte values.
4099 if (IDVal == ".hword")
4100 return parseDirectiveWord(2, Loc);
4101 if (IDVal == ".word")
4102 return parseDirectiveWord(4, Loc);
4103 if (IDVal == ".xword")
4104 return parseDirectiveWord(8, Loc);
4105 if (IDVal == ".tlsdesccall")
4106 return parseDirectiveTLSDescCall(Loc);
4107 if (IDVal == ".ltorg" || IDVal == ".pool")
4108 return parseDirectiveLtorg(Loc);
4109 if (IDVal == ".unreq")
4110 return parseDirectiveUnreq(Loc);
// .inst (raw encoded instruction words) is only meaningful for ELF output.
4112 if (!IsMachO && !IsCOFF) {
4113 if (IDVal == ".inst")
4114 return parseDirectiveInst(Loc);
4117 return parseDirectiveLOH(IDVal, Loc);
4120 /// parseDirectiveWord
4121 ///  ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value into the
/// current section. Returns true on parse error.
/// NOTE(review): the loop's closing lines appear elided in this file; code
/// kept byte-identical, comments only.
4122 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4123 MCAsmParser &Parser = getParser();
4124 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4126 const MCExpr *Value;
4127 if (getParser().parseExpression(Value))
4130 getParser().getStreamer().EmitValue(Value, Size);
// End of the list terminates the directive; anything but a comma is an
// error.
4132 if (getLexer().is(AsmToken::EndOfStatement))
4135 // FIXME: Improve diagnostic.
4136 if (getLexer().isNot(AsmToken::Comma))
4137 return Error(L, "unexpected token in directive");
4146 /// parseDirectiveInst
4147 ///  ::= .inst opcode [, ...]
/// Emits each comma-separated constant expression as a raw encoded
/// instruction word via the target streamer. Each value must fold to an
/// MCConstantExpr.
/// NOTE(review): some error-return lines appear elided in this file; code
/// kept byte-identical, comments only.
4148 bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
4149 MCAsmParser &Parser = getParser();
// An empty operand list is an error: at least one opcode is required.
4150 if (getLexer().is(AsmToken::EndOfStatement)) {
4151 Parser.eatToEndOfStatement();
4152 Error(Loc, "expected expression following directive");
4159 if (getParser().parseExpression(Expr)) {
4160 Error(Loc, "expected expression");
4164 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
4166 Error(Loc, "expected constant expression");
4170 getTargetStreamer().emitInst(Value->getValue());
4172 if (getLexer().is(AsmToken::EndOfStatement))
4175 if (getLexer().isNot(AsmToken::Comma)) {
4176 Error(Loc, "unexpected token in directive");
4180 Parser.Lex(); // Eat comma.
4187 // parseDirectiveTLSDescCall:
4188 //   ::= .tlsdesccall symbol
// Emits the pseudo-instruction TLSDESCCALL carrying a VK_TLSDESC symbol
// reference, which later lowers to the TLS descriptor call relocation.
4189 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4191 if (getParser().parseIdentifier(Name))
4192 return Error(L, "expected symbol after directive");
4194 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
4195 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
// Wrap the plain symbol ref with the TLSDESC variant so the relocation kind
// survives into the object writer.
4196 Expr = AArch64MCExpr::create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
4199 Inst.setOpcode(AArch64::TLSDESCCALL);
4200 Inst.addOperand(MCOperand::createExpr(Expr));
4202 getParser().getStreamer().EmitInstruction(Inst, STI);
4206 /// ::= .loh <lohName | lohId> label1, ..., labelN
4207 /// The number of arguments depends on the loh identifier.
/// Parses a Mach-O linker-optimization-hint directive: the hint kind (by name
/// or numeric id) followed by the exact number of label arguments that kind
/// requires, then forwards them to the streamer.
/// NOTE(review): some lines appear elided in this file; code kept
/// byte-identical, comments only.
4208 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4209 if (IDVal != MCLOHDirectiveName())
4212 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4213 if (getParser().getTok().isNot(AsmToken::Integer))
4214 return TokError("expected an identifier or a number in directive")
4215 // We successfully get a numeric value for the identifier.
4216 // Check if it is valid.
4217 int64_t Id = getParser().getTok().getIntVal();
// NOTE(review): `Id <= -1U` mixes a signed 64-bit value with an unsigned
// 32-bit literal (usual-arithmetic-conversion applies) — confirm the
// intended range check; looks like it should reject ids outside the MCLOH
// range.
4218 if (Id <= -1U && !isValidMCLOHType(Id))
4219 return TokError("invalid numeric identifier in directive");
4220 Kind = (MCLOHType)Id;
4222 StringRef Name = getTok().getIdentifier();
4223 // We successfully parse an identifier.
4224 // Check if it is a recognized one.
4225 int Id = MCLOHNameToId(Name);
4228 return TokError("invalid identifier in directive");
4229 Kind = (MCLOHType)Id;
4231 // Consume the identifier.
4233 // Get the number of arguments of this LOH.
4234 int NbArgs = MCLOHIdToNbArgs(Kind);
4236 assert(NbArgs != -1 && "Invalid number of arguments");
4238 SmallVector<MCSymbol *, 3> Args;
// Collect exactly NbArgs comma-separated label identifiers.
4239 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4241 if (getParser().parseIdentifier(Name))
4242 return TokError("expected identifier in directive");
4243 Args.push_back(getContext().getOrCreateSymbol(Name));
4245 if (Idx + 1 == NbArgs)
4247 if (getLexer().isNot(AsmToken::Comma))
4248 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4251 if (getLexer().isNot(AsmToken::EndOfStatement))
4252 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4254 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4258 /// parseDirectiveLtorg
4259 /// ::= .ltorg | .pool
// Flushes the pending literal/constant pool at the current position in the
// output stream. (The trailing return is elided in this excerpt.)
4260 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4261 getTargetStreamer().emitCurrentConstantPool();
4265 /// parseDirectiveReq
4266 /// ::= name .req registername
// Defines a register alias: records Name -> (IsVector, RegNum) in
// RegisterReqs. Tries a scalar register first, then a type-less vector
// register. NOTE(review): several branch bodies/returns are elided in this
// excerpt (original numbering has gaps), including where IsVector is
// presumably set on the vector path — confirm against the full source.
4267 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4268 MCAsmParser &Parser = getParser();
4269 Parser.Lex(); // Eat the '.req' token.
4270 SMLoc SRegLoc = getLoc();
4271 unsigned RegNum = tryParseRegister();
4272 bool IsVector = false;
// Scalar parse failed: retry as a vector register. A vector register with
// an explicit type suffix (non-empty Kind) is rejected for .req.
4274 if (RegNum == static_cast<unsigned>(-1)) {
4276 RegNum = tryMatchVectorRegister(Kind, false);
4277 if (!Kind.empty()) {
4278 Error(SRegLoc, "vector register without type specifier expected");
// Neither scalar nor vector parse succeeded: report and resynchronize.
4284 if (RegNum == static_cast<unsigned>(-1)) {
4285 Parser.eatToEndOfStatement();
4286 Error(SRegLoc, "register name or alias expected");
4290 // Shouldn't be anything else.
4291 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4292 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4293 Parser.eatToEndOfStatement();
4297 Parser.Lex(); // Consume the EndOfStatement
// Insert keeps any existing mapping; if the stored pair differs from the
// one requested here, the redefinition is ignored with a warning.
4299 auto pair = std::make_pair(IsVector, RegNum);
4300 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
4301 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4306 /// parseDirectiveUnreq
4307 /// ::= .unreq registername
// Removes a register alias previously created via .req. The alias name is
// lowercased before lookup — presumably matching how .req aliases are
// matched case-insensitively; confirm against the alias-lookup code.
4308 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4309 MCAsmParser &Parser = getParser();
4310 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4311 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4312 Parser.eatToEndOfStatement();
// Erasing a name that was never defined is a harmless no-op.
4315 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4316 Parser.Lex(); // Eat the identifier.
// Decomposes Expr into (optional AArch64 ELF modifier) + symbol reference +
// (optional constant addend), writing the pieces into the out-parameters.
// Returns true only for shapes this parser can relocate, and rejects
// expressions that mix ELF and Darwin reference syntax.
// NOTE(review): the return-type line and several early-exit guards are
// elided in this excerpt — original numbering shows gaps.
4321 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4322 AArch64MCExpr::VariantKind &ELFRefKind,
4323 MCSymbolRefExpr::VariantKind &DarwinRefKind,
// Initialize outputs to "no modifier" defaults.
4325 ELFRefKind = AArch64MCExpr::VK_INVALID;
4326 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off an AArch64-specific modifier wrapper (e.g. :lo12:) if present.
4329 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4330 ELFRefKind = AE->getKind();
4331 Expr = AE->getSubExpr();
4334 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4336 // It's a simple symbol reference with no addend.
4337 DarwinRefKind = SE->getKind();
// Otherwise expect a binary expression: symbol +/- constant addend.
4341 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4345 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4348 DarwinRefKind = SE->getKind();
// Only additive combinations of symbol and addend are representable.
4350 if (BE->getOpcode() != MCBinaryExpr::Add &&
4351 BE->getOpcode() != MCBinaryExpr::Sub)
4354 // See if the addend is a constant, otherwise there's more going
4355 // on here than we can deal with.
4356 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
// For Sub, the addend's sign is flipped (negation elided in this excerpt).
4360 Addend = AddendExpr->getValue();
4361 if (BE->getOpcode() == MCBinaryExpr::Sub)
4364 // It's some symbol reference + a constant addend, but really
4365 // shouldn't use both Darwin and ELF syntax.
4366 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4367 DarwinRefKind == MCSymbolRefExpr::VK_None;
4370 /// Force static initialization.
// Registers this asm parser for the little-endian and big-endian AArch64
// targets and the Darwin-style "arm64" target alias.
4371 extern "C" void LLVMInitializeAArch64AsmParser() {
4372 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4373 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4374 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4377 #define GET_REGISTER_MATCHER
4378 #define GET_SUBTARGET_FEATURE_NAME
4379 #define GET_MATCHER_IMPLEMENTATION
4380 #include "AArch64GenAsmMatcher.inc"
4382 // Define this matcher function after the auto-generated include so we
4383 // have the match class enum definitions.
// Target hook invoked by the generated matcher: validates operands for
// match classes the generic matcher cannot handle itself — here, token
// classes representing fixed literal immediates from InstAliases.
// NOTE(review): the switch that maps the match-class Kind to ExpectedVal
// (original lines ~4394-4434) is elided from this excerpt.
4384 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4386 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4387 // If the kind is a token for a literal immediate, check if our asm
4388 // operand matches. This is for InstAliases which have a fixed-value
4389 // immediate in the syntax.
4390 int64_t ExpectedVal;
4393 return Match_InvalidOperand;
4435 return Match_InvalidOperand;
// The operand must be a plain constant immediate equal to ExpectedVal;
// anything else (relocatable expression, wrong value) is rejected.
4436 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4438 return Match_InvalidOperand;
4439 if (CE->getValue() == ExpectedVal)
4440 return Match_Success;
4441 return Match_InvalidOperand;
// Parses a consecutive even/odd GPR pair operand ("x0, x1" or "w2, w3"),
// as used by e.g. CASP. Both registers must be the same width, the first
// must have an even encoding, and the second must be encoding+1. The pair
// is represented as the matching XSeqPairs/WSeqPairs super-register.
// NOTE(review): a few lines are elided in this excerpt (lexer consume of
// the comma, declaration of Pair, and the if/else selecting the X vs W
// super-register class) — original numbering shows gaps.
4445 AArch64AsmParser::OperandMatchResultTy
4446 AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
// First token must be a register identifier.
4450 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4451 Error(S, "expected register");
4452 return MatchOperand_ParseFail;
4455 int FirstReg = tryParseRegister();
4456 if (FirstReg == -1) {
4457 return MatchOperand_ParseFail;
4459 const MCRegisterClass &WRegClass =
4460 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
4461 const MCRegisterClass &XRegClass =
4462 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
// The first register fixes the width (X vs W) for the whole pair.
4464 bool isXReg = XRegClass.contains(FirstReg),
4465 isWReg = WRegClass.contains(FirstReg);
4466 if (!isXReg && !isWReg) {
4467 Error(S, "expected first even register of a "
4468 "consecutive same-size even/odd register pair");
4469 return MatchOperand_ParseFail;
// The pair must start on an even hardware encoding.
4472 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4473 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
4475 if (FirstEncoding & 0x1) {
4476 Error(S, "expected first even register of a "
4477 "consecutive same-size even/odd register pair");
4478 return MatchOperand_ParseFail;
4482 if (getParser().getTok().isNot(AsmToken::Comma)) {
4483 Error(M, "expected comma");
4484 return MatchOperand_ParseFail;
4490 int SecondReg = tryParseRegister();
4491 if (SecondReg ==-1) {
4492 return MatchOperand_ParseFail;
// Second register must be the odd successor, in the same register class.
4495 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
4496 (isXReg && !XRegClass.contains(SecondReg)) ||
4497 (isWReg && !WRegClass.contains(SecondReg))) {
4498 Error(E,"expected second odd register of a "
4499 "consecutive same-size even/odd register pair");
4500 return MatchOperand_ParseFail;
// Fold the two registers into the covering sequential-pair super-register
// via the sube64/sube32 subregister index.
4505 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
4506 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
4508 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
4509 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
4512 Operands.push_back(AArch64Operand::CreateReg(Pair, false, S, getLoc(),
4515 return MatchOperand_Success;