1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/AArch64AddressingModes.h"
11 #include "MCTargetDesc/AArch64MCExpr.h"
12 #include "Utils/AArch64BaseInfo.h"
13 #include "llvm/ADT/APInt.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallString.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Twine.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCExpr.h"
21 #include "llvm/MC/MCInst.h"
22 #include "llvm/MC/MCParser/MCAsmLexer.h"
23 #include "llvm/MC/MCParser/MCAsmParser.h"
24 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
25 #include "llvm/MC/MCRegisterInfo.h"
26 #include "llvm/MC/MCStreamer.h"
27 #include "llvm/MC/MCSubtargetInfo.h"
28 #include "llvm/MC/MCSymbol.h"
29 #include "llvm/MC/MCTargetAsmParser.h"
30 #include "llvm/Support/ErrorHandling.h"
31 #include "llvm/Support/SourceMgr.h"
32 #include "llvm/Support/TargetRegistry.h"
33 #include "llvm/Support/raw_ostream.h"
// AArch64AsmParser - target-specific MCTargetAsmParser that turns AArch64
// assembly text into MCInst instructions via the TableGen'erated matcher.
// NOTE(review): this listing is sampled; several guard lines and closing
// braces of the class body are elided from view.
41 class AArch64AsmParser : public MCTargetAsmParser {
43 StringRef Mnemonic; ///< Instruction mnemonic.
47 // Map of register aliases registers via the .req directive.
48 StringMap<std::pair<bool, unsigned> > RegisterReqs;
// Casts the generic target streamer to the AArch64 one; the constructor
// below guarantees a target streamer exists before this is called.
50 AArch64TargetStreamer &getTargetStreamer() {
51 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
52 return static_cast<AArch64TargetStreamer &>(TS);
// Convenience accessors for the underlying parser, lexer and current
// source location.
55 MCAsmParser &getParser() const { return Parser; }
56 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
58 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Hand-written parsing helpers for operands, registers, condition codes,
// symbolic immediates and vector register lists.
60 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
61 AArch64CC::CondCode parseCondCodeString(StringRef Cond);
62 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
63 unsigned matchRegisterNameAlias(StringRef Name, bool isVector);
64 int tryParseRegister();
65 int tryMatchVectorRegister(StringRef &Kind, bool expected);
66 bool parseRegister(OperandVector &Operands);
67 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
68 bool parseVectorList(OperandVector &Operands);
69 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostics are forwarded to the generic MCAsmParser.
72 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
73 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
74 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Directive handlers (.word, .tlsdesccall, .loh, .ltorg, .req/.unreq).
76 bool parseDirectiveWord(unsigned Size, SMLoc L);
77 bool parseDirectiveTLSDescCall(SMLoc L);
79 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
80 bool parseDirectiveLtorg(SMLoc L);
82 bool parseDirectiveReq(StringRef Name, SMLoc L);
83 bool parseDirectiveUnreq(SMLoc L);
85 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
86 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
87 OperandVector &Operands, MCStreamer &Out,
89 bool MatchingInlineAsm) override;
90 /// @name Auto-generated Match Functions
// Declarations generated by TableGen from the AArch64 .td files.
93 #define GET_ASSEMBLER_HEADER
94 #include "AArch64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher.
98 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
99 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
100 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
101 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
102 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
103 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
104 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
105 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
106 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
107 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
108 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
109 bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match result codes, extended with the TableGen'erated
// per-operand diagnostic kinds.
112 enum AArch64MatchResultTy {
113 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
114 #define GET_OPERAND_DIAGNOSTIC_TYPES
115 #include "AArch64GenAsmMatcher.inc"
// Constructor: installs a default AArch64TargetStreamer when none is
// registered yet and precomputes the subtarget feature bits consumed by
// the generated matcher.
117 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
118 const MCInstrInfo &MII,
119 const MCTargetOptions &Options)
120 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
121 MCAsmParserExtension::Initialize(_Parser);
122 if (Parser.getStreamer().getTargetStreamer() == nullptr)
123 new AArch64TargetStreamer(Parser.getStreamer());
125 // Initialize the set of available features.
126 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
129 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
130 SMLoc NameLoc, OperandVector &Operands) override;
131 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
132 bool ParseDirective(AsmToken DirectiveID) override;
133 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
134 unsigned Kind) override;
// Splits a symbolic expression into its ELF/Darwin modifier and addend.
136 static bool classifySymbolRef(const MCExpr *Expr,
137 AArch64MCExpr::VariantKind &ELFRefKind,
138 MCSymbolRefExpr::VariantKind &DarwinRefKind,
141 } // end anonymous namespace
145 /// AArch64Operand - Instances of this class represent a parsed AArch64 machine
147 class AArch64Operand : public MCParsedAsmOperand {
// Source range covered by this operand's tokens.
165 SMLoc StartLoc, EndLoc;
170 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Per-kind payload structs; presumably stored in a union selected by
// Kind (union declaration elided from this view) - TODO confirm.
178 struct VectorListOp {
181 unsigned NumElements;
182 unsigned ElementKind;
185 struct VectorIndexOp {
193 struct ShiftedImmOp {
195 unsigned ShiftAmount;
199 AArch64CC::CondCode Code;
203 unsigned Val; // Encoded 8-bit representation.
207 unsigned Val; // Not the enum since not all values have names.
213 uint64_t FeatureBits; // We need to pass through information about which
214 // core we are compiling for so that the SysReg
215 // Mappers can appropriately conditionalize.
226 struct ShiftExtendOp {
227 AArch64_AM::ShiftExtendType Type;
229 bool HasExplicitAmount;
// One member per operand kind; only the member matching Kind is valid.
239 struct VectorListOp VectorList;
240 struct VectorIndexOp VectorIndex;
242 struct ShiftedImmOp ShiftedImm;
243 struct CondCodeOp CondCode;
244 struct FPImmOp FPImm;
245 struct BarrierOp Barrier;
246 struct SysRegOp SysReg;
247 struct SysCRImmOp SysCRImm;
248 struct PrefetchOp Prefetch;
249 struct ShiftExtendOp ShiftExtend;
252 // Keep the MCContext around as the MCExprs may need manipulated during
253 // the add<>Operands() calls.
257 AArch64Operand(KindTy K, MCContext &_Ctx)
258 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies the payload member corresponding to the
// source operand's kind (the switch dispatching on o.Kind is elided
// from this sampled view).
260 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
262 StartLoc = o.StartLoc;
272 ShiftedImm = o.ShiftedImm;
275 CondCode = o.CondCode;
287 VectorList = o.VectorList;
290 VectorIndex = o.VectorIndex;
296 SysCRImm = o.SysCRImm;
299 Prefetch = o.Prefetch;
302 ShiftExtend = o.ShiftExtend;
307 /// getStartLoc - Get the location of the first token of this operand.
308 SMLoc getStartLoc() const override { return StartLoc; }
309 /// getEndLoc - Get the location of the last token of this operand.
310 SMLoc getEndLoc() const override { return EndLoc; }
// Kind-checked accessors: each asserts that the operand currently holds
// the matching payload before reading it.
312 StringRef getToken() const {
313 assert(Kind == k_Token && "Invalid access!");
314 return StringRef(Tok.Data, Tok.Length);
317 bool isTokenSuffix() const {
318 assert(Kind == k_Token && "Invalid access!");
322 const MCExpr *getImm() const {
323 assert(Kind == k_Immediate && "Invalid access!");
327 const MCExpr *getShiftedImmVal() const {
328 assert(Kind == k_ShiftedImm && "Invalid access!");
329 return ShiftedImm.Val;
332 unsigned getShiftedImmShift() const {
333 assert(Kind == k_ShiftedImm && "Invalid access!");
334 return ShiftedImm.ShiftAmount;
337 AArch64CC::CondCode getCondCode() const {
338 assert(Kind == k_CondCode && "Invalid access!");
339 return CondCode.Code;
342 unsigned getFPImm() const {
343 assert(Kind == k_FPImm && "Invalid access!");
347 unsigned getBarrier() const {
348 assert(Kind == k_Barrier && "Invalid access!");
352 unsigned getReg() const override {
353 assert(Kind == k_Register && "Invalid access!");
// Vector-list accessors: first register of the list and how many
// consecutive registers it names.
357 unsigned getVectorListStart() const {
358 assert(Kind == k_VectorList && "Invalid access!");
359 return VectorList.RegNum;
362 unsigned getVectorListCount() const {
363 assert(Kind == k_VectorList && "Invalid access!");
364 return VectorList.Count;
367 unsigned getVectorIndex() const {
368 assert(Kind == k_VectorIndex && "Invalid access!");
369 return VectorIndex.Val;
372 StringRef getSysReg() const {
373 assert(Kind == k_SysReg && "Invalid access!");
374 return StringRef(SysReg.Data, SysReg.Length);
377 uint64_t getSysRegFeatureBits() const {
378 assert(Kind == k_SysReg && "Invalid access!");
379 return SysReg.FeatureBits;
382 unsigned getSysCR() const {
383 assert(Kind == k_SysCR && "Invalid access!");
387 unsigned getPrefetch() const {
388 assert(Kind == k_Prefetch && "Invalid access!");
392 AArch64_AM::ShiftExtendType getShiftExtendType() const {
393 assert(Kind == k_ShiftExtend && "Invalid access!");
394 return ShiftExtend.Type;
397 unsigned getShiftExtendAmount() const {
398 assert(Kind == k_ShiftExtend && "Invalid access!");
399 return ShiftExtend.Amount;
// True when the shift/extend amount was written explicitly in the asm.
402 bool hasShiftExtendAmount() const {
403 assert(Kind == k_ShiftExtend && "Invalid access!");
404 return ShiftExtend.HasExplicitAmount;
407 bool isImm() const override { return Kind == k_Immediate; }
408 bool isMem() const override { return false; }
// Signed 9-bit immediate (LDUR/STUR-style offsets): -256..255.
// NOTE(review): the early-out guards (!isImm() / !MCE) between these
// lines are elided from this sampled view.
409 bool isSImm9() const {
412 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
415 int64_t Val = MCE->getValue();
416 return (Val >= -256 && Val < 256);
// Signed 7-bit immediates scaled by 4/8/16 (load/store-pair offsets):
// the value must be a multiple of the scale and fit in 7 bits after
// scaling.
418 bool isSImm7s4() const {
421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424 int64_t Val = MCE->getValue();
425 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
427 bool isSImm7s8() const {
430 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433 int64_t Val = MCE->getValue();
434 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
436 bool isSImm7s16() const {
439 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442 int64_t Val = MCE->getValue();
443 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Is this symbolic expression usable as a scaled unsigned 12-bit
// load/store offset (i.e. a :lo12:-style page-offset modifier)?
446 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
447 AArch64MCExpr::VariantKind ELFRefKind;
448 MCSymbolRefExpr::VariantKind DarwinRefKind;
450 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
452 // If we don't understand the expression, assume the best and
453 // let the fixup and relocation code deal with it.
457 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
458 ELFRefKind == AArch64MCExpr::VK_LO12 ||
459 ELFRefKind == AArch64MCExpr::VK_GOT_LO12 ||
460 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
461 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
462 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
463 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
464 ELFRefKind == AArch64MCExpr::VK_GOTTPREL_LO12_NC ||
465 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) {
466 // Note that we don't range-check the addend. It's adjusted modulo page
467 // size when converted, so there is no "out of range" condition when using
469 return Addend >= 0 && (Addend % Scale) == 0;
470 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
471 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
472 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Unsigned 12-bit offset, scaled by the template Scale; non-constant
// expressions defer to the symbolic-offset check above.
479 template <int Scale> bool isUImm12Offset() const {
483 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
485 return isSymbolicUImm12Offset(getImm(), Scale);
487 int64_t Val = MCE->getValue();
488 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
// Immediate-range predicates isImm<Lo>_<Hi>: true when the operand is a
// constant immediate inside the inclusive range named by the method.
// NOTE(review): the !isImm()/!MCE guard lines are elided from this view.
491 bool isImm0_7() const {
494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
497 int64_t Val = MCE->getValue();
498 return (Val >= 0 && Val < 8);
500 bool isImm1_8() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val > 0 && Val < 9);
509 bool isImm0_15() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val >= 0 && Val < 16);
518 bool isImm1_16() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val > 0 && Val < 17);
527 bool isImm0_31() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val >= 0 && Val < 32);
536 bool isImm1_31() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 int64_t Val = MCE->getValue();
543 return (Val >= 1 && Val < 32);
545 bool isImm1_32() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 int64_t Val = MCE->getValue();
552 return (Val >= 1 && Val < 33);
554 bool isImm0_63() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 int64_t Val = MCE->getValue();
561 return (Val >= 0 && Val < 64);
563 bool isImm1_63() const {
566 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
569 int64_t Val = MCE->getValue();
570 return (Val >= 1 && Val < 64);
572 bool isImm1_64() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
579 return (Val >= 1 && Val < 65);
581 bool isImm0_127() const {
584 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
587 int64_t Val = MCE->getValue();
588 return (Val >= 0 && Val < 128);
590 bool isImm0_255() const {
593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
596 int64_t Val = MCE->getValue();
597 return (Val >= 0 && Val < 256);
599 bool isImm0_65535() const {
602 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
605 int64_t Val = MCE->getValue();
606 return (Val >= 0 && Val < 65536);
608 bool isImm32_63() const {
611 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
614 int64_t Val = MCE->getValue();
615 return (Val >= 32 && Val < 64);
// Bitmask ("logical") immediate predicates for 32/64-bit logical ops.
// The 32-bit form first checks the upper half is all-zero or all-one so
// sign-extended negatives are accepted.
617 bool isLogicalImm32() const {
620 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
623 int64_t Val = MCE->getValue();
624 if (Val >> 32 != 0 && Val >> 32 != ~0LL)
627 return AArch64_AM::isLogicalImmediate(Val, 32);
629 bool isLogicalImm64() const {
632 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
635 return AArch64_AM::isLogicalImmediate(MCE->getValue(), 64);
// "Not" variants: true when the bitwise complement is a valid bitmask
// immediate (used for BIC/ORN-style aliases).
637 bool isLogicalImm32Not() const {
640 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
643 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
644 return AArch64_AM::isLogicalImmediate(Val, 32);
646 bool isLogicalImm64Not() const {
649 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
652 return AArch64_AM::isLogicalImmediate(~MCE->getValue(), 64);
654 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
// Is this operand usable as the immediate of an ADD/SUB (immediate)
// instruction: either a plain/shifted constant in 0..0xfff, or one of
// the symbol modifiers that relocate into that field.
655 bool isAddSubImm() const {
656 if (!isShiftedImm() && !isImm())
661 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
662 if (isShiftedImm()) {
663 unsigned Shift = ShiftedImm.ShiftAmount;
664 Expr = ShiftedImm.Val;
665 if (Shift != 0 && Shift != 12)
// Symbolic case: accept the page-offset / TLS modifiers whose fixups
// target the 12-bit immediate field.
671 AArch64MCExpr::VariantKind ELFRefKind;
672 MCSymbolRefExpr::VariantKind DarwinRefKind;
674 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
675 DarwinRefKind, Addend)) {
676 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
677 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
678 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
679 || ELFRefKind == AArch64MCExpr::VK_LO12
680 || ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12
681 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12
682 || ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC
683 || ELFRefKind == AArch64MCExpr::VK_TPREL_HI12
684 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12
685 || ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC
686 || ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12;
689 // Otherwise it should be a real immediate in range:
690 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
691 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
693 bool isCondCode() const { return Kind == k_CondCode; }
// FMOV-style 64-bit AdvSIMD modified immediate (type 10 encoding).
694 bool isSIMDImmType10() const {
697 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
700 return AArch64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: a constant must fit in the 26/19/14-bit
// signed field after the implicit <<2 scaling (non-constant expressions
// are resolved later by fixups; those guard lines are elided here).
702 bool isBranchTarget26() const {
705 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
708 int64_t Val = MCE->getValue();
711 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
713 bool isPCRelLabel19() const {
716 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
719 int64_t Val = MCE->getValue();
722 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
724 bool isBranchTarget14() const {
727 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
730 int64_t Val = MCE->getValue();
733 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared helper for the isMov[ZK]SymbolG<n> predicates below: true when
// the immediate is a symbol reference carrying one of the allowed ELF
// :abs_gN:/TLS modifiers (Darwin modifiers are rejected for MOVW forms).
737 isMovWSymbol(ArrayRef<AArch64MCExpr::VariantKind> AllowedModifiers) const {
741 AArch64MCExpr::VariantKind ELFRefKind;
742 MCSymbolRefExpr::VariantKind DarwinRefKind;
744 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
745 DarwinRefKind, Addend)) {
748 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
751 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
752 if (ELFRefKind == AllowedModifiers[i])
// One predicate per MOVZ 16-bit chunk (G3 = bits 63:48 ... G0 = 15:0).
759 bool isMovZSymbolG3() const {
760 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
761 return isMovWSymbol(Variants);
764 bool isMovZSymbolG2() const {
765 static AArch64MCExpr::VariantKind Variants[] = {
766 AArch64MCExpr::VK_ABS_G2, AArch64MCExpr::VK_ABS_G2_S,
767 AArch64MCExpr::VK_TPREL_G2, AArch64MCExpr::VK_DTPREL_G2};
768 return isMovWSymbol(Variants);
771 bool isMovZSymbolG1() const {
772 static AArch64MCExpr::VariantKind Variants[] = {
773 AArch64MCExpr::VK_ABS_G1, AArch64MCExpr::VK_ABS_G1_S,
774 AArch64MCExpr::VK_GOTTPREL_G1, AArch64MCExpr::VK_TPREL_G1,
775 AArch64MCExpr::VK_DTPREL_G1,
777 return isMovWSymbol(Variants);
780 bool isMovZSymbolG0() const {
781 static AArch64MCExpr::VariantKind Variants[] = {
782 AArch64MCExpr::VK_ABS_G0, AArch64MCExpr::VK_ABS_G0_S,
783 AArch64MCExpr::VK_TPREL_G0, AArch64MCExpr::VK_DTPREL_G0};
784 return isMovWSymbol(Variants);
// MOVK variants only accept the no-overflow-check (_NC) modifiers,
// except G3 which has no checked form.
787 bool isMovKSymbolG3() const {
788 static AArch64MCExpr::VariantKind Variants[] = { AArch64MCExpr::VK_ABS_G3 };
789 return isMovWSymbol(Variants);
792 bool isMovKSymbolG2() const {
793 static AArch64MCExpr::VariantKind Variants[] = {
794 AArch64MCExpr::VK_ABS_G2_NC};
795 return isMovWSymbol(Variants);
798 bool isMovKSymbolG1() const {
799 static AArch64MCExpr::VariantKind Variants[] = {
800 AArch64MCExpr::VK_ABS_G1_NC, AArch64MCExpr::VK_TPREL_G1_NC,
801 AArch64MCExpr::VK_DTPREL_G1_NC
803 return isMovWSymbol(Variants);
806 bool isMovKSymbolG0() const {
807 static AArch64MCExpr::VariantKind Variants[] = {
808 AArch64MCExpr::VK_ABS_G0_NC, AArch64MCExpr::VK_GOTTPREL_G0_NC,
809 AArch64MCExpr::VK_TPREL_G0_NC, AArch64MCExpr::VK_DTPREL_G0_NC
811 return isMovWSymbol(Variants);
// Can a plain "mov rd, #imm" be encoded as MOVZ with the given shift?
// (Value must be a single 16-bit chunk at position Shift.)
814 template<int RegWidth, int Shift>
815 bool isMOVZMovAlias() const {
816 if (!isImm()) return false;
818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
819 if (!CE) return false;
820 uint64_t Value = CE->getValue();
823 Value &= 0xffffffffULL;
825 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
826 if (Value == 0 && Shift != 0)
829 return (Value & ~(0xffffULL << Shift)) == 0;
// Can a plain "mov rd, #imm" be encoded as MOVN with the given shift?
832 template<int RegWidth, int Shift>
833 bool isMOVNMovAlias() const {
834 if (!isImm()) return false;
836 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
837 if (!CE) return false;
838 uint64_t Value = CE->getValue();
840 // MOVZ takes precedence over MOVN.
841 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
842 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
847 Value &= 0xffffffffULL;
849 return (Value & ~(0xffffULL << Shift)) == 0;
852 bool isFPImm() const { return Kind == k_FPImm; }
853 bool isBarrier() const { return Kind == k_Barrier; }
854 bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: the name must be known to the feature-
// conditional MRS/MSR mappers for the current core.
855 bool isMRSSystemRegister() const {
856 if (!isSysReg()) return false;
858 bool IsKnownRegister;
859 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
860 Mapper.fromString(getSysReg(), IsKnownRegister);
862 return IsKnownRegister;
864 bool isMSRSystemRegister() const {
865 if (!isSysReg()) return false;
867 bool IsKnownRegister;
868 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
869 Mapper.fromString(getSysReg(), IsKnownRegister);
871 return IsKnownRegister;
873 bool isSystemPStateField() const {
874 if (!isSysReg()) return false;
876 bool IsKnownRegister;
877 AArch64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
879 return IsKnownRegister;
// Register-class predicates used by the generated matcher.
881 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
882 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
883 bool isVectorRegLo() const {
884 return Kind == k_Register && Reg.isVector &&
885 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
888 bool isGPR32as64() const {
889 return Kind == k_Register && !Reg.isVector &&
890 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
893 bool isGPR64sp0() const {
894 return Kind == k_Register && !Reg.isVector &&
895 AArch64MCRegisterClasses[AArch64::GPR64spRegClassID].contains(Reg.RegNum);
898 /// Is this a vector list with the type implicit (presumably attached to the
899 /// instruction itself)?
900 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
901 return Kind == k_VectorList && VectorList.Count == NumRegs &&
902 !VectorList.ElementKind;
// A vector list whose register count, element count and element kind
// all match the template parameters (e.g. { v0.4s, v1.4s }).
905 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
906 bool isTypedVectorList() const {
907 if (Kind != k_VectorList)
909 if (VectorList.Count != NumRegs)
911 if (VectorList.ElementKind != ElementKind)
913 return VectorList.NumElements == NumElements;
// Lane-index predicates: the maximum lane depends on element width
// (B: 16 lanes, H: 8, S: 4, D: 2).
916 bool isVectorIndex1() const {
917 return Kind == k_VectorIndex && VectorIndex.Val == 1;
919 bool isVectorIndexB() const {
920 return Kind == k_VectorIndex && VectorIndex.Val < 16;
922 bool isVectorIndexH() const {
923 return Kind == k_VectorIndex && VectorIndex.Val < 8;
925 bool isVectorIndexS() const {
926 return Kind == k_VectorIndex && VectorIndex.Val < 4;
928 bool isVectorIndexD() const {
929 return Kind == k_VectorIndex && VectorIndex.Val < 2;
931 bool isToken() const override { return Kind == k_Token; }
932 bool isTokenEqual(StringRef Str) const {
933 return Kind == k_Token && getToken() == Str;
935 bool isSysCR() const { return Kind == k_SysCR; }
936 bool isPrefetch() const { return Kind == k_Prefetch; }
937 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
// Any plain register-shift operand (LSL/LSR/ASR/ROR/MSL).
938 bool isShifter() const {
939 if (!isShiftExtend())
942 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
943 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
944 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
945 ST == AArch64_AM::MSL);
// Register-extend operand (UXTB..SXTX, or LSL as an alias) with an
// amount of at most 4.
947 bool isExtend() const {
948 if (!isShiftExtend())
951 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
952 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
953 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
954 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
955 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
956 ET == AArch64_AM::LSL) &&
957 getShiftExtendAmount() <= 4;
960 bool isExtend64() const {
963 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
964 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
965 return ET != AArch64_AM::UXTX && ET != AArch64_AM::SXTX;
967 bool isExtendLSL64() const {
970 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
971 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
972 ET == AArch64_AM::LSL) &&
973 getShiftExtendAmount() <= 4;
// Register-offset memory extends: the amount must be 0 or log2 of the
// access width in bytes (X-form uses LSL/SXTX, W-form UXTW/SXTW).
976 template<int Width> bool isMemXExtend() const {
979 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
980 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
981 (getShiftExtendAmount() == Log2_32(Width / 8) ||
982 getShiftExtendAmount() == 0);
985 template<int Width> bool isMemWExtend() const {
988 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
989 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
990 (getShiftExtendAmount() == Log2_32(Width / 8) ||
991 getShiftExtendAmount() == 0);
994 template <unsigned width>
995 bool isArithmeticShifter() const {
999 // An arithmetic shifter is LSL, LSR, or ASR.
1000 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1001 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1002 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1005 template <unsigned width>
1006 bool isLogicalShifter() const {
1010 // A logical shifter is LSL, LSR, ASR or ROR.
1011 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1012 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1013 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1014 getShiftExtendAmount() < width;
1017 bool isMovImm32Shifter() const {
1021 // A 32-bit MOVi shifter is LSL of 0 or 16.
1022 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1023 if (ST != AArch64_AM::LSL)
1025 uint64_t Val = getShiftExtendAmount();
1026 return (Val == 0 || Val == 16);
1029 bool isMovImm64Shifter() const {
1033 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
1034 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1035 if (ST != AArch64_AM::LSL)
1037 uint64_t Val = getShiftExtendAmount();
1038 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1041 bool isLogicalVecShifter() const {
1045 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1046 unsigned Shift = getShiftExtendAmount();
1047 return getShiftExtendType() == AArch64_AM::LSL &&
1048 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1051 bool isLogicalVecHalfWordShifter() const {
1052 if (!isLogicalVecShifter())
1055 // A logical vector shifter is a left shift by 0 or 8.
1056 unsigned Shift = getShiftExtendAmount();
1057 return getShiftExtendType() == AArch64_AM::LSL &&
1058 (Shift == 0 || Shift == 8);
1061 bool isMoveVecShifter() const {
1062 if (!isShiftExtend())
1065 // A move vector shifter (MSL) is a left shift by 8 or 16.
1066 unsigned Shift = getShiftExtendAmount();
1067 return getShiftExtendType() == AArch64_AM::MSL &&
1068 (Shift == 8 || Shift == 16);
1071 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1072 // to LDUR/STUR when the offset is not legal for the former but is for
1073 // the latter. As such, in addition to checking for being a legal unscaled
1074 // address, also check that it is not a legal scaled address. This avoids
1075 // ambiguity in the matcher.
1077 bool isSImm9OffsetFB() const {
1078 return isSImm9() && !isUImm12Offset<Width / 8>();
// ADRP label: constant must be a page-aligned value within +/-4GB
// (21-bit signed page count).
1081 bool isAdrpLabel() const {
1082 // Validation was handled during parsing, so we just sanity check that
1083 // something didn't go haywire.
1087 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1088 int64_t Val = CE->getValue();
1089 int64_t Min = - (4096 * (1LL << (21 - 1)));
1090 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1091 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR label: constant must fit in a 21-bit signed byte offset.
1097 bool isAdrLabel() const {
1098 // Validation was handled during parsing, so we just sanity check that
1099 // something didn't go haywire.
1103 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1104 int64_t Val = CE->getValue();
1105 int64_t Min = - (1LL << (21 - 1));
1106 int64_t Max = ((1LL << (21 - 1)) - 1);
1107 return Val >= Min && Val <= Max;
// Appends Expr to Inst, folding constants to plain immediates.
1113 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1114 // Add as immediates when possible. Null MCExpr = 0.
1116 Inst.addOperand(MCOperand::CreateImm(0));
1117 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1118 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1120 Inst.addOperand(MCOperand::CreateExpr(Expr));
1123 void addRegOperands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1125 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Re-maps a parsed X-register to the W-register with the same encoding
// (for instructions that print an X name but encode a 32-bit reg).
1128 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1129 assert(N == 1 && "Invalid number of operands!");
1131 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1133 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1134 uint32_t Reg = RI->getRegClass(AArch64::GPR32RegClassID).getRegister(
1135 RI->getEncodingValue(getReg()));
1137 Inst.addOperand(MCOperand::CreateReg(Reg));
// Converts a parsed Q-register to the corresponding D-register by
// relying on D0..D31 / Q0..Q31 being contiguous enum ranges.
1140 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1141 assert(N == 1 && "Invalid number of operands!");
1143 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1144 Inst.addOperand(MCOperand::CreateReg(AArch64::D0 + getReg() - AArch64::Q0));
1147 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1148 assert(N == 1 && "Invalid number of operands!");
1150 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1151 Inst.addOperand(MCOperand::CreateReg(getReg()));
1154 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Emits the D-register tuple (D0, D0_D1, ...) matching a parsed
// Q-register list of NumRegs registers.
1159 template <unsigned NumRegs>
1160 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1161 assert(N == 1 && "Invalid number of operands!");
1162 static unsigned FirstRegs[] = { AArch64::D0, AArch64::D0_D1,
1163 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 };
1164 unsigned FirstReg = FirstRegs[NumRegs - 1];
1167 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Same as above for Q-register tuples.
1170 template <unsigned NumRegs>
1171 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 static unsigned FirstRegs[] = { AArch64::Q0, AArch64::Q0_Q1,
1174 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 };
1175 unsigned FirstReg = FirstRegs[NumRegs - 1];
1178 MCOperand::CreateReg(FirstReg + getVectorListStart() - AArch64::Q0));
// Lane-index adders: range validation already happened in the
// isVectorIndex* predicates, so these just emit the raw lane number.
1181 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1186 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1191 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1192 assert(N == 1 && "Invalid number of operands!");
1193 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1196 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1201 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1202 assert(N == 1 && "Invalid number of operands!");
1203 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1206 void addImmOperands(MCInst &Inst, unsigned N) const {
1207 assert(N == 1 && "Invalid number of operands!");
1208 // If this is a pageoff symrefexpr with an addend, adjust the addend
1209 // to be only the page-offset portion. Otherwise, just add the expr
1211 addExpr(Inst, getImm());
// ADD/SUB immediate occupies two MCInst operands: value then shift
// amount (0 when no explicit shift was written).
1214 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1215 assert(N == 2 && "Invalid number of operands!");
1216 if (isShiftedImm()) {
1217 addExpr(Inst, getShiftedImmVal());
1218 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1220 addExpr(Inst, getImm());
1221 Inst.addOperand(MCOperand::CreateImm(0));
1225 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1226 assert(N == 1 && "Invalid number of operands!");
1227 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
// ADRP encodes the byte offset in 4KB pages, hence the >> 12 for
// constants; symbolic expressions are left for the fixup to resolve.
1230 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1231 assert(N == 1 && "Invalid number of operands!");
1232 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 addExpr(Inst, getImm());
1236 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1239 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1240 addImmOperands(Inst, N);
// Scaled unsigned 12-bit offsets: constants are divided by the access
// scale; non-constants are emitted as-is for relocation.
1244 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1245 assert(N == 1 && "Invalid number of operands!");
1246 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1249 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1252 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
// The remaining adders use cast<> (not dyn_cast<>) because the matching
// is* predicate already guaranteed a constant immediate.
1255 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1256 assert(N == 1 && "Invalid number of operands!");
1257 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1258 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1261 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1262 assert(N == 1 && "Invalid number of operands!");
1263 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1264 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1267 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1268 assert(N == 1 && "Invalid number of operands!");
1269 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1270 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1273 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1274 assert(N == 1 && "Invalid number of operands!");
1275 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1276 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1279 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1280 assert(N == 1 && "Invalid number of operands!");
1281 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1282 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1285 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1286 assert(N == 1 && "Invalid number of operands!");
1287 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1288 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1291 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1292 assert(N == 1 && "Invalid number of operands!");
1293 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1294 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1297 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1298 assert(N == 1 && "Invalid number of operands!");
1299 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1300 assert(MCE && "Invalid constant immediate operand!");
1301 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1304 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1305 assert(N == 1 && "Invalid number of operands!");
1306 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1310 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1313 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1316 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1319 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1322 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1325 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1328 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1331 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1334 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1335 assert(N == 1 && "Invalid number of operands!");
1336 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1337 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1340 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1341 assert(N == 1 && "Invalid number of operands!");
1342 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1343 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1346 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1349 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1352 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1355 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1358 void addImm32_63Operands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1361 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1364 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1368 AArch64_AM::encodeLogicalImmediate(MCE->getValue() & 0xFFFFFFFF, 32);
1369 Inst.addOperand(MCOperand::CreateImm(encoding));
1372 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1375 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1376 Inst.addOperand(MCOperand::CreateImm(encoding));
1379 void addLogicalImm32NotOperands(MCInst &Inst, unsigned N) const {
1380 assert(N == 1 && "Invalid number of operands!");
1381 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1382 int64_t Val = ~MCE->getValue() & 0xFFFFFFFF;
1383 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, 32);
1384 Inst.addOperand(MCOperand::CreateImm(encoding));
1387 void addLogicalImm64NotOperands(MCInst &Inst, unsigned N) const {
1388 assert(N == 1 && "Invalid number of operands!");
1389 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1391 AArch64_AM::encodeLogicalImmediate(~MCE->getValue(), 64);
1392 Inst.addOperand(MCOperand::CreateImm(encoding));
1395 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
1398 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1399 Inst.addOperand(MCOperand::CreateImm(encoding));
1402 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1403 // Branch operands don't encode the low bits, so shift them off
1404 // here. If it's a label, however, just put it on directly as there's
1405 // not enough information now to do anything.
1406 assert(N == 1 && "Invalid number of operands!");
1407 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1409 addExpr(Inst, getImm());
1412 assert(MCE && "Invalid constant immediate operand!");
1413 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1416 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1417 // Branch operands don't encode the low bits, so shift them off
1418 // here. If it's a label, however, just put it on directly as there's
1419 // not enough information now to do anything.
1420 assert(N == 1 && "Invalid number of operands!");
1421 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1423 addExpr(Inst, getImm());
1426 assert(MCE && "Invalid constant immediate operand!");
1427 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1430 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1431 // Branch operands don't encode the low bits, so shift them off
1432 // here. If it's a label, however, just put it on directly as there's
1433 // not enough information now to do anything.
1434 assert(N == 1 && "Invalid number of operands!");
1435 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1437 addExpr(Inst, getImm());
1440 assert(MCE && "Invalid constant immediate operand!");
1441 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1444 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1445 assert(N == 1 && "Invalid number of operands!");
1446 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1449 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1450 assert(N == 1 && "Invalid number of operands!");
1451 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1454 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1455 assert(N == 1 && "Invalid number of operands!");
1458 auto Mapper = AArch64SysReg::MRSMapper(getSysRegFeatureBits());
1459 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1461 Inst.addOperand(MCOperand::CreateImm(Bits));
1464 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1465 assert(N == 1 && "Invalid number of operands!");
1468 auto Mapper = AArch64SysReg::MSRMapper(getSysRegFeatureBits());
1469 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1471 Inst.addOperand(MCOperand::CreateImm(Bits));
1474 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1475 assert(N == 1 && "Invalid number of operands!");
1479 AArch64PState::PStateMapper().fromString(getSysReg(), Valid);
1481 Inst.addOperand(MCOperand::CreateImm(Bits));
1484 void addSysCROperands(MCInst &Inst, unsigned N) const {
1485 assert(N == 1 && "Invalid number of operands!");
1486 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1489 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1490 assert(N == 1 && "Invalid number of operands!");
1491 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1494 void addShifterOperands(MCInst &Inst, unsigned N) const {
1495 assert(N == 1 && "Invalid number of operands!");
1497 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
1498 Inst.addOperand(MCOperand::CreateImm(Imm));
1501 void addExtendOperands(MCInst &Inst, unsigned N) const {
1502 assert(N == 1 && "Invalid number of operands!");
1503 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1504 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
1505 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1506 Inst.addOperand(MCOperand::CreateImm(Imm));
1509 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1510 assert(N == 1 && "Invalid number of operands!");
1511 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1512 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
1513 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1514 Inst.addOperand(MCOperand::CreateImm(Imm));
1517 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1518 assert(N == 2 && "Invalid number of operands!");
1519 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1520 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1521 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1522 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
1525 // For 8-bit load/store instructions with a register offset, both the
1526 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
1527 // they're disambiguated by whether the shift was explicit or implicit rather
1529 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
1530 assert(N == 2 && "Invalid number of operands!");
1531 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1532 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
1533 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1534 Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
1538 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
1539 assert(N == 1 && "Invalid number of operands!");
1541 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1542 uint64_t Value = CE->getValue();
1543 Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
1547 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
1548 assert(N == 1 && "Invalid number of operands!");
1550 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1551 uint64_t Value = CE->getValue();
1552 Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
1555 void print(raw_ostream &OS) const override;
1557 static std::unique_ptr<AArch64Operand>
1558 CreateToken(StringRef Str, bool IsSuffix, SMLoc S, MCContext &Ctx) {
1559 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1560 Op->Tok.Data = Str.data();
1561 Op->Tok.Length = Str.size();
1562 Op->Tok.IsSuffix = IsSuffix;
1568 static std::unique_ptr<AArch64Operand>
1569 CreateReg(unsigned RegNum, bool isVector, SMLoc S, SMLoc E, MCContext &Ctx) {
1570 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1571 Op->Reg.RegNum = RegNum;
1572 Op->Reg.isVector = isVector;
1578 static std::unique_ptr<AArch64Operand>
1579 CreateVectorList(unsigned RegNum, unsigned Count, unsigned NumElements,
1580 char ElementKind, SMLoc S, SMLoc E, MCContext &Ctx) {
1581 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1582 Op->VectorList.RegNum = RegNum;
1583 Op->VectorList.Count = Count;
1584 Op->VectorList.NumElements = NumElements;
1585 Op->VectorList.ElementKind = ElementKind;
1591 static std::unique_ptr<AArch64Operand>
1592 CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
1593 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1594 Op->VectorIndex.Val = Idx;
1600 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
1601 SMLoc E, MCContext &Ctx) {
1602 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1609 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
1610 unsigned ShiftAmount,
1613 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1614 Op->ShiftedImm .Val = Val;
1615 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1621 static std::unique_ptr<AArch64Operand>
1622 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
1623 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1624 Op->CondCode.Code = Code;
1630 static std::unique_ptr<AArch64Operand> CreateFPImm(unsigned Val, SMLoc S,
1632 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1633 Op->FPImm.Val = Val;
1639 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val, SMLoc S,
1641 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1642 Op->Barrier.Val = Val;
1648 static std::unique_ptr<AArch64Operand>
1649 CreateSysReg(StringRef Str, SMLoc S, uint64_t FeatureBits, MCContext &Ctx) {
1650 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1651 Op->SysReg.Data = Str.data();
1652 Op->SysReg.Length = Str.size();
1653 Op->SysReg.FeatureBits = FeatureBits;
1659 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
1660 SMLoc E, MCContext &Ctx) {
1661 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1662 Op->SysCRImm.Val = Val;
1668 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val, SMLoc S,
1670 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1671 Op->Prefetch.Val = Val;
1677 static std::unique_ptr<AArch64Operand>
1678 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
1679 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
1680 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
1681 Op->ShiftExtend.Type = ShOp;
1682 Op->ShiftExtend.Amount = Val;
1683 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1690 } // end anonymous namespace.
1692 void AArch64Operand::print(raw_ostream &OS) const {
1695 OS << "<fpimm " << getFPImm() << "("
1696 << AArch64_AM::getFPImmFloat(getFPImm()) << ") >";
1700 StringRef Name = AArch64DB::DBarrierMapper().toString(getBarrier(), Valid);
1702 OS << "<barrier " << Name << ">";
1704 OS << "<barrier invalid #" << getBarrier() << ">";
1708 getImm()->print(OS);
1710 case k_ShiftedImm: {
1711 unsigned Shift = getShiftedImmShift();
1712 OS << "<shiftedimm ";
1713 getShiftedImmVal()->print(OS);
1714 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
1718 OS << "<condcode " << getCondCode() << ">";
1721 OS << "<register " << getReg() << ">";
1723 case k_VectorList: {
1724 OS << "<vectorlist ";
1725 unsigned Reg = getVectorListStart();
1726 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1727 OS << Reg + i << " ";
1732 OS << "<vectorindex " << getVectorIndex() << ">";
1735 OS << "<sysreg: " << getSysReg() << '>';
1738 OS << "'" << getToken() << "'";
1741 OS << "c" << getSysCR();
1745 StringRef Name = AArch64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1747 OS << "<prfop " << Name << ">";
1749 OS << "<prfop invalid #" << getPrefetch() << ">";
1752 case k_ShiftExtend: {
1753 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
1754 << getShiftExtendAmount();
1755 if (!hasShiftExtendAmount())
1763 /// @name Auto-generated Match Functions
1766 static unsigned MatchRegisterName(StringRef Name);
1770 static unsigned matchVectorRegName(StringRef Name) {
1771 return StringSwitch<unsigned>(Name)
1772 .Case("v0", AArch64::Q0)
1773 .Case("v1", AArch64::Q1)
1774 .Case("v2", AArch64::Q2)
1775 .Case("v3", AArch64::Q3)
1776 .Case("v4", AArch64::Q4)
1777 .Case("v5", AArch64::Q5)
1778 .Case("v6", AArch64::Q6)
1779 .Case("v7", AArch64::Q7)
1780 .Case("v8", AArch64::Q8)
1781 .Case("v9", AArch64::Q9)
1782 .Case("v10", AArch64::Q10)
1783 .Case("v11", AArch64::Q11)
1784 .Case("v12", AArch64::Q12)
1785 .Case("v13", AArch64::Q13)
1786 .Case("v14", AArch64::Q14)
1787 .Case("v15", AArch64::Q15)
1788 .Case("v16", AArch64::Q16)
1789 .Case("v17", AArch64::Q17)
1790 .Case("v18", AArch64::Q18)
1791 .Case("v19", AArch64::Q19)
1792 .Case("v20", AArch64::Q20)
1793 .Case("v21", AArch64::Q21)
1794 .Case("v22", AArch64::Q22)
1795 .Case("v23", AArch64::Q23)
1796 .Case("v24", AArch64::Q24)
1797 .Case("v25", AArch64::Q25)
1798 .Case("v26", AArch64::Q26)
1799 .Case("v27", AArch64::Q27)
1800 .Case("v28", AArch64::Q28)
1801 .Case("v29", AArch64::Q29)
1802 .Case("v30", AArch64::Q30)
1803 .Case("v31", AArch64::Q31)
1807 static bool isValidVectorKind(StringRef Name) {
1808 return StringSwitch<bool>(Name.lower())
1818 // Accept the width neutral ones, too, for verbose syntax. If those
1819 // aren't used in the right places, the token operand won't match so
1820 // all will work out.
1828 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1829 char &ElementKind) {
1830 assert(isValidVectorKind(Name));
1832 ElementKind = Name.lower()[Name.size() - 1];
1835 if (Name.size() == 2)
1838 // Parse the lane count
1839 Name = Name.drop_front();
1840 while (isdigit(Name.front())) {
1841 NumElements = 10 * NumElements + (Name.front() - '0');
1842 Name = Name.drop_front();
1846 bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1848 StartLoc = getLoc();
1849 RegNo = tryParseRegister();
1850 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1851 return (RegNo == (unsigned)-1);
1854 // Matches a register name or register alias previously defined by '.req'
1855 unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
1857 unsigned RegNum = isVector ? matchVectorRegName(Name)
1858 : MatchRegisterName(Name);
1861 // Check for aliases registered via .req. Canonicalize to lower case.
1862 // That's more consistent since register names are case insensitive, and
1863 // it's how the original entry was passed in from MC/MCParser/AsmParser.
1864 auto Entry = RegisterReqs.find(Name.lower());
1865 if (Entry == RegisterReqs.end())
1867 // set RegNum if the match is the right kind of register
1868 if (isVector == Entry->getValue().first)
1869 RegNum = Entry->getValue().second;
1874 /// tryParseRegister - Try to parse a register name. The token must be an
1875 /// Identifier when called, and if it is a register name the token is eaten and
1876 /// the register is added to the operand list.
1877 int AArch64AsmParser::tryParseRegister() {
1878 const AsmToken &Tok = Parser.getTok();
1879 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1881 std::string lowerCase = Tok.getString().lower();
1882 unsigned RegNum = matchRegisterNameAlias(lowerCase, false);
1883 // Also handle a few aliases of registers.
1885 RegNum = StringSwitch<unsigned>(lowerCase)
1886 .Case("fp", AArch64::FP)
1887 .Case("lr", AArch64::LR)
1888 .Case("x31", AArch64::XZR)
1889 .Case("w31", AArch64::WZR)
1895 Parser.Lex(); // Eat identifier token.
1899 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1900 /// kind specifier. If it is a register specifier, eat the token and return it.
1901 int AArch64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1902 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1903 TokError("vector register expected");
1907 StringRef Name = Parser.getTok().getString();
1908 // If there is a kind specifier, it's separated from the register name by
1910 size_t Start = 0, Next = Name.find('.');
1911 StringRef Head = Name.slice(Start, Next);
1912 unsigned RegNum = matchRegisterNameAlias(Head, true);
1915 if (Next != StringRef::npos) {
1916 Kind = Name.slice(Next, StringRef::npos);
1917 if (!isValidVectorKind(Kind)) {
1918 TokError("invalid vector kind qualifier");
1922 Parser.Lex(); // Eat the register token.
1927 TokError("vector register expected");
1931 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1932 AArch64AsmParser::OperandMatchResultTy
1933 AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1936 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1937 Error(S, "Expected cN operand where 0 <= N <= 15");
1938 return MatchOperand_ParseFail;
1941 StringRef Tok = Parser.getTok().getIdentifier();
1942 if (Tok[0] != 'c' && Tok[0] != 'C') {
1943 Error(S, "Expected cN operand where 0 <= N <= 15");
1944 return MatchOperand_ParseFail;
1948 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1949 if (BadNum || CRNum > 15) {
1950 Error(S, "Expected cN operand where 0 <= N <= 15");
1951 return MatchOperand_ParseFail;
1954 Parser.Lex(); // Eat identifier token.
1956 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
1957 return MatchOperand_Success;
1960 /// tryParsePrefetch - Try to parse a prefetch operand.
1961 AArch64AsmParser::OperandMatchResultTy
1962 AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1964 const AsmToken &Tok = Parser.getTok();
1965 // Either an identifier for named values or a 5-bit immediate.
1966 bool Hash = Tok.is(AsmToken::Hash);
1967 if (Hash || Tok.is(AsmToken::Integer)) {
1969 Parser.Lex(); // Eat hash token.
1970 const MCExpr *ImmVal;
1971 if (getParser().parseExpression(ImmVal))
1972 return MatchOperand_ParseFail;
1974 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
1976 TokError("immediate value expected for prefetch operand");
1977 return MatchOperand_ParseFail;
1979 unsigned prfop = MCE->getValue();
1981 TokError("prefetch operand out of range, [0,31] expected");
1982 return MatchOperand_ParseFail;
1985 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
1986 return MatchOperand_Success;
1989 if (Tok.isNot(AsmToken::Identifier)) {
1990 TokError("pre-fetch hint expected");
1991 return MatchOperand_ParseFail;
1995 unsigned prfop = AArch64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
1997 TokError("pre-fetch hint expected");
1998 return MatchOperand_ParseFail;
2001 Parser.Lex(); // Eat identifier token.
2002 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, S, getContext()));
2003 return MatchOperand_Success;
2006 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2008 AArch64AsmParser::OperandMatchResultTy
2009 AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2013 if (Parser.getTok().is(AsmToken::Hash)) {
2014 Parser.Lex(); // Eat hash token.
2017 if (parseSymbolicImmVal(Expr))
2018 return MatchOperand_ParseFail;
2020 AArch64MCExpr::VariantKind ELFRefKind;
2021 MCSymbolRefExpr::VariantKind DarwinRefKind;
2023 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2024 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2025 ELFRefKind == AArch64MCExpr::VK_INVALID) {
2026 // No modifier was specified at all; this is the syntax for an ELF basic
2027 // ADRP relocation (unfortunately).
2029 AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_ABS_PAGE, getContext());
2030 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2031 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2033 Error(S, "gotpage label reference not allowed an addend");
2034 return MatchOperand_ParseFail;
2035 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2036 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2037 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2038 ELFRefKind != AArch64MCExpr::VK_GOT_PAGE &&
2039 ELFRefKind != AArch64MCExpr::VK_GOTTPREL_PAGE &&
2040 ELFRefKind != AArch64MCExpr::VK_TLSDESC_PAGE) {
2041 // The operand must be an @page or @gotpage qualified symbolref.
2042 Error(S, "page or gotpage label reference expected");
2043 return MatchOperand_ParseFail;
2047 // We have either a label reference possibly with addend or an immediate. The
2048 // addend is a raw value here. The linker will adjust it to only reference the
2050 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2051 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2053 return MatchOperand_Success;
2056 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2058 AArch64AsmParser::OperandMatchResultTy
2059 AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2063 if (Parser.getTok().is(AsmToken::Hash)) {
2064 Parser.Lex(); // Eat hash token.
2067 if (getParser().parseExpression(Expr))
2068 return MatchOperand_ParseFail;
2070 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2071 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2073 return MatchOperand_Success;
2076 /// tryParseFPImm - A floating point immediate expression operand.
2077 AArch64AsmParser::OperandMatchResultTy
2078 AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
2082 if (Parser.getTok().is(AsmToken::Hash)) {
2083 Parser.Lex(); // Eat '#'
2087 // Handle negation, as that still comes through as a separate token.
2088 bool isNegative = false;
2089 if (Parser.getTok().is(AsmToken::Minus)) {
2093 const AsmToken &Tok = Parser.getTok();
2094 if (Tok.is(AsmToken::Real)) {
2095 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2096 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2097 // If we had a '-' in front, toggle the sign bit.
2098 IntVal ^= (uint64_t)isNegative << 63;
2099 int Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2100 Parser.Lex(); // Eat the token.
2101 // Check for out of range values. As an exception, we let Zero through,
2102 // as we handle that special case in post-processing before matching in
2103 // order to use the zero register for it.
2104 if (Val == -1 && !RealVal.isZero()) {
2105 TokError("expected compatible register or floating-point constant");
2106 return MatchOperand_ParseFail;
2108 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2109 return MatchOperand_Success;
2111 if (Tok.is(AsmToken::Integer)) {
2113 if (!isNegative && Tok.getString().startswith("0x")) {
2114 Val = Tok.getIntVal();
2115 if (Val > 255 || Val < 0) {
2116 TokError("encoded floating point value out of range");
2117 return MatchOperand_ParseFail;
2120 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2121 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2122 // If we had a '-' in front, toggle the sign bit.
2123 IntVal ^= (uint64_t)isNegative << 63;
2124 Val = AArch64_AM::getFP64Imm(APInt(64, IntVal));
2126 Parser.Lex(); // Eat the token.
2127 Operands.push_back(AArch64Operand::CreateFPImm(Val, S, getContext()));
2128 return MatchOperand_Success;
2132 return MatchOperand_NoMatch;
2134 TokError("invalid floating point immediate");
2135 return MatchOperand_ParseFail;
2138 /// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
2139 AArch64AsmParser::OperandMatchResultTy
2140 AArch64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
2143 if (Parser.getTok().is(AsmToken::Hash))
2144 Parser.Lex(); // Eat '#'
2145 else if (Parser.getTok().isNot(AsmToken::Integer))
2146 // Operand should start from # or should be integer, emit error otherwise.
2147 return MatchOperand_NoMatch;
2150 if (parseSymbolicImmVal(Imm))
2151 return MatchOperand_ParseFail;
2152 else if (Parser.getTok().isNot(AsmToken::Comma)) {
2153 uint64_t ShiftAmount = 0;
2154 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
2156 int64_t Val = MCE->getValue();
2157 if (Val > 0xfff && (Val & 0xfff) == 0) {
2158 Imm = MCConstantExpr::Create(Val >> 12, getContext());
2162 SMLoc E = Parser.getTok().getLoc();
2163 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
2165 return MatchOperand_Success;
2171 // The optional operand must be "lsl #N" where N is non-negative.
2172 if (!Parser.getTok().is(AsmToken::Identifier) ||
2173 !Parser.getTok().getIdentifier().equals_lower("lsl")) {
2174 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2175 return MatchOperand_ParseFail;
2181 if (Parser.getTok().is(AsmToken::Hash)) {
2185 if (Parser.getTok().isNot(AsmToken::Integer)) {
2186 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
2187 return MatchOperand_ParseFail;
2190 int64_t ShiftAmount = Parser.getTok().getIntVal();
2192 if (ShiftAmount < 0) {
2193 Error(Parser.getTok().getLoc(), "positive shift amount required");
2194 return MatchOperand_ParseFail;
2196 Parser.Lex(); // Eat the number
2198 SMLoc E = Parser.getTok().getLoc();
2199 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2200 S, E, getContext()));
2201 return MatchOperand_Success;
2204 /// parseCondCodeString - Parse a Condition Code string.
2205 AArch64CC::CondCode AArch64AsmParser::parseCondCodeString(StringRef Cond) {
2206 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
2207 .Case("eq", AArch64CC::EQ)
2208 .Case("ne", AArch64CC::NE)
2209 .Case("cs", AArch64CC::HS)
2210 .Case("hs", AArch64CC::HS)
2211 .Case("cc", AArch64CC::LO)
2212 .Case("lo", AArch64CC::LO)
2213 .Case("mi", AArch64CC::MI)
2214 .Case("pl", AArch64CC::PL)
2215 .Case("vs", AArch64CC::VS)
2216 .Case("vc", AArch64CC::VC)
2217 .Case("hi", AArch64CC::HI)
2218 .Case("ls", AArch64CC::LS)
2219 .Case("ge", AArch64CC::GE)
2220 .Case("lt", AArch64CC::LT)
2221 .Case("gt", AArch64CC::GT)
2222 .Case("le", AArch64CC::LE)
2223 .Case("al", AArch64CC::AL)
2224 .Case("nv", AArch64CC::NV)
2225 .Default(AArch64CC::Invalid);
2229 /// parseCondCode - Parse a Condition Code operand.
2230 bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
2231 bool invertCondCode) {
2233 const AsmToken &Tok = Parser.getTok();
2234 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2236 StringRef Cond = Tok.getString();
2237 AArch64CC::CondCode CC = parseCondCodeString(Cond);
2238 if (CC == AArch64CC::Invalid)
2239 return TokError("invalid condition code");
2240 Parser.Lex(); // Eat identifier token.
2242 if (invertCondCode) {
2243 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
2244 return TokError("condition codes AL and NV are invalid for this instruction");
2245 CC = AArch64CC::getInvertedCondCode(AArch64CC::CondCode(CC));
2249 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2253 /// tryParseOptionalShift - Some operands take an optional shift argument. Parse
2254 /// them if present.
///
/// Recognizes shift specifiers (lsl/lsr/asr/ror/msl) and extend specifiers
/// (uxtb..sxtx). Shifts require an immediate amount; extends allow the
/// amount to be omitted, in which case #0 is implied.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — e.g. the
/// `if (!MCE)` guard before the "expected #imm" error is missing here.
2255 AArch64AsmParser::OperandMatchResultTy
2256 AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
2257 const AsmToken &Tok = Parser.getTok();
2258 std::string LowerID = Tok.getString().lower();
2259 AArch64_AM::ShiftExtendType ShOp =
2260 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
2261 .Case("lsl", AArch64_AM::LSL)
2262 .Case("lsr", AArch64_AM::LSR)
2263 .Case("asr", AArch64_AM::ASR)
2264 .Case("ror", AArch64_AM::ROR)
2265 .Case("msl", AArch64_AM::MSL)
2266 .Case("uxtb", AArch64_AM::UXTB)
2267 .Case("uxth", AArch64_AM::UXTH)
2268 .Case("uxtw", AArch64_AM::UXTW)
2269 .Case("uxtx", AArch64_AM::UXTX)
2270 .Case("sxtb", AArch64_AM::SXTB)
2271 .Case("sxth", AArch64_AM::SXTH)
2272 .Case("sxtw", AArch64_AM::SXTW)
2273 .Case("sxtx", AArch64_AM::SXTX)
2274 .Default(AArch64_AM::InvalidShiftExtend)
// Not a shift/extend keyword at all: report NoMatch so the generic
// operand parsing can try something else (no tokens were consumed).
2276 if (ShOp == AArch64_AM::InvalidShiftExtend)
2277 return MatchOperand_NoMatch;
2279 SMLoc S = Tok.getLoc();
2282 bool Hash = getLexer().is(AsmToken::Hash);
2283 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2284 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
2285 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
2286 ShOp == AArch64_AM::MSL) {
2287 // We expect a number here.
2288 TokError("expected #imm after shift specifier");
2289 return MatchOperand_ParseFail;
2292 // "extend" type operations don't need an immediate, #0 is implicit.
2293 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2295 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
2296 return MatchOperand_Success;
2300 Parser.Lex(); // Eat the '#'.
2302 // Make sure we do actually have a number
2303 if (!Parser.getTok().is(AsmToken::Integer)) {
2304 Error(Parser.getTok().getLoc(),
2305 "expected integer shift amount");
2306 return MatchOperand_ParseFail;
2309 const MCExpr *ImmVal;
2310 if (getParser().parseExpression(ImmVal))
2311 return MatchOperand_ParseFail;
// Shift amounts must be constant expressions, not relocatable symbols.
2313 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2315 TokError("expected #imm after shift specifier");
2316 return MatchOperand_ParseFail;
2319 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2320 Operands.push_back(AArch64Operand::CreateShiftExtend(
2321 ShOp, MCE->getValue(), true, S, E, getContext()));
2322 return MatchOperand_Success;
2325 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2326 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Emits a "sys" token plus the op1/Cn/Cm/op2 operands selected by the
/// alias name, then parses the optional trailing register operand. Aliases
/// whose name contains "all" take no register; all others require one.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — several
/// `} else` lines and closing braces are missing here.
2327 bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2328 OperandVector &Operands) {
2329 if (Name.find('.') != StringRef::npos)
2330 return TokError("invalid operand");
2334 AArch64Operand::CreateToken("sys", false, NameLoc, getContext()));
2336 const AsmToken &Tok = Parser.getTok();
2337 StringRef Op = Tok.getString();
2338 SMLoc S = Tok.getLoc();
2340 const MCExpr *Expr = nullptr;
// Expands to the four SYS operands: #op1, Cn, Cm, #op2.
2342 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2344 Expr = MCConstantExpr::Create(op1, getContext()); \
2345 Operands.push_back( \
2346 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2347 Operands.push_back( \
2348 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2349 Operands.push_back( \
2350 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2351 Expr = MCConstantExpr::Create(op2, getContext()); \
2352 Operands.push_back( \
2353 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2356 if (Mnemonic == "ic") {
2357 if (!Op.compare_lower("ialluis")) {
2358 // SYS #0, C7, C1, #0
2359 SYS_ALIAS(0, 7, 1, 0);
2360 } else if (!Op.compare_lower("iallu")) {
2361 // SYS #0, C7, C5, #0
2362 SYS_ALIAS(0, 7, 5, 0);
2363 } else if (!Op.compare_lower("ivau")) {
2364 // SYS #3, C7, C5, #1
2365 SYS_ALIAS(3, 7, 5, 1);
2367 return TokError("invalid operand for IC instruction");
2369 } else if (Mnemonic == "dc") {
2370 if (!Op.compare_lower("zva")) {
2371 // SYS #3, C7, C4, #1
2372 SYS_ALIAS(3, 7, 4, 1);
2373 } else if (!Op.compare_lower("ivac")) {
2374 // SYS #0, C7, C6, #1
2375 SYS_ALIAS(0, 7, 6, 1);
2376 } else if (!Op.compare_lower("isw")) {
2377 // SYS #0, C7, C6, #2
2378 SYS_ALIAS(0, 7, 6, 2);
2379 } else if (!Op.compare_lower("cvac")) {
2380 // SYS #3, C7, C10, #1
2381 SYS_ALIAS(3, 7, 10, 1);
2382 } else if (!Op.compare_lower("csw")) {
2383 // SYS #0, C7, C10, #2
2384 SYS_ALIAS(0, 7, 10, 2);
2385 } else if (!Op.compare_lower("cvau")) {
2386 // SYS #3, C7, C11, #1
2387 SYS_ALIAS(3, 7, 11, 1);
2388 } else if (!Op.compare_lower("civac")) {
2389 // SYS #3, C7, C14, #1
2390 SYS_ALIAS(3, 7, 14, 1);
2391 } else if (!Op.compare_lower("cisw")) {
2392 // SYS #0, C7, C14, #2
2393 SYS_ALIAS(0, 7, 14, 2);
2395 return TokError("invalid operand for DC instruction");
2397 } else if (Mnemonic == "at") {
2398 if (!Op.compare_lower("s1e1r")) {
2399 // SYS #0, C7, C8, #0
2400 SYS_ALIAS(0, 7, 8, 0);
2401 } else if (!Op.compare_lower("s1e2r")) {
2402 // SYS #4, C7, C8, #0
2403 SYS_ALIAS(4, 7, 8, 0);
2404 } else if (!Op.compare_lower("s1e3r")) {
2405 // SYS #6, C7, C8, #0
2406 SYS_ALIAS(6, 7, 8, 0);
2407 } else if (!Op.compare_lower("s1e1w")) {
2408 // SYS #0, C7, C8, #1
2409 SYS_ALIAS(0, 7, 8, 1);
2410 } else if (!Op.compare_lower("s1e2w")) {
2411 // SYS #4, C7, C8, #1
2412 SYS_ALIAS(4, 7, 8, 1);
2413 } else if (!Op.compare_lower("s1e3w")) {
2414 // SYS #6, C7, C8, #1
2415 SYS_ALIAS(6, 7, 8, 1);
2416 } else if (!Op.compare_lower("s1e0r")) {
2417 // SYS #0, C7, C8, #2
2418 SYS_ALIAS(0, 7, 8, 2);
2419 } else if (!Op.compare_lower("s1e0w")) {
2420 // SYS #0, C7, C8, #3
2421 SYS_ALIAS(0, 7, 8, 3);
2422 } else if (!Op.compare_lower("s12e1r")) {
2423 // SYS #4, C7, C8, #4
2424 SYS_ALIAS(4, 7, 8, 4);
2425 } else if (!Op.compare_lower("s12e1w")) {
2426 // SYS #4, C7, C8, #5
2427 SYS_ALIAS(4, 7, 8, 5);
2428 } else if (!Op.compare_lower("s12e0r")) {
2429 // SYS #4, C7, C8, #6
2430 SYS_ALIAS(4, 7, 8, 6);
2431 } else if (!Op.compare_lower("s12e0w")) {
2432 // SYS #4, C7, C8, #7
2433 SYS_ALIAS(4, 7, 8, 7);
2435 return TokError("invalid operand for AT instruction");
2437 } else if (Mnemonic == "tlbi") {
2438 if (!Op.compare_lower("vmalle1is")) {
2439 // SYS #0, C8, C3, #0
2440 SYS_ALIAS(0, 8, 3, 0);
2441 } else if (!Op.compare_lower("alle2is")) {
2442 // SYS #4, C8, C3, #0
2443 SYS_ALIAS(4, 8, 3, 0);
2444 } else if (!Op.compare_lower("alle3is")) {
2445 // SYS #6, C8, C3, #0
2446 SYS_ALIAS(6, 8, 3, 0);
2447 } else if (!Op.compare_lower("vae1is")) {
2448 // SYS #0, C8, C3, #1
2449 SYS_ALIAS(0, 8, 3, 1);
2450 } else if (!Op.compare_lower("vae2is")) {
2451 // SYS #4, C8, C3, #1
2452 SYS_ALIAS(4, 8, 3, 1);
2453 } else if (!Op.compare_lower("vae3is")) {
2454 // SYS #6, C8, C3, #1
2455 SYS_ALIAS(6, 8, 3, 1);
2456 } else if (!Op.compare_lower("aside1is")) {
2457 // SYS #0, C8, C3, #2
2458 SYS_ALIAS(0, 8, 3, 2);
2459 } else if (!Op.compare_lower("vaae1is")) {
2460 // SYS #0, C8, C3, #3
2461 SYS_ALIAS(0, 8, 3, 3);
2462 } else if (!Op.compare_lower("alle1is")) {
2463 // SYS #4, C8, C3, #4
2464 SYS_ALIAS(4, 8, 3, 4);
2465 } else if (!Op.compare_lower("vale1is")) {
2466 // SYS #0, C8, C3, #5
2467 SYS_ALIAS(0, 8, 3, 5);
2468 } else if (!Op.compare_lower("vaale1is")) {
2469 // SYS #0, C8, C3, #7
2470 SYS_ALIAS(0, 8, 3, 7);
2471 } else if (!Op.compare_lower("vmalle1")) {
2472 // SYS #0, C8, C7, #0
2473 SYS_ALIAS(0, 8, 7, 0);
2474 } else if (!Op.compare_lower("alle2")) {
2475 // SYS #4, C8, C7, #0
2476 SYS_ALIAS(4, 8, 7, 0);
2477 } else if (!Op.compare_lower("vale2is")) {
2478 // SYS #4, C8, C3, #5
2479 SYS_ALIAS(4, 8, 3, 5);
2480 } else if (!Op.compare_lower("vale3is")) {
2481 // SYS #6, C8, C3, #5
2482 SYS_ALIAS(6, 8, 3, 5);
2483 } else if (!Op.compare_lower("alle3")) {
2484 // SYS #6, C8, C7, #0
2485 SYS_ALIAS(6, 8, 7, 0);
2486 } else if (!Op.compare_lower("vae1")) {
2487 // SYS #0, C8, C7, #1
2488 SYS_ALIAS(0, 8, 7, 1);
2489 } else if (!Op.compare_lower("vae2")) {
2490 // SYS #4, C8, C7, #1
2491 SYS_ALIAS(4, 8, 7, 1);
2492 } else if (!Op.compare_lower("vae3")) {
2493 // SYS #6, C8, C7, #1
2494 SYS_ALIAS(6, 8, 7, 1);
2495 } else if (!Op.compare_lower("aside1")) {
2496 // SYS #0, C8, C7, #2
2497 SYS_ALIAS(0, 8, 7, 2);
2498 } else if (!Op.compare_lower("vaae1")) {
2499 // SYS #0, C8, C7, #3
2500 SYS_ALIAS(0, 8, 7, 3);
2501 } else if (!Op.compare_lower("alle1")) {
2502 // SYS #4, C8, C7, #4
2503 SYS_ALIAS(4, 8, 7, 4);
2504 } else if (!Op.compare_lower("vale1")) {
2505 // SYS #0, C8, C7, #5
2506 SYS_ALIAS(0, 8, 7, 5);
2507 } else if (!Op.compare_lower("vale2")) {
2508 // SYS #4, C8, C7, #5
2509 SYS_ALIAS(4, 8, 7, 5);
2510 } else if (!Op.compare_lower("vale3")) {
2511 // SYS #6, C8, C7, #5
2512 SYS_ALIAS(6, 8, 7, 5);
2513 } else if (!Op.compare_lower("vaale1")) {
2514 // SYS #0, C8, C7, #7
2515 SYS_ALIAS(0, 8, 7, 7);
2516 } else if (!Op.compare_lower("ipas2e1")) {
2517 // SYS #4, C8, C4, #1
2518 SYS_ALIAS(4, 8, 4, 1);
2519 } else if (!Op.compare_lower("ipas2le1")) {
2520 // SYS #4, C8, C4, #5
2521 SYS_ALIAS(4, 8, 4, 5);
2522 } else if (!Op.compare_lower("ipas2e1is")) {
2523 // SYS #4, C8, C0, #1
2524 SYS_ALIAS(4, 8, 0, 1);
2525 } else if (!Op.compare_lower("ipas2le1is")) {
2526 // SYS #4, C8, C0, #5
2527 SYS_ALIAS(4, 8, 0, 5);
2528 } else if (!Op.compare_lower("vmalls12e1")) {
2529 // SYS #4, C8, C7, #6
2530 SYS_ALIAS(4, 8, 7, 6);
2531 } else if (!Op.compare_lower("vmalls12e1is")) {
2532 // SYS #4, C8, C3, #6
2533 SYS_ALIAS(4, 8, 3, 6);
2535 return TokError("invalid operand for TLBI instruction");
2541 Parser.Lex(); // Eat operand.
// "...all..." ops (e.g. TLBI ALLE1) operate on everything and therefore
// take no Xt register; every other alias requires one.
2543 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2544 bool HasRegister = false;
2546 // Check for the optional register operand.
2547 if (getLexer().is(AsmToken::Comma)) {
2548 Parser.Lex(); // Eat comma.
2550 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2551 return TokError("expected register operand");
2556 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2557 Parser.eatToEndOfStatement();
2558 return TokError("unexpected token in argument list");
// Diagnose a missing or spurious register against the alias's expectation.
2561 if (ExpectRegister && !HasRegister) {
2562 return TokError("specified " + Mnemonic + " op requires a register");
2564 else if (!ExpectRegister && HasRegister) {
2565 return TokError("specified " + Mnemonic + " op does not use a register");
2568 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of DMB/DSB/ISB: either a
/// constant #imm in [0, 15] or a named barrier option. For ISB the only
/// accepted named option is "sy".
/// NOTE(review): this excerpt has gaps (see embedded numbering) — e.g. the
/// `if (!MCE)` / `if (!Valid)` guards before some errors are missing here.
2572 AArch64AsmParser::OperandMatchResultTy
2573 AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2574 const AsmToken &Tok = Parser.getTok();
2576 // Can be either a #imm style literal or an option name
2577 bool Hash = Tok.is(AsmToken::Hash);
2578 if (Hash || Tok.is(AsmToken::Integer)) {
2579 // Immediate operand.
2581 Parser.Lex(); // Eat the '#'
2582 const MCExpr *ImmVal;
2583 SMLoc ExprLoc = getLoc();
2584 if (getParser().parseExpression(ImmVal))
2585 return MatchOperand_ParseFail;
2586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2588 Error(ExprLoc, "immediate value expected for barrier operand");
2589 return MatchOperand_ParseFail;
// Barrier CRm field is 4 bits wide.
2591 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2592 Error(ExprLoc, "barrier operand out of range");
2593 return MatchOperand_ParseFail;
2596 AArch64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2597 return MatchOperand_Success;
2600 if (Tok.isNot(AsmToken::Identifier)) {
2601 TokError("invalid operand for instruction");
2602 return MatchOperand_ParseFail;
// Translate a named option ("sy", "ish", ...) to its encoding.
2606 unsigned Opt = AArch64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2608 TokError("invalid barrier option name");
2609 return MatchOperand_ParseFail;
2612 // The only valid named option for ISB is 'sy'
2613 if (Mnemonic == "isb" && Opt != AArch64DB::SY) {
2614 TokError("'sy' or #imm operand expected");
2615 return MatchOperand_ParseFail;
2619 AArch64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2620 Parser.Lex(); // Consume the option
2622 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register operand (MRS/MSR). Any
/// identifier is accepted here; validation of the name happens later
/// against the feature bits recorded in the operand.
2625 AArch64AsmParser::OperandMatchResultTy
2626 AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
2627 const AsmToken &Tok = Parser.getTok();
2629 if (Tok.isNot(AsmToken::Identifier))
2630 return MatchOperand_NoMatch;
2632 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), getLoc(),
2633 STI.getFeatureBits(), getContext()));
2634 Parser.Lex(); // Eat identifier
2636 return MatchOperand_Success;
2639 /// tryParseVectorRegister - Parse a vector register operand.
///
/// Matches a vector register (with optional ".8b"-style kind suffix, which
/// is pushed as a separate token) and an optional constant "[index]".
/// Returns true on failure.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — several
/// guard lines and closing braces are missing here.
2640 bool AArch64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2641 if (Parser.getTok().isNot(AsmToken::Identifier))
2645 // Check for a vector register specifier first.
2647 int64_t Reg = tryMatchVectorRegister(Kind, false);
2651 AArch64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2652 // If there was an explicit qualifier, that goes on as a literal text
2656 AArch64Operand::CreateToken(Kind, false, S, getContext()));
2658 // If there is an index specifier following the register, parse that too.
2659 if (Parser.getTok().is(AsmToken::LBrac)) {
2660 SMLoc SIdx = getLoc();
2661 Parser.Lex(); // Eat left bracket token.
2663 const MCExpr *ImmVal;
2664 if (getParser().parseExpression(ImmVal))
// Vector lane indices must be compile-time constants.
2666 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2668 TokError("immediate value expected for vector index");
2673 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2674 Error(E, "']' expected");
2678 Parser.Lex(); // Eat right bracket token.
2680 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
2687 /// parseRegister - Parse a non-vector register operand.
///
/// Tries vector registers first, then scalar ones; returns true on
/// failure. Also consumes a literal "[1]" suffix, which some instructions
/// (e.g. FMOVXDhighr) spell as plain tokens in their syntax.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — the
/// failure returns and several closing braces are missing here.
2688 bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
2690 // Try for a vector register.
2691 if (!tryParseVectorRegister(Operands))
2694 // Try for a scalar register.
2695 int64_t Reg = tryParseRegister();
2699 AArch64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2701 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2702 // as a string token in the instruction itself.
2703 if (getLexer().getKind() == AsmToken::LBrac) {
2704 SMLoc LBracS = getLoc();
2706 const AsmToken &Tok = Parser.getTok();
2707 if (Tok.is(AsmToken::Integer)) {
2708 SMLoc IntS = getLoc();
2709 int64_t Val = Tok.getIntVal();
2712 if (getLexer().getKind() == AsmToken::RBrac) {
2713 SMLoc RBracS = getLoc();
// Push "[", "1", "]" as three literal tokens, matching the td syntax.
2716 AArch64Operand::CreateToken("[", false, LBracS, getContext()));
2718 AArch64Operand::CreateToken("1", false, IntS, getContext()));
2720 AArch64Operand::CreateToken("]", false, RBracS, getContext()));
/// parseSymbolicImmVal - Parse an immediate that may be prefixed by an ELF
/// relocation specifier of the form ":lo12:", ":abs_g1:", etc. When a
/// specifier is present the parsed expression is wrapped in an
/// AArch64MCExpr carrying the corresponding VariantKind.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — the
/// `return true` error paths and closing braces are missing here.
2730 bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2731 bool HasELFModifier = false;
2732 AArch64MCExpr::VariantKind RefKind;
2734 if (Parser.getTok().is(AsmToken::Colon)) {
2735 Parser.Lex(); // Eat ':"
2736 HasELFModifier = true;
2738 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2739 Error(Parser.getTok().getLoc(),
2740 "expect relocation specifier in operand after ':'");
// Map the lowercased specifier name to its relocation variant kind.
2744 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2745 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
2746 .Case("lo12", AArch64MCExpr::VK_LO12)
2747 .Case("abs_g3", AArch64MCExpr::VK_ABS_G3)
2748 .Case("abs_g2", AArch64MCExpr::VK_ABS_G2)
2749 .Case("abs_g2_s", AArch64MCExpr::VK_ABS_G2_S)
2750 .Case("abs_g2_nc", AArch64MCExpr::VK_ABS_G2_NC)
2751 .Case("abs_g1", AArch64MCExpr::VK_ABS_G1)
2752 .Case("abs_g1_s", AArch64MCExpr::VK_ABS_G1_S)
2753 .Case("abs_g1_nc", AArch64MCExpr::VK_ABS_G1_NC)
2754 .Case("abs_g0", AArch64MCExpr::VK_ABS_G0)
2755 .Case("abs_g0_s", AArch64MCExpr::VK_ABS_G0_S)
2756 .Case("abs_g0_nc", AArch64MCExpr::VK_ABS_G0_NC)
2757 .Case("dtprel_g2", AArch64MCExpr::VK_DTPREL_G2)
2758 .Case("dtprel_g1", AArch64MCExpr::VK_DTPREL_G1)
2759 .Case("dtprel_g1_nc", AArch64MCExpr::VK_DTPREL_G1_NC)
2760 .Case("dtprel_g0", AArch64MCExpr::VK_DTPREL_G0)
2761 .Case("dtprel_g0_nc", AArch64MCExpr::VK_DTPREL_G0_NC)
2762 .Case("dtprel_hi12", AArch64MCExpr::VK_DTPREL_HI12)
2763 .Case("dtprel_lo12", AArch64MCExpr::VK_DTPREL_LO12)
2764 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_DTPREL_LO12_NC)
2765 .Case("tprel_g2", AArch64MCExpr::VK_TPREL_G2)
2766 .Case("tprel_g1", AArch64MCExpr::VK_TPREL_G1)
2767 .Case("tprel_g1_nc", AArch64MCExpr::VK_TPREL_G1_NC)
2768 .Case("tprel_g0", AArch64MCExpr::VK_TPREL_G0)
2769 .Case("tprel_g0_nc", AArch64MCExpr::VK_TPREL_G0_NC)
2770 .Case("tprel_hi12", AArch64MCExpr::VK_TPREL_HI12)
2771 .Case("tprel_lo12", AArch64MCExpr::VK_TPREL_LO12)
2772 .Case("tprel_lo12_nc", AArch64MCExpr::VK_TPREL_LO12_NC)
2773 .Case("tlsdesc_lo12", AArch64MCExpr::VK_TLSDESC_LO12)
2774 .Case("got", AArch64MCExpr::VK_GOT_PAGE)
2775 .Case("got_lo12", AArch64MCExpr::VK_GOT_LO12)
2776 .Case("gottprel", AArch64MCExpr::VK_GOTTPREL_PAGE)
2777 .Case("gottprel_lo12", AArch64MCExpr::VK_GOTTPREL_LO12_NC)
2778 .Case("gottprel_g1", AArch64MCExpr::VK_GOTTPREL_G1)
2779 .Case("gottprel_g0_nc", AArch64MCExpr::VK_GOTTPREL_G0_NC)
2780 .Case("tlsdesc", AArch64MCExpr::VK_TLSDESC_PAGE)
2781 .Default(AArch64MCExpr::VK_INVALID);
2783 if (RefKind == AArch64MCExpr::VK_INVALID) {
2784 Error(Parser.getTok().getLoc(),
2785 "expect relocation specifier in operand after ':'");
2789 Parser.Lex(); // Eat identifier
// The specifier must be closed by a second ':' before the expression.
2791 if (Parser.getTok().isNot(AsmToken::Colon)) {
2792 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2795 Parser.Lex(); // Eat ':'
2798 if (getParser().parseExpression(ImmVal))
2802 ImmVal = AArch64MCExpr::Create(ImmVal, RefKind, getContext());
2807 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts "{ v0.8b - v3.8b }" range syntax and "{ v0.8b, v1.8b, ... }"
/// comma syntax; registers must share the same kind suffix and, in comma
/// form, be sequential modulo 32. An optional "[index]" may follow the
/// closing brace. Returns true on failure.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — error
/// returns, the Count bookkeeping, and closing braces are missing here.
2808 bool AArch64AsmParser::parseVectorList(OperandVector &Operands) {
2809 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2811 Parser.Lex(); // Eat left bracket token.
2813 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2816 int64_t PrevReg = FirstReg;
// Range form: "{ vA.T - vB.T }".
2819 if (Parser.getTok().is(AsmToken::Minus)) {
2820 Parser.Lex(); // Eat the minus.
2822 SMLoc Loc = getLoc();
2824 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2827 // Any Kind suffixes must match on all regs in the list.
2828 if (Kind != NextKind)
2829 return Error(Loc, "mismatched register size suffix");
// Distance wraps at 32 registers (v31 -> v0).
2831 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2833 if (Space == 0 || Space > 3) {
2834 return Error(Loc, "invalid number of vectors");
// Comma form: "{ vA.T, vB.T, ... }".
2840 while (Parser.getTok().is(AsmToken::Comma)) {
2841 Parser.Lex(); // Eat the comma token.
2843 SMLoc Loc = getLoc();
2845 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2848 // Any Kind suffixes must match on all regs in the list.
2849 if (Kind != NextKind)
2850 return Error(Loc, "mismatched register size suffix");
2852 // Registers must be incremental (with wraparound at 31)
2853 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2854 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2855 return Error(Loc, "registers must be sequential");
2862 if (Parser.getTok().isNot(AsmToken::RCurly))
2863 return Error(getLoc(), "'}' expected");
2864 Parser.Lex(); // Eat the '}' token.
2867 return Error(S, "invalid number of vectors");
2869 unsigned NumElements = 0;
2870 char ElementKind = 0;
2872 parseValidVectorKind(Kind, NumElements, ElementKind);
2874 Operands.push_back(AArch64Operand::CreateVectorList(
2875 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2877 // If there is an index specifier following the list, parse that too.
2878 if (Parser.getTok().is(AsmToken::LBrac)) {
2879 SMLoc SIdx = getLoc();
2880 Parser.Lex(); // Eat left bracket token.
2882 const MCExpr *ImmVal;
2883 if (getParser().parseExpression(ImmVal))
2885 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2887 TokError("immediate value expected for vector index");
2892 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2893 Error(E, "']' expected");
2897 Parser.Lex(); // Eat right bracket token.
2899 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
/// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed
/// by ", #0" (as used by e.g. LDXP's "[xN, #0]" forms). Any non-zero or
/// non-constant index is an error.
2905 AArch64AsmParser::OperandMatchResultTy
2906 AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2907 const AsmToken &Tok = Parser.getTok();
2908 if (!Tok.is(AsmToken::Identifier))
2909 return MatchOperand_NoMatch;
2911 unsigned RegNum = matchRegisterNameAlias(Tok.getString().lower(), false);
2913 MCContext &Ctx = getContext();
2914 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
// Only GPR64sp (x0-x30, sp) qualifies; anything else is left for other
// operand parsers.
2915 if (!RI->getRegClass(AArch64::GPR64spRegClassID).contains(RegNum))
2916 return MatchOperand_NoMatch;
2919 Parser.Lex(); // Eat register
// No comma: the register alone is the operand.
2921 if (Parser.getTok().isNot(AsmToken::Comma)) {
2923 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2924 return MatchOperand_Success;
2926 Parser.Lex(); // Eat comma.
2928 if (Parser.getTok().is(AsmToken::Hash))
2929 Parser.Lex(); // Eat hash
2931 if (Parser.getTok().isNot(AsmToken::Integer)) {
2932 Error(getLoc(), "index must be absent or #0");
2933 return MatchOperand_ParseFail;
2936 const MCExpr *ImmVal;
2937 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2938 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2939 Error(getLoc(), "index must be absent or #0");
2940 return MatchOperand_ParseFail;
2944 AArch64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2945 return MatchOperand_Success;
2948 /// parseOperand - Parse an arm instruction operand. For now this parses the
2949 /// operand regardless of the mnemonic.
///
/// Dispatches on the first token: custom operand parsers first, then
/// brackets, vector lists, identifiers (cond-codes / registers / labels),
/// immediates (including the literal #0.0 for fcmp-family), and the
/// "ldr rX, =value" pseudo.
/// NOTE(review): this excerpt has gaps (see embedded numbering) — returns,
/// case labels, and closing braces are missing in places.
2950 bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2951 bool invertCondCode) {
2952 // Check if the current operand has a custom associated parser, if so, try to
2953 // custom parse the operand, or fallback to the general approach.
2954 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2955 if (ResTy == MatchOperand_Success)
2957 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2958 // there was a match, but an error occurred, in which case, just return that
2959 // the operand parsing failed.
2960 if (ResTy == MatchOperand_ParseFail)
2963 // Nothing custom, so do general case parsing.
2965 switch (getLexer().getKind()) {
2969 if (parseSymbolicImmVal(Expr))
2970 return Error(S, "invalid operand");
2972 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2973 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2976 case AsmToken::LBrac: {
2977 SMLoc Loc = Parser.getTok().getLoc();
2978 Operands.push_back(AArch64Operand::CreateToken("[", false, Loc,
2980 Parser.Lex(); // Eat '['
2982 // There's no comma after a '[', so we can parse the next operand
2984 return parseOperand(Operands, false, false);
2986 case AsmToken::LCurly:
2987 return parseVectorList(Operands);
2988 case AsmToken::Identifier: {
2989 // If we're expecting a Condition Code operand, then just parse that.
2991 return parseCondCode(Operands, invertCondCode);
2993 // If it's a register name, parse it.
2994 if (!parseRegister(Operands))
2997 // This could be an optional "shift" or "extend" operand.
2998 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2999 // We can only continue if no tokens were eaten.
3000 if (GotShift != MatchOperand_NoMatch)
3003 // This was not a register so parse other operands that start with an
3004 // identifier (like labels) as expressions and create them as immediates.
3005 const MCExpr *IdVal;
3007 if (getParser().parseExpression(IdVal))
3010 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3011 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3014 case AsmToken::Integer:
3015 case AsmToken::Real:
3016 case AsmToken::Hash: {
3017 // #42 -> immediate.
3019 if (getLexer().is(AsmToken::Hash))
3022 // Parse a negative sign
3023 bool isNegative = false;
3024 if (Parser.getTok().is(AsmToken::Minus)) {
3026 // We need to consume this token only when we have a Real, otherwise
3027 // we let parseSymbolicImmVal take care of it
3028 if (Parser.getLexer().peekTok().is(AsmToken::Real))
3032 // The only Real that should come through here is a literal #0.0 for
3033 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3034 // so convert the value.
3035 const AsmToken &Tok = Parser.getTok();
3036 if (Tok.is(AsmToken::Real)) {
3037 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3038 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3039 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3040 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3041 Mnemonic != "fcmlt")
3042 return TokError("unexpected floating point literal");
3043 else if (IntVal != 0 || isNegative)
3044 return TokError("expected floating-point constant #0.0")
3045 Parser.Lex(); // Eat the token.
// Emit the literal as two raw tokens "#0" ".0", as the matcher expects.
3048 AArch64Operand::CreateToken("#0", false, S, getContext()));
3050 AArch64Operand::CreateToken(".0", false, S, getContext()));
3054 const MCExpr *ImmVal;
3055 if (parseSymbolicImmVal(ImmVal))
3058 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3059 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3062 case AsmToken::Equal: {
3063 SMLoc Loc = Parser.getTok().getLoc();
3064 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
3065 return Error(Loc, "unexpected token in operand");
3066 Parser.Lex(); // Eat '='
3067 const MCExpr *SubExprVal;
3068 if (getParser().parseExpression(SubExprVal))
3071 if (Operands.size() < 2 ||
3072 !static_cast<AArch64Operand &>(*Operands[1]).isReg())
3076 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3077 Operands[1]->getReg());
3079 MCContext& Ctx = getContext();
3080 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
3081 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
3082 if (isa<MCConstantExpr>(SubExprVal)) {
3083 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
// X regs allow LSL up to 48, W regs up to 16 (movz imm16 granules).
3084 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3085 while(Imm > 0xFFFF && countTrailingZeros(Imm) >= 16) {
3089 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3090 Operands[0] = AArch64Operand::CreateToken("movz", false, Loc, Ctx);
3091 Operands.push_back(AArch64Operand::CreateImm(
3092 MCConstantExpr::Create(Imm, Ctx), S, E, Ctx));
3094 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
3095 ShiftAmt, true, S, E, Ctx));
3098 APInt Simm = APInt(64, Imm << ShiftAmt);
3099 // check if the immediate is an unsigned or signed 32-bit int for W regs
3100 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
3101 return Error(Loc, "Immediate too large for register");
3103 // If it is a label or an imm that cannot fit in a movz, put it into CP.
3104 const MCExpr *CPLoc =
3105 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4);
3106 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3112 /// ParseInstruction - Parse an AArch64 instruction mnemonic followed by its
/// operands. Canonicalizes "beq" et al. to "b.eq", handles the ".req"
/// directive, routes IC/DC/AT/TLBI to parseSysAlias, splits the mnemonic
/// on '.' into tokens, and parses the comma-separated operand list (with
/// condition-code operands at the positions the csel/ccmp families use).
/// NOTE(review): this excerpt has gaps (see embedded numbering) — returns,
/// the operand-counter setup, and closing braces are missing in places.
3114 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3115 StringRef Name, SMLoc NameLoc,
3116 OperandVector &Operands) {
// Rewrite legacy no-dot conditional-branch spellings to the "b.cc" form.
3117 Name = StringSwitch<StringRef>(Name.lower())
3118 .Case("beq", "b.eq")
3119 .Case("bne", "b.ne")
3120 .Case("bhs", "b.hs")
3121 .Case("bcs", "b.cs")
3122 .Case("blo", "b.lo")
3123 .Case("bcc", "b.cc")
3124 .Case("bmi", "b.mi")
3125 .Case("bpl", "b.pl")
3126 .Case("bvs", "b.vs")
3127 .Case("bvc", "b.vc")
3128 .Case("bhi", "b.hi")
3129 .Case("bls", "b.ls")
3130 .Case("bge", "b.ge")
3131 .Case("blt", "b.lt")
3132 .Case("bgt", "b.gt")
3133 .Case("ble", "b.le")
3134 .Case("bal", "b.al")
3135 .Case("bnv", "b.nv")
3138 // First check for the AArch64-specific .req directive.
3139 if (Parser.getTok().is(AsmToken::Identifier) &&
3140 Parser.getTok().getIdentifier() == ".req") {
3141 parseDirectiveReq(Name, NameLoc);
3142 // We always return 'error' for this, as we're done with this
3143 // statement and don't need to match the 'instruction'.
3147 // Create the leading tokens for the mnemonic, split by '.' characters.
3148 size_t Start = 0, Next = Name.find('.');
3149 StringRef Head = Name.slice(Start, Next);
3151 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3152 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3153 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3154 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3155 Parser.eatToEndOfStatement();
3160 AArch64Operand::CreateToken(Head, false, NameLoc, getContext()));
3163 // Handle condition codes for a branch mnemonic
3164 if (Head == "b" && Next != StringRef::npos) {
3166 Next = Name.find('.', Start + 1);
3167 Head = Name.slice(Start + 1, Next);
3169 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3170 (Head.data() - Name.data()));
3171 AArch64CC::CondCode CC = parseCondCodeString(Head);
3172 if (CC == AArch64CC::Invalid)
3173 return Error(SuffixLoc, "invalid condition code");
3175 AArch64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3177 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3180 // Add the remaining tokens in the mnemonic.
3181 while (Next != StringRef::npos) {
3183 Next = Name.find('.', Start + 1);
3184 Head = Name.slice(Start, Next);
3185 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3186 (Head.data() - Name.data()) + 1);
3188 AArch64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3191 // Conditional compare instructions have a Condition Code operand, which needs
3192 // to be parsed and an immediate operand created.
3193 bool condCodeFourthOperand =
3194 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3195 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3196 Head == "csinc" || Head == "csinv" || Head == "csneg");
3198 // These instructions are aliases to some of the conditional select
3199 // instructions. However, the condition code is inverted in the aliased
3202 // FIXME: Is this the correct way to handle these? Or should the parser
3203 // generate the aliased instructions directly?
3204 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3205 bool condCodeThirdOperand =
3206 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3208 // Read the remaining operands.
3209 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3210 // Read the first operand.
3211 if (parseOperand(Operands, false, false)) {
3212 Parser.eatToEndOfStatement();
3217 while (getLexer().is(AsmToken::Comma)) {
3218 Parser.Lex(); // Eat the comma.
3220 // Parse and remember the operand.
// N is the 1-based operand position; cond-code handling depends on where
// the condition sits for this mnemonic family (2nd, 3rd, or 4th operand).
3221 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3222 (N == 3 && condCodeThirdOperand) ||
3223 (N == 2 && condCodeSecondOperand),
3224 condCodeSecondOperand || condCodeThirdOperand)) {
3225 Parser.eatToEndOfStatement();
3229 // After successfully parsing some operands there are two special cases to
3230 // consider (i.e. notional operands not separated by commas). Both are due
3231 // to memory specifiers:
3232 // + An RBrac will end an address for load/store/prefetch
3233 // + An '!' will indicate a pre-indexed operation.
3235 // It's someone else's responsibility to make sure these tokens are sane
3236 // in the given context!
3237 if (Parser.getTok().is(AsmToken::RBrac)) {
3238 SMLoc Loc = Parser.getTok().getLoc();
3239 Operands.push_back(AArch64Operand::CreateToken("]", false, Loc,
3244 if (Parser.getTok().is(AsmToken::Exclaim)) {
3245 SMLoc Loc = Parser.getTok().getLoc();
3246 Operands.push_back(AArch64Operand::CreateToken("!", false, Loc,
3255 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3256 SMLoc Loc = Parser.getTok().getLoc();
3257 Parser.eatToEndOfStatement();
3258 return Error(Loc, "unexpected token in argument list");
3261 Parser.Lex(); // Consume the EndOfStatement
3265 // FIXME: This entire function is a giant hack to provide us with decent
3266 // operand range validation/diagnostics until TableGen/MC can be extended
3267 // to support autogeneration of this kind of validation.
3268 bool AArch64AsmParser::validateInstruction(MCInst &Inst,
3269 SmallVectorImpl<SMLoc> &Loc) {
3270 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3271 // Check for indexed addressing modes w/ the base register being the
3272 // same as a destination/source register or pair load where
3273 // the Rt == Rt2. All of those are undefined behaviour.
3274 switch (Inst.getOpcode()) {
3275 case AArch64::LDPSWpre:
3276 case AArch64::LDPWpost:
3277 case AArch64::LDPWpre:
3278 case AArch64::LDPXpost:
3279 case AArch64::LDPXpre: {
3280 unsigned Rt = Inst.getOperand(1).getReg();
3281 unsigned Rt2 = Inst.getOperand(2).getReg();
3282 unsigned Rn = Inst.getOperand(3).getReg();
3283 if (RI->isSubRegisterEq(Rn, Rt))
3284 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3285 "is also a destination");
3286 if (RI->isSubRegisterEq(Rn, Rt2))
3287 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3288 "is also a destination");
3291 case AArch64::LDPDi:
3292 case AArch64::LDPQi:
3293 case AArch64::LDPSi:
3294 case AArch64::LDPSWi:
3295 case AArch64::LDPWi:
3296 case AArch64::LDPXi: {
3297 unsigned Rt = Inst.getOperand(0).getReg();
3298 unsigned Rt2 = Inst.getOperand(1).getReg();
3300 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3303 case AArch64::LDPDpost:
3304 case AArch64::LDPDpre:
3305 case AArch64::LDPQpost:
3306 case AArch64::LDPQpre:
3307 case AArch64::LDPSpost:
3308 case AArch64::LDPSpre:
3309 case AArch64::LDPSWpost: {
3310 unsigned Rt = Inst.getOperand(1).getReg();
3311 unsigned Rt2 = Inst.getOperand(2).getReg();
3313 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3316 case AArch64::STPDpost:
3317 case AArch64::STPDpre:
3318 case AArch64::STPQpost:
3319 case AArch64::STPQpre:
3320 case AArch64::STPSpost:
3321 case AArch64::STPSpre:
3322 case AArch64::STPWpost:
3323 case AArch64::STPWpre:
3324 case AArch64::STPXpost:
3325 case AArch64::STPXpre: {
3326 unsigned Rt = Inst.getOperand(1).getReg();
3327 unsigned Rt2 = Inst.getOperand(2).getReg();
3328 unsigned Rn = Inst.getOperand(3).getReg();
3329 if (RI->isSubRegisterEq(Rn, Rt))
3330 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3331 "is also a source");
3332 if (RI->isSubRegisterEq(Rn, Rt2))
3333 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3334 "is also a source");
3337 case AArch64::LDRBBpre:
3338 case AArch64::LDRBpre:
3339 case AArch64::LDRHHpre:
3340 case AArch64::LDRHpre:
3341 case AArch64::LDRSBWpre:
3342 case AArch64::LDRSBXpre:
3343 case AArch64::LDRSHWpre:
3344 case AArch64::LDRSHXpre:
3345 case AArch64::LDRSWpre:
3346 case AArch64::LDRWpre:
3347 case AArch64::LDRXpre:
3348 case AArch64::LDRBBpost:
3349 case AArch64::LDRBpost:
3350 case AArch64::LDRHHpost:
3351 case AArch64::LDRHpost:
3352 case AArch64::LDRSBWpost:
3353 case AArch64::LDRSBXpost:
3354 case AArch64::LDRSHWpost:
3355 case AArch64::LDRSHXpost:
3356 case AArch64::LDRSWpost:
3357 case AArch64::LDRWpost:
3358 case AArch64::LDRXpost: {
3359 unsigned Rt = Inst.getOperand(1).getReg();
3360 unsigned Rn = Inst.getOperand(2).getReg();
3361 if (RI->isSubRegisterEq(Rn, Rt))
3362 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3363 "is also a source");
3366 case AArch64::STRBBpost:
3367 case AArch64::STRBpost:
3368 case AArch64::STRHHpost:
3369 case AArch64::STRHpost:
3370 case AArch64::STRWpost:
3371 case AArch64::STRXpost:
3372 case AArch64::STRBBpre:
3373 case AArch64::STRBpre:
3374 case AArch64::STRHHpre:
3375 case AArch64::STRHpre:
3376 case AArch64::STRWpre:
3377 case AArch64::STRXpre: {
3378 unsigned Rt = Inst.getOperand(1).getReg();
3379 unsigned Rn = Inst.getOperand(2).getReg();
3380 if (RI->isSubRegisterEq(Rn, Rt))
3381 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3382 "is also a source");
3387 // Now check immediate ranges. Separate from the above as there is overlap
3388 // in the instructions being checked and this keeps the nested conditionals
3390 switch (Inst.getOpcode()) {
3391 case AArch64::ADDSWri:
3392 case AArch64::ADDSXri:
3393 case AArch64::ADDWri:
3394 case AArch64::ADDXri:
3395 case AArch64::SUBSWri:
3396 case AArch64::SUBSXri:
3397 case AArch64::SUBWri:
3398 case AArch64::SUBXri: {
3399 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3400 // some slight duplication here.
3401 if (Inst.getOperand(2).isExpr()) {
3402 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3403 AArch64MCExpr::VariantKind ELFRefKind;
3404 MCSymbolRefExpr::VariantKind DarwinRefKind;
3406 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3407 return Error(Loc[2], "invalid immediate expression");
3410 // Only allow these with ADDXri.
3411 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3412 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3413 Inst.getOpcode() == AArch64::ADDXri)
3416 // Only allow these with ADDXri/ADDWri
3417 if ((ELFRefKind == AArch64MCExpr::VK_LO12 ||
3418 ELFRefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
3419 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12 ||
3420 ELFRefKind == AArch64MCExpr::VK_DTPREL_LO12_NC ||
3421 ELFRefKind == AArch64MCExpr::VK_TPREL_HI12 ||
3422 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12 ||
3423 ELFRefKind == AArch64MCExpr::VK_TPREL_LO12_NC ||
3424 ELFRefKind == AArch64MCExpr::VK_TLSDESC_LO12) &&
3425 (Inst.getOpcode() == AArch64::ADDXri ||
3426 Inst.getOpcode() == AArch64::ADDWri))
3429 // Don't allow expressions in the immediate field otherwise
3430 return Error(Loc[2], "invalid immediate expression");
3439 bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3441 case Match_MissingFeature:
3443 "instruction requires a CPU feature not currently enabled");
3444 case Match_InvalidOperand:
3445 return Error(Loc, "invalid operand for instruction");
3446 case Match_InvalidSuffix:
3447 return Error(Loc, "invalid type suffix for instruction");
3448 case Match_InvalidCondCode:
3449 return Error(Loc, "expected AArch64 condition code");
3450 case Match_AddSubRegExtendSmall:
3452 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
3453 case Match_AddSubRegExtendLarge:
3455 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
3456 case Match_AddSubSecondSource:
3458 "expected compatible register, symbol or integer in range [0, 4095]");
3459 case Match_LogicalSecondSource:
3460 return Error(Loc, "expected compatible register or logical immediate");
3461 case Match_InvalidMovImm32Shift:
3462 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
3463 case Match_InvalidMovImm64Shift:
3464 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
3465 case Match_AddSubRegShift32:
3467 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
3468 case Match_AddSubRegShift64:
3470 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
3471 case Match_InvalidFPImm:
3473 "expected compatible register or floating-point constant");
3474 case Match_InvalidMemoryIndexedSImm9:
3475 return Error(Loc, "index must be an integer in range [-256, 255].");
3476 case Match_InvalidMemoryIndexed4SImm7:
3477 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3478 case Match_InvalidMemoryIndexed8SImm7:
3479 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3480 case Match_InvalidMemoryIndexed16SImm7:
3481 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
3482 case Match_InvalidMemoryWExtend8:
3484 "expected 'uxtw' or 'sxtw' with optional shift of #0");
3485 case Match_InvalidMemoryWExtend16:
3487 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
3488 case Match_InvalidMemoryWExtend32:
3490 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
3491 case Match_InvalidMemoryWExtend64:
3493 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
3494 case Match_InvalidMemoryWExtend128:
3496 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
3497 case Match_InvalidMemoryXExtend8:
3499 "expected 'lsl' or 'sxtx' with optional shift of #0");
3500 case Match_InvalidMemoryXExtend16:
3502 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
3503 case Match_InvalidMemoryXExtend32:
3505 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
3506 case Match_InvalidMemoryXExtend64:
3508 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
3509 case Match_InvalidMemoryXExtend128:
3511 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
3512 case Match_InvalidMemoryIndexed1:
3513 return Error(Loc, "index must be an integer in range [0, 4095].");
3514 case Match_InvalidMemoryIndexed2:
3515 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3516 case Match_InvalidMemoryIndexed4:
3517 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3518 case Match_InvalidMemoryIndexed8:
3519 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3520 case Match_InvalidMemoryIndexed16:
3521 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
3522 case Match_InvalidImm0_7:
3523 return Error(Loc, "immediate must be an integer in range [0, 7].");
3524 case Match_InvalidImm0_15:
3525 return Error(Loc, "immediate must be an integer in range [0, 15].");
3526 case Match_InvalidImm0_31:
3527 return Error(Loc, "immediate must be an integer in range [0, 31].");
3528 case Match_InvalidImm0_63:
3529 return Error(Loc, "immediate must be an integer in range [0, 63].");
3530 case Match_InvalidImm0_127:
3531 return Error(Loc, "immediate must be an integer in range [0, 127].");
3532 case Match_InvalidImm0_65535:
3533 return Error(Loc, "immediate must be an integer in range [0, 65535].");
3534 case Match_InvalidImm1_8:
3535 return Error(Loc, "immediate must be an integer in range [1, 8].");
3536 case Match_InvalidImm1_16:
3537 return Error(Loc, "immediate must be an integer in range [1, 16].");
3538 case Match_InvalidImm1_32:
3539 return Error(Loc, "immediate must be an integer in range [1, 32].");
3540 case Match_InvalidImm1_64:
3541 return Error(Loc, "immediate must be an integer in range [1, 64].");
3542 case Match_InvalidIndex1:
3543 return Error(Loc, "expected lane specifier '[1]'");
3544 case Match_InvalidIndexB:
3545 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3546 case Match_InvalidIndexH:
3547 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3548 case Match_InvalidIndexS:
3549 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3550 case Match_InvalidIndexD:
3551 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3552 case Match_InvalidLabel:
3553 return Error(Loc, "expected label or encodable integer pc offset");
3555 return Error(Loc, "expected readable system register");
3557 return Error(Loc, "expected writable system register or pstate");
3558 case Match_MnemonicFail:
3559 return Error(Loc, "unrecognized instruction mnemonic");
3561 llvm_unreachable("unexpected error code!");
3565 static const char *getSubtargetFeatureName(uint64_t Val);
3567 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3568 OperandVector &Operands,
3570 uint64_t &ErrorInfo,
3571 bool MatchingInlineAsm) {
3572 assert(!Operands.empty() && "Unexpect empty operand list!");
3573 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
3574 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
3576 StringRef Tok = Op.getToken();
3577 unsigned NumOperands = Operands.size();
3579 if (NumOperands == 4 && Tok == "lsl") {
3580 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
3581 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3582 if (Op2.isReg() && Op3.isImm()) {
3583 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3585 uint64_t Op3Val = Op3CE->getValue();
3586 uint64_t NewOp3Val = 0;
3587 uint64_t NewOp4Val = 0;
3588 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3590 NewOp3Val = (32 - Op3Val) & 0x1f;
3591 NewOp4Val = 31 - Op3Val;
3593 NewOp3Val = (64 - Op3Val) & 0x3f;
3594 NewOp4Val = 63 - Op3Val;
3597 const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
3598 const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
3600 Operands[0] = AArch64Operand::CreateToken(
3601 "ubfm", false, Op.getStartLoc(), getContext());
3602 Operands.push_back(AArch64Operand::CreateImm(
3603 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
3604 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
3605 Op3.getEndLoc(), getContext());
3608 } else if (NumOperands == 5) {
3609 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3610 // UBFIZ -> UBFM aliases.
3611 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3612 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3613 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3614 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3616 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3617 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3618 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3620 if (Op3CE && Op4CE) {
3621 uint64_t Op3Val = Op3CE->getValue();
3622 uint64_t Op4Val = Op4CE->getValue();
3624 uint64_t RegWidth = 0;
3625 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3631 if (Op3Val >= RegWidth)
3632 return Error(Op3.getStartLoc(),
3633 "expected integer in range [0, 31]");
3634 if (Op4Val < 1 || Op4Val > RegWidth)
3635 return Error(Op4.getStartLoc(),
3636 "expected integer in range [1, 32]");
3638 uint64_t NewOp3Val = 0;
3639 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
3641 NewOp3Val = (32 - Op3Val) & 0x1f;
3643 NewOp3Val = (64 - Op3Val) & 0x3f;
3645 uint64_t NewOp4Val = Op4Val - 1;
3647 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
3648 return Error(Op4.getStartLoc(),
3649 "requested insert overflows register");
3651 const MCExpr *NewOp3 =
3652 MCConstantExpr::Create(NewOp3Val, getContext());
3653 const MCExpr *NewOp4 =
3654 MCConstantExpr::Create(NewOp4Val, getContext());
3655 Operands[3] = AArch64Operand::CreateImm(
3656 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
3657 Operands[4] = AArch64Operand::CreateImm(
3658 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3660 Operands[0] = AArch64Operand::CreateToken(
3661 "bfm", false, Op.getStartLoc(), getContext());
3662 else if (Tok == "sbfiz")
3663 Operands[0] = AArch64Operand::CreateToken(
3664 "sbfm", false, Op.getStartLoc(), getContext());
3665 else if (Tok == "ubfiz")
3666 Operands[0] = AArch64Operand::CreateToken(
3667 "ubfm", false, Op.getStartLoc(), getContext());
3669 llvm_unreachable("No valid mnemonic for alias?");
3673 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
3674 // UBFX -> UBFM aliases.
3675 } else if (NumOperands == 5 &&
3676 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
3677 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
3678 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
3679 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
3681 if (Op1.isReg() && Op3.isImm() && Op4.isImm()) {
3682 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
3683 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
3685 if (Op3CE && Op4CE) {
3686 uint64_t Op3Val = Op3CE->getValue();
3687 uint64_t Op4Val = Op4CE->getValue();
3689 uint64_t RegWidth = 0;
3690 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3696 if (Op3Val >= RegWidth)
3697 return Error(Op3.getStartLoc(),
3698 "expected integer in range [0, 31]");
3699 if (Op4Val < 1 || Op4Val > RegWidth)
3700 return Error(Op4.getStartLoc(),
3701 "expected integer in range [1, 32]");
3703 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
3705 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
3706 return Error(Op4.getStartLoc(),
3707 "requested extract overflows register");
3709 const MCExpr *NewOp4 =
3710 MCConstantExpr::Create(NewOp4Val, getContext());
3711 Operands[4] = AArch64Operand::CreateImm(
3712 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
3714 Operands[0] = AArch64Operand::CreateToken(
3715 "bfm", false, Op.getStartLoc(), getContext());
3716 else if (Tok == "sbfx")
3717 Operands[0] = AArch64Operand::CreateToken(
3718 "sbfm", false, Op.getStartLoc(), getContext());
3719 else if (Tok == "ubfx")
3720 Operands[0] = AArch64Operand::CreateToken(
3721 "ubfm", false, Op.getStartLoc(), getContext());
3723 llvm_unreachable("No valid mnemonic for alias?");
3728 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
3729 // InstAlias can't quite handle this since the reg classes aren't
3731 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
3732 // The source register can be Wn here, but the matcher expects a
3733 // GPR64. Twiddle it here if necessary.
3734 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3736 unsigned Reg = getXRegFromWReg(Op.getReg());
3737 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3738 Op.getEndLoc(), getContext());
3741 // FIXME: Likewise for sxt[bh] with a Xd dst operand
3742 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
3743 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3745 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3747 // The source register can be Wn here, but the matcher expects a
3748 // GPR64. Twiddle it here if necessary.
3749 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
3751 unsigned Reg = getXRegFromWReg(Op.getReg());
3752 Operands[2] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3753 Op.getEndLoc(), getContext());
3757 // FIXME: Likewise for uxt[bh] with a Xd dst operand
3758 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
3759 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3761 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3763 // The source register can be Wn here, but the matcher expects a
3764 // GPR32. Twiddle it here if necessary.
3765 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
3767 unsigned Reg = getWRegFromXReg(Op.getReg());
3768 Operands[1] = AArch64Operand::CreateReg(Reg, false, Op.getStartLoc(),
3769 Op.getEndLoc(), getContext());
3774 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
3775 if (NumOperands == 3 && Tok == "fmov") {
3776 AArch64Operand &RegOp = static_cast<AArch64Operand &>(*Operands[1]);
3777 AArch64Operand &ImmOp = static_cast<AArch64Operand &>(*Operands[2]);
3778 if (RegOp.isReg() && ImmOp.isFPImm() && ImmOp.getFPImm() == (unsigned)-1) {
3780 AArch64MCRegisterClasses[AArch64::FPR32RegClassID].contains(
3784 Operands[2] = AArch64Operand::CreateReg(zreg, false, Op.getStartLoc(),
3785 Op.getEndLoc(), getContext());
3790 // First try to match against the secondary set of tables containing the
3791 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
3792 unsigned MatchResult =
3793 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
3795 // If that fails, try against the alternate table containing long-form NEON:
3796 // "fadd v0.2s, v1.2s, v2.2s"
3797 if (MatchResult != Match_Success)
3799 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
3801 switch (MatchResult) {
3802 case Match_Success: {
3803 // Perform range checking and other semantic validations
3804 SmallVector<SMLoc, 8> OperandLocs;
3805 NumOperands = Operands.size();
3806 for (unsigned i = 1; i < NumOperands; ++i)
3807 OperandLocs.push_back(Operands[i]->getStartLoc());
3808 if (validateInstruction(Inst, OperandLocs))
3812 Out.EmitInstruction(Inst, STI);
3815 case Match_MissingFeature: {
3816 assert(ErrorInfo && "Unknown missing feature!");
3817 // Special case the error message for the very common case where only
3818 // a single subtarget feature is missing (neon, e.g.).
3819 std::string Msg = "instruction requires:";
3821 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
3822 if (ErrorInfo & Mask) {
3824 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
3828 return Error(IDLoc, Msg);
3830 case Match_MnemonicFail:
3831 return showMatchError(IDLoc, MatchResult);
3832 case Match_InvalidOperand: {
3833 SMLoc ErrorLoc = IDLoc;
3834 if (ErrorInfo != ~0ULL) {
3835 if (ErrorInfo >= Operands.size())
3836 return Error(IDLoc, "too few operands for instruction");
3838 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3839 if (ErrorLoc == SMLoc())
3842 // If the match failed on a suffix token operand, tweak the diagnostic
3844 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
3845 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
3846 MatchResult = Match_InvalidSuffix;
3848 return showMatchError(ErrorLoc, MatchResult);
3850 case Match_InvalidMemoryIndexed1:
3851 case Match_InvalidMemoryIndexed2:
3852 case Match_InvalidMemoryIndexed4:
3853 case Match_InvalidMemoryIndexed8:
3854 case Match_InvalidMemoryIndexed16:
3855 case Match_InvalidCondCode:
3856 case Match_AddSubRegExtendSmall:
3857 case Match_AddSubRegExtendLarge:
3858 case Match_AddSubSecondSource:
3859 case Match_LogicalSecondSource:
3860 case Match_AddSubRegShift32:
3861 case Match_AddSubRegShift64:
3862 case Match_InvalidMovImm32Shift:
3863 case Match_InvalidMovImm64Shift:
3864 case Match_InvalidFPImm:
3865 case Match_InvalidMemoryWExtend8:
3866 case Match_InvalidMemoryWExtend16:
3867 case Match_InvalidMemoryWExtend32:
3868 case Match_InvalidMemoryWExtend64:
3869 case Match_InvalidMemoryWExtend128:
3870 case Match_InvalidMemoryXExtend8:
3871 case Match_InvalidMemoryXExtend16:
3872 case Match_InvalidMemoryXExtend32:
3873 case Match_InvalidMemoryXExtend64:
3874 case Match_InvalidMemoryXExtend128:
3875 case Match_InvalidMemoryIndexed4SImm7:
3876 case Match_InvalidMemoryIndexed8SImm7:
3877 case Match_InvalidMemoryIndexed16SImm7:
3878 case Match_InvalidMemoryIndexedSImm9:
3879 case Match_InvalidImm0_7:
3880 case Match_InvalidImm0_15:
3881 case Match_InvalidImm0_31:
3882 case Match_InvalidImm0_63:
3883 case Match_InvalidImm0_127:
3884 case Match_InvalidImm0_65535:
3885 case Match_InvalidImm1_8:
3886 case Match_InvalidImm1_16:
3887 case Match_InvalidImm1_32:
3888 case Match_InvalidImm1_64:
3889 case Match_InvalidIndex1:
3890 case Match_InvalidIndexB:
3891 case Match_InvalidIndexH:
3892 case Match_InvalidIndexS:
3893 case Match_InvalidIndexD:
3894 case Match_InvalidLabel:
3897 if (ErrorInfo >= Operands.size())
3898 return Error(IDLoc, "too few operands for instruction");
3899 // Any time we get here, there's nothing fancy to do. Just get the
3900 // operand SMLoc and display the diagnostic.
3901 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
3902 if (ErrorLoc == SMLoc())
3904 return showMatchError(ErrorLoc, MatchResult);
3908 llvm_unreachable("Implement any new match types added!");
3912 /// ParseDirective parses the arm specific directives
3913 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
3914 StringRef IDVal = DirectiveID.getIdentifier();
3915 SMLoc Loc = DirectiveID.getLoc();
3916 if (IDVal == ".hword")
3917 return parseDirectiveWord(2, Loc);
3918 if (IDVal == ".word")
3919 return parseDirectiveWord(4, Loc);
3920 if (IDVal == ".xword")
3921 return parseDirectiveWord(8, Loc);
3922 if (IDVal == ".tlsdesccall")
3923 return parseDirectiveTLSDescCall(Loc);
3924 if (IDVal == ".ltorg" || IDVal == ".pool")
3925 return parseDirectiveLtorg(Loc);
3926 if (IDVal == ".unreq")
3927 return parseDirectiveUnreq(DirectiveID.getLoc());
3929 return parseDirectiveLOH(IDVal, Loc);
3932 /// parseDirectiveWord
3933 /// ::= .word [ expression (, expression)* ]
3934 bool AArch64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
3935 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3937 const MCExpr *Value;
3938 if (getParser().parseExpression(Value))
3941 getParser().getStreamer().EmitValue(Value, Size);
3943 if (getLexer().is(AsmToken::EndOfStatement))
3946 // FIXME: Improve diagnostic.
3947 if (getLexer().isNot(AsmToken::Comma))
3948 return Error(L, "unexpected token in directive");
3957 // parseDirectiveTLSDescCall:
3958 // ::= .tlsdesccall symbol
3959 bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
3961 if (getParser().parseIdentifier(Name))
3962 return Error(L, "expected symbol after directive");
3964 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
3965 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
3966 Expr = AArch64MCExpr::Create(Expr, AArch64MCExpr::VK_TLSDESC, getContext());
3969 Inst.setOpcode(AArch64::TLSDESCCALL);
3970 Inst.addOperand(MCOperand::CreateExpr(Expr));
3972 getParser().getStreamer().EmitInstruction(Inst, STI);
3976 /// ::= .loh <lohName | lohId> label1, ..., labelN
3977 /// The number of arguments depends on the loh identifier.
3978 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
3979 if (IDVal != MCLOHDirectiveName())
3982 if (getParser().getTok().isNot(AsmToken::Identifier)) {
3983 if (getParser().getTok().isNot(AsmToken::Integer))
3984 return TokError("expected an identifier or a number in directive");
3985 // We successfully get a numeric value for the identifier.
3986 // Check if it is valid.
3987 int64_t Id = getParser().getTok().getIntVal();
3988 Kind = (MCLOHType)Id;
3989 // Check that Id does not overflow MCLOHType.
3990 if (!isValidMCLOHType(Kind) || Id != Kind)
3991 return TokError("invalid numeric identifier in directive");
3993 StringRef Name = getTok().getIdentifier();
3994 // We successfully parse an identifier.
3995 // Check if it is a recognized one.
3996 int Id = MCLOHNameToId(Name);
3999 return TokError("invalid identifier in directive");
4000 Kind = (MCLOHType)Id;
4002 // Consume the identifier.
4004 // Get the number of arguments of this LOH.
4005 int NbArgs = MCLOHIdToNbArgs(Kind);
4007 assert(NbArgs != -1 && "Invalid number of arguments");
4009 SmallVector<MCSymbol *, 3> Args;
4010 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4012 if (getParser().parseIdentifier(Name))
4013 return TokError("expected identifier in directive");
4014 Args.push_back(getContext().GetOrCreateSymbol(Name));
4016 if (Idx + 1 == NbArgs)
4018 if (getLexer().isNot(AsmToken::Comma))
4019 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4022 if (getLexer().isNot(AsmToken::EndOfStatement))
4023 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4025 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4029 /// parseDirectiveLtorg
4030 /// ::= .ltorg | .pool
4031 bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
4032 getTargetStreamer().emitCurrentConstantPool();
4036 /// parseDirectiveReq
4037 /// ::= name .req registername
4038 bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
4039 Parser.Lex(); // Eat the '.req' token.
4040 SMLoc SRegLoc = getLoc();
4041 unsigned RegNum = tryParseRegister();
4042 bool IsVector = false;
4044 if (RegNum == static_cast<unsigned>(-1)) {
4046 RegNum = tryMatchVectorRegister(Kind, false);
4047 if (!Kind.empty()) {
4048 Error(SRegLoc, "vector register without type specifier expected");
4054 if (RegNum == static_cast<unsigned>(-1)) {
4055 Parser.eatToEndOfStatement();
4056 Error(SRegLoc, "register name or alias expected");
4060 // Shouldn't be anything else.
4061 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
4062 Error(Parser.getTok().getLoc(), "unexpected input in .req directive");
4063 Parser.eatToEndOfStatement();
4067 Parser.Lex(); // Consume the EndOfStatement
4069 auto pair = std::make_pair(IsVector, RegNum);
4070 if (RegisterReqs.GetOrCreateValue(Name, pair).getValue() != pair)
4071 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
4076 /// parseDirectiveUneq
4077 /// ::= .unreq registername
4078 bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
4079 if (Parser.getTok().isNot(AsmToken::Identifier)) {
4080 Error(Parser.getTok().getLoc(), "unexpected input in .unreq directive.");
4081 Parser.eatToEndOfStatement();
4084 RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
4085 Parser.Lex(); // Eat the identifier.
4090 AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
4091 AArch64MCExpr::VariantKind &ELFRefKind,
4092 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4094 ELFRefKind = AArch64MCExpr::VK_INVALID;
4095 DarwinRefKind = MCSymbolRefExpr::VK_None;
4098 if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
4099 ELFRefKind = AE->getKind();
4100 Expr = AE->getSubExpr();
4103 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4105 // It's a simple symbol reference with no addend.
4106 DarwinRefKind = SE->getKind();
4110 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4114 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4117 DarwinRefKind = SE->getKind();
4119 if (BE->getOpcode() != MCBinaryExpr::Add &&
4120 BE->getOpcode() != MCBinaryExpr::Sub)
4123 // See if the addend is is a constant, otherwise there's more going
4124 // on here than we can deal with.
4125 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4129 Addend = AddendExpr->getValue();
4130 if (BE->getOpcode() == MCBinaryExpr::Sub)
4133 // It's some symbol reference + a constant addend, but really
4134 // shouldn't use both Darwin and ELF syntax.
4135 return ELFRefKind == AArch64MCExpr::VK_INVALID ||
4136 DarwinRefKind == MCSymbolRefExpr::VK_None;
4139 /// Force static initialization.
4140 extern "C" void LLVMInitializeAArch64AsmParser() {
4141 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64leTarget);
4142 RegisterMCAsmParser<AArch64AsmParser> Y(TheAArch64beTarget);
4143 RegisterMCAsmParser<AArch64AsmParser> Z(TheARM64Target);
4146 #define GET_REGISTER_MATCHER
4147 #define GET_SUBTARGET_FEATURE_NAME
4148 #define GET_MATCHER_IMPLEMENTATION
4149 #include "AArch64GenAsmMatcher.inc"
4151 // Define this matcher function after the auto-generated include so we
4152 // have the match class enum definitions.
4153 unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
4155 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
4156 // If the kind is a token for a literal immediate, check if our asm
4157 // operand matches. This is for InstAliases which have a fixed-value
4158 // immediate in the syntax.
4159 int64_t ExpectedVal;
4162 return Match_InvalidOperand;
4204 return Match_InvalidOperand;
4205 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
4207 return Match_InvalidOperand;
4208 if (CE->getValue() == ExpectedVal)
4209 return Match_Success;
4210 return Match_InvalidOperand;