1 //==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the (GNU-style) assembly parser for the AArch64
13 //===----------------------------------------------------------------------===//
16 #include "MCTargetDesc/AArch64MCTargetDesc.h"
17 #include "MCTargetDesc/AArch64MCExpr.h"
18 #include "Utils/AArch64BaseInfo.h"
19 #include "llvm/ADT/APFloat.h"
20 #include "llvm/ADT/APInt.h"
21 #include "llvm/ADT/StringSwitch.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/MC/MCTargetAsmParser.h"
27 #include "llvm/MC/MCExpr.h"
28 #include "llvm/MC/MCRegisterInfo.h"
29 #include "llvm/MC/MCStreamer.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Support/TargetRegistry.h"
// NOTE(review): this listing is a sampled excerpt — each line keeps its
// original file number and intervening lines are elided, so several bodies
// below appear without their closing braces. Comments describe only what the
// visible lines establish.
//
// Target-specific assembly parser for AArch64: turns GNU-style assembly
// tokens into MCInst instructions via the TableGen-generated matcher.
43 class AArch64AsmParser : public MCTargetAsmParser {
// Pull in the TableGen-generated matcher declarations (MatchInstructionImpl,
// ComputeAvailableFeatures, ...).
47 #define GET_ASSEMBLER_HEADER
48 #include "AArch64GenAsmMatcher.inc"
// Extend the generic match-result enum with target diagnostics, again
// generated by TableGen.
51 enum AArch64MatchResultTy {
52 Match_FirstAArch64 = FIRST_TARGET_MATCH_RESULT_TY,
53 #define GET_OPERAND_DIAGNOSTIC_TYPES
54 #include "AArch64GenAsmMatcher.inc"
// Constructor: records the subtarget and parser, registers this object as a
// parser extension, and caches the available-feature bits for matching.
57 AArch64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
58 const MCInstrInfo &MII)
59 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
60 MCAsmParserExtension::Initialize(_Parser);
62 // Initialize the set of available features.
63 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
66 // These are the public interface of the MCTargetAsmParser
67 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
68 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
70 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Directive hooks: generic dispatch plus the AArch64-specific
// .tlsdesccall and .word/.hword/.xword handlers.
72 bool ParseDirective(AsmToken DirectiveID);
73 bool ParseDirectiveTLSDescCall(SMLoc L);
74 bool ParseDirectiveWord(unsigned Size, SMLoc L);
// Final match-and-emit entry point called once all operands are parsed.
76 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
77 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
78 MCStreamer&Out, unsigned &ErrorInfo,
79 bool MatchingInlineAsm);
81 // The rest of the sub-parsers have more freedom over interface: they return
82 // an OperandMatchResultTy because it's less ambiguous than true/false or
83 // -1/0/1 even if it is more verbose
85 ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
// Parse a bare immediate expression (no '#' handling here).
88 OperandMatchResultTy ParseImmediate(const MCExpr *&ExprVal);
// Parse a ":reloc_spec:" modifier prefix, e.g. ":lo12:".
90 OperandMatchResultTy ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind);
// Parse a NEON lane index such as "[2]".
93 ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
97 ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
// Parse "#imm {, LSL #amt}" as used by ADD/SUB and move-wide forms.
101 ParseImmWithLSLOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse a condition code (eq, ne, ...).
104 ParseCondCodeOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse a CRx operand for SYS-family instructions.
107 ParseCRxOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse a floating-point immediate for FMOV.
110 ParseFPImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Thin template shim: instantiates the mapper type and forwards to the
// non-template overload below.
112 template<typename SomeNamedImmMapper> OperandMatchResultTy
113 ParseNamedImmOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
114 return ParseNamedImmOperand(SomeNamedImmMapper(), Operands);
118 ParseNamedImmOperand(const NamedImmMapper &Mapper,
119 SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse the "[xN]" address operand of load/store-exclusive instructions.
122 ParseLSXAddressOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse a shift/extend modifier (LSL/LSR/ASR/ROR/UXTW/SXTX/...).
125 ParseShiftExtend(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
// Parse the system-register operand of MRS/MSR.
128 ParseSysRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands);
130 bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout,
133 OperandMatchResultTy ParseVectorList(SmallVectorImpl<MCParsedAsmOperand *> &);
// Post-match semantic checks that the matcher tables cannot express.
135 bool validateInstruction(MCInst &Inst,
136 const SmallVectorImpl<MCParsedAsmOperand*> &Operands);
138 /// Scan the next token (which had better be an identifier) and determine
139 /// whether it represents a general-purpose or vector register. It returns
140 /// true if an identifier was found and populates its reference arguments. It
141 /// does not consume the token.
143 IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc, StringRef &LayoutSpec,
144 SMLoc &LayoutLoc) const;
152 /// Instances of this class represent a parsed AArch64 machine instruction.
153 class AArch64Operand : public MCParsedAsmOperand {
// Discriminator for the operand union below; exactly one alternative is
// active per operand.
156 k_ImmWithLSL, // #uimm {, LSL #amt }
157 k_CondCode, // eq/ne/...
158 k_FPImmediate, // Limited-precision floating-point imm
159 k_Immediate, // Including expressions referencing symbols
162 k_VectorList, // A sequential list of 1 to 4 registers.
163 k_SysReg, // The register operand of MRS and MSR instructions
164 k_Token, // The mnemonic; other raw tokens the auto-generated
165 k_WrappedRegister // Load/store exclusive permit a wrapped register.
// Source range of the operand, for diagnostics.
168 SMLoc StartLoc, EndLoc;
// Payload structs for the union: immediate-with-shift, condition code,
// shift/extend modifier, and vector register list.
170 struct ImmWithLSLOp {
172 unsigned ShiftAmount;
177 A64CC::CondCodes Code;
192 struct ShiftExtendOp {
193 A64SE::ShiftExtSpecifiers ShiftType;
198 // A vector register list is a sequential list of 1 to 4 registers.
199 struct VectorListOp {
202 A64Layout::VectorLayout Layout;
// The anonymous-union members selected by Kind.
216 struct ImmWithLSLOp ImmWithLSL;
217 struct CondCodeOp CondCode;
218 struct FPImmOp FPImm;
221 struct ShiftExtendOp ShiftExtend;
222 struct VectorListOp VectorList;
223 struct SysRegOp SysReg;
227 AArch64Operand(KindTy K, SMLoc S, SMLoc E)
228 : MCParsedAsmOperand(), Kind(K), StartLoc(S), EndLoc(E) {}
231 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand() {
234 SMLoc getStartLoc() const { return StartLoc; }
235 SMLoc getEndLoc() const { return EndLoc; }
236 void print(raw_ostream&) const;
// Checked accessors: each asserts that the matching union alternative is
// active before reading it.
239 StringRef getToken() const {
240 assert(Kind == k_Token && "Invalid access!");
241 return StringRef(Tok.Data, Tok.Length);
244 unsigned getReg() const {
245 assert((Kind == k_Register || Kind == k_WrappedRegister)
246 && "Invalid access!");
250 const MCExpr *getImm() const {
251 assert(Kind == k_Immediate && "Invalid access!");
255 A64CC::CondCodes getCondCode() const {
256 assert(Kind == k_CondCode && "Invalid access!");
257 return CondCode.Code;
// Classify an expression: if it is an AArch64MCExpr, report its relocation
// variant; if it is some other non-constant expression, report VK_None.
// The constant-expression path and return values are elided in this listing.
260 static bool isNonConstantExpr(const MCExpr *E,
261 AArch64MCExpr::VariantKind &Variant) {
262 if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
263 Variant = A64E->getKind();
265 } else if (!isa<MCConstantExpr>(E)) {
266 Variant = AArch64MCExpr::VK_AARCH64_None;
// --- Kind predicates: report which union alternative this operand holds.
// isMem is always false: AArch64 models addressing with dedicated operand
// kinds rather than a generic memory operand.
273 bool isCondCode() const { return Kind == k_CondCode; }
274 bool isToken() const { return Kind == k_Token; }
275 bool isReg() const { return Kind == k_Register; }
276 bool isImm() const { return Kind == k_Immediate; }
277 bool isMem() const { return false; }
278 bool isFPImm() const { return Kind == k_FPImmediate; }
279 bool isShiftOrExtend() const { return Kind == k_ShiftExtend; }
280 bool isSysReg() const { return Kind == k_SysReg; }
281 bool isImmWithLSL() const { return Kind == k_ImmWithLSL; }
282 bool isWrappedReg() const { return Kind == k_WrappedRegister; }
// Valid immediate for ADD/SUB with no shift: either a :lo12:-style
// relocation modifier, or a plain constant in [0, 0xfff].
284 bool isAddSubImmLSL0() const {
285 if (!isImmWithLSL()) return false;
286 if (ImmWithLSL.ShiftAmount != 0) return false;
288 AArch64MCExpr::VariantKind Variant;
289 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
290 return Variant == AArch64MCExpr::VK_AARCH64_LO12
291 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12
292 || Variant == AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC
293 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12
294 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC
295 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC_LO12;
298 // Otherwise it should be a real immediate in range:
299 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
300 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
// Valid immediate for ADD/SUB with LSL #12: only the :hi12: TLS modifiers
// or a plain constant in [0, 0xfff].
303 bool isAddSubImmLSL12() const {
304 if (!isImmWithLSL()) return false;
305 if (ImmWithLSL.ShiftAmount != 12) return false;
307 AArch64MCExpr::VariantKind Variant;
308 if (isNonConstantExpr(ImmWithLSL.Val, Variant)) {
309 return Variant == AArch64MCExpr::VK_AARCH64_DTPREL_HI12
310 || Variant == AArch64MCExpr::VK_AARCH64_TPREL_HI12;
313 // Otherwise it should be a real immediate in range:
314 const MCConstantExpr *CE = cast<MCConstantExpr>(ImmWithLSL.Val);
315 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
// Valid register-offset extend for a load/store of MemSize bytes with an
// RmSize-bit index register: UXTW/SXTW for 32-bit Rm, LSL/SXTX for 64-bit
// Rm, and the shift amount must be 0 or log2(MemSize).
318 template<unsigned MemSize, unsigned RmSize> bool isAddrRegExtend() const {
319 if (!isShiftOrExtend()) return false;
321 A64SE::ShiftExtSpecifiers Ext = ShiftExtend.ShiftType;
322 if (RmSize == 32 && !(Ext == A64SE::UXTW || Ext == A64SE::SXTW))
325 if (RmSize == 64 && !(Ext == A64SE::LSL || Ext == A64SE::SXTX))
328 return ShiftExtend.Amount == Log2_32(MemSize) || ShiftExtend.Amount == 0;
// Valid operand for ADRP: either a permitted page-relocation modifier or a
// plain 21-bit, 4096-scaled label.
331 bool isAdrpLabel() const {
332 if (!isImm()) return false;
334 AArch64MCExpr::VariantKind Variant;
335 if (isNonConstantExpr(getImm(), Variant)) {
336 return Variant == AArch64MCExpr::VK_AARCH64_None
337 || Variant == AArch64MCExpr::VK_AARCH64_GOT
338 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL
339 || Variant == AArch64MCExpr::VK_AARCH64_TLSDESC;
342 return isLabel<21, 4096>();
// Bitfield width operand: a constant in [1, RegWidth].
345 template<unsigned RegWidth> bool isBitfieldWidth() const {
346 if (!isImm()) return false;
348 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
349 if (!CE) return false;
351 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
// Fixed-point position for fixed<->float conversions: constant in
// [1, RegWidth].
354 template<int RegWidth>
355 bool isCVTFixedPos() const {
356 if (!isImm()) return false;
358 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
359 if (!CE) return false;
361 return CE->getValue() >= 1 && CE->getValue() <= RegWidth;
// FP immediate encodable in FMOV's 8-bit limited-precision format.
364 bool isFMOVImm() const {
365 if (!isFPImm()) return false;
367 APFloat RealVal(FPImm.Val);
369 return A64Imms::isFPImm(RealVal, ImmVal);
// Exactly +0.0 (used by FP compare-with-zero forms).
372 bool isFPZero() const {
373 if (!isFPImm()) return false;
375 APFloat RealVal(FPImm.Val);
376 return RealVal.isPosZero();
// PC-relative label operand: any symbol reference is accepted (range is
// checked at fixup time), while a constant must be a multiple of `scale`
// fitting in a signed field_width-bit field after scaling.
379 template<unsigned field_width, unsigned scale>
380 bool isLabel() const {
381 if (!isImm()) return false;
383 if (dyn_cast<MCSymbolRefExpr>(Imm.Val)) {
385 } else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
386 int64_t Val = CE->getValue();
387 int64_t Min = - (scale * (1LL << (field_width - 1)));
388 int64_t Max = scale * ((1LL << (field_width - 1)) - 1);
389 return (Val % scale) == 0 && Val >= Min && Val <= Max;
392 // N.b. this disallows explicit relocation specifications via an
393 // AArch64MCExpr. Users needing that behaviour
// Lane index that must be exactly 1 (comes from custom parsing, so it is
// guaranteed to be a constant expression).
397 bool isLane1() const {
398 if (!isImm()) return false;
400 // Because it's come through custom assembly parsing, it must always be a
401 // constant expression.
402 return cast<MCConstantExpr>(getImm())->getValue() == 1;
// Valid label for LDR (literal): plain or :gottprel: modified, otherwise a
// 19-bit, 4-scaled PC-relative label.
405 bool isLoadLitLabel() const {
406 if (!isImm()) return false;
408 AArch64MCExpr::VariantKind Variant;
409 if (isNonConstantExpr(getImm(), Variant)) {
410 return Variant == AArch64MCExpr::VK_AARCH64_None
411 || Variant == AArch64MCExpr::VK_AARCH64_GOTTPREL;
414 return isLabel<19, 4>();
// Constant encodable as an AArch64 bitmask ("logical") immediate for a
// RegWidth-bit operation.
417 template<unsigned RegWidth> bool isLogicalImm() const {
418 if (!isImm()) return false;
420 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
421 if (!CE) return false;
424 return A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
// Logical immediate usable by the MOV alias of ORR: the MOVZ/MOVN forms
// take priority, so those encodings are excluded here.
427 template<unsigned RegWidth> bool isLogicalImmMOV() const {
428 if (!isLogicalImm<RegWidth>()) return false;
430 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
432 // The move alias for ORR is only valid if the immediate cannot be
433 // represented with a move (immediate) instruction; they take priority.
435 return !A64Imms::isMOVZImm(RegWidth, CE->getValue(), UImm16, Shift)
436 && !A64Imms::isMOVNImm(RegWidth, CE->getValue(), UImm16, Shift);
// Unsigned 12-bit offset scaled by the access size; non-constant
// expressions are assumed valid and left for relocation processing.
439 template<int MemSize>
440 bool isOffsetUImm12() const {
441 if (!isImm()) return false;
443 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
445 // Assume they know what they're doing for now if they've given us a
446 // non-constant expression. In principle we could check for ridiculous
447 // things that can't possibly work or relocations that would almost
448 // certainly break resulting code.
452 int64_t Val = CE->getValue();
454 // Must be a multiple of the access size in bytes.
455 if ((Val & (MemSize - 1)) != 0) return false;
457 // Must be 12-bit unsigned
458 return Val >= 0 && Val <= 0xfff * MemSize;
// Plain shift operand of the requested type, with amount limited to the
// register width minus one (63 for 64-bit, 31 for 32-bit).
461 template<A64SE::ShiftExtSpecifiers SHKind, bool is64Bit>
462 bool isShift() const {
463 if (!isShiftOrExtend()) return false;
465 if (ShiftExtend.ShiftType != SHKind)
468 return is64Bit ? ShiftExtend.Amount <= 63 : ShiftExtend.Amount <= 31;
// --- Move-wide immediate predicates. Each lists the relocation modifiers
// the ELF AArch64 ABI permits for that instruction/width combination and
// defers the shared range/shift checks to isMoveWideImm. The MOVN lists
// carry the signed (SABS/TPREL/DTPREL) operators; MOVZ adds the unsigned
// ABS_Gn operators; MOVK uses the no-overflow-check (_NC) variants.
471 bool isMOVN32Imm() const {
472 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
473 AArch64MCExpr::VK_AARCH64_SABS_G0,
474 AArch64MCExpr::VK_AARCH64_SABS_G1,
475 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
476 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
477 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
478 AArch64MCExpr::VK_AARCH64_TPREL_G1,
479 AArch64MCExpr::VK_AARCH64_TPREL_G0,
481 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
483 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
486 bool isMOVN64Imm() const {
487 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
488 AArch64MCExpr::VK_AARCH64_SABS_G0,
489 AArch64MCExpr::VK_AARCH64_SABS_G1,
490 AArch64MCExpr::VK_AARCH64_SABS_G2,
491 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
492 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
493 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
494 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
495 AArch64MCExpr::VK_AARCH64_TPREL_G2,
496 AArch64MCExpr::VK_AARCH64_TPREL_G1,
497 AArch64MCExpr::VK_AARCH64_TPREL_G0,
499 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
501 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
505 bool isMOVZ32Imm() const {
506 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
507 AArch64MCExpr::VK_AARCH64_ABS_G0,
508 AArch64MCExpr::VK_AARCH64_ABS_G1,
509 AArch64MCExpr::VK_AARCH64_SABS_G0,
510 AArch64MCExpr::VK_AARCH64_SABS_G1,
511 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
512 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
513 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
514 AArch64MCExpr::VK_AARCH64_TPREL_G1,
515 AArch64MCExpr::VK_AARCH64_TPREL_G0,
517 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
519 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
522 bool isMOVZ64Imm() const {
523 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
524 AArch64MCExpr::VK_AARCH64_ABS_G0,
525 AArch64MCExpr::VK_AARCH64_ABS_G1,
526 AArch64MCExpr::VK_AARCH64_ABS_G2,
527 AArch64MCExpr::VK_AARCH64_ABS_G3,
528 AArch64MCExpr::VK_AARCH64_SABS_G0,
529 AArch64MCExpr::VK_AARCH64_SABS_G1,
530 AArch64MCExpr::VK_AARCH64_SABS_G2,
531 AArch64MCExpr::VK_AARCH64_DTPREL_G2,
532 AArch64MCExpr::VK_AARCH64_DTPREL_G1,
533 AArch64MCExpr::VK_AARCH64_DTPREL_G0,
534 AArch64MCExpr::VK_AARCH64_GOTTPREL_G1,
535 AArch64MCExpr::VK_AARCH64_TPREL_G2,
536 AArch64MCExpr::VK_AARCH64_TPREL_G1,
537 AArch64MCExpr::VK_AARCH64_TPREL_G0,
539 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
541 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
544 bool isMOVK32Imm() const {
545 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
546 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
547 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
548 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
549 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
550 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
551 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
552 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
554 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
556 return isMoveWideImm(32, PermittedModifiers, NumModifiers);
559 bool isMOVK64Imm() const {
560 static const AArch64MCExpr::VariantKind PermittedModifiers[] = {
561 AArch64MCExpr::VK_AARCH64_ABS_G0_NC,
562 AArch64MCExpr::VK_AARCH64_ABS_G1_NC,
563 AArch64MCExpr::VK_AARCH64_ABS_G2_NC,
564 AArch64MCExpr::VK_AARCH64_ABS_G3,
565 AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC,
566 AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC,
567 AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC,
568 AArch64MCExpr::VK_AARCH64_TPREL_G1_NC,
569 AArch64MCExpr::VK_AARCH64_TPREL_G0_NC,
571 const unsigned NumModifiers = llvm::array_lengthof(PermittedModifiers);
573 return isMoveWideImm(64, PermittedModifiers, NumModifiers);
// Shared validation for MOVZ/MOVN/MOVK operands: shift must be a multiple
// of 16 and below the register width; a modified expression must use an
// implicit shift and one of the caller's permitted modifiers; a plain
// constant must fit in 16 bits.
576 bool isMoveWideImm(unsigned RegWidth,
577 const AArch64MCExpr::VariantKind *PermittedModifiers,
578 unsigned NumModifiers) const {
579 if (!isImmWithLSL()) return false;
581 if (ImmWithLSL.ShiftAmount % 16 != 0) return false;
582 if (ImmWithLSL.ShiftAmount >= RegWidth) return false;
584 AArch64MCExpr::VariantKind Modifier;
585 if (isNonConstantExpr(ImmWithLSL.Val, Modifier)) {
586 // E.g. "#:abs_g0:sym, lsl #16" makes no sense.
587 if (!ImmWithLSL.ImplicitAmount) return false;
589 for (unsigned i = 0; i < NumModifiers; ++i)
590 if (PermittedModifiers[i] == Modifier) return true;
595 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmWithLSL.Val);
596 return CE && CE->getValue() >= 0 && CE->getValue() <= 0xffff;
// Whether this constant can be materialised by the MOV alias using the
// given MOVZ/MOVN-style checker. For 32-bit ops, bits above 32 must be all
// zeros or all ones (sign/zero extension of the same value).
599 template<int RegWidth, bool (*isValidImm)(int, uint64_t, int&, int&)>
600 bool isMoveWideMovAlias() const {
601 if (!isImm()) return false;
603 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
604 if (!CE) return false;
607 uint64_t Value = CE->getValue();
609 // If this is a 32-bit instruction then all bits above 32 should be the
610 // same: either of these is fine because signed/unsigned values should be
612 if (RegWidth == 32) {
613 if ((Value >> 32) != 0 && (Value >> 32) != 0xffffffff)
616 Value &= 0xffffffffULL;
619 return isValidImm(RegWidth, Value, UImm16, Shift);
// System-register predicates: each asks the relevant name mapper whether
// the spelled register name is known for that instruction form.
622 bool isMSRWithReg() const {
623 if (!isSysReg()) return false;
625 bool IsKnownRegister;
626 StringRef Name(SysReg.Data, SysReg.Length);
627 A64SysReg::MSRMapper().fromString(Name, IsKnownRegister);
629 return IsKnownRegister;
632 bool isMSRPState() const {
633 if (!isSysReg()) return false;
635 bool IsKnownRegister;
636 StringRef Name(SysReg.Data, SysReg.Length);
637 A64PState::PStateMapper().fromString(Name, IsKnownRegister);
639 return IsKnownRegister;
// (Method header elided in this listing; by the MRSMapper use this is the
// MRS-register predicate.)
643 if (!isSysReg()) return false;
645 // First check against specific MSR-only (write-only) registers
646 bool IsKnownRegister;
647 StringRef Name(SysReg.Data, SysReg.Length);
648 A64SysReg::MRSMapper().fromString(Name, IsKnownRegister);
650 return IsKnownRegister;
// Prefetch-operation operand: a constant in the 5-bit range [0, 31].
653 bool isPRFM() const {
654 if (!isImm()) return false;
656 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
661 return CE->getValue() >= 0 && CE->getValue() <= 31;
// Register-extend modifier of the requested type with amount 0-4.
664 template<A64SE::ShiftExtSpecifiers SHKind> bool isRegExtend() const {
665 if (!isShiftOrExtend()) return false;
667 if (ShiftExtend.ShiftType != SHKind)
670 return ShiftExtend.Amount <= 4;
// Explicit LSL #0-4 (an implicit amount is not accepted here).
673 bool isRegExtendLSL() const {
674 if (!isShiftOrExtend()) return false;
676 if (ShiftExtend.ShiftType != A64SE::LSL)
679 return !ShiftExtend.ImplicitAmount && ShiftExtend.Amount <= 4;
682 // if 0 < value <= w, return true
683 bool isShrFixedWidth(int w) const {
686 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
689 int64_t Value = CE->getValue();
690 return Value > 0 && Value <= w;
// Right-shift amounts for the vector shift-right immediates, per width.
693 bool isShrImm8() const { return isShrFixedWidth(8); }
695 bool isShrImm16() const { return isShrFixedWidth(16); }
697 bool isShrImm32() const { return isShrFixedWidth(32); }
699 bool isShrImm64() const { return isShrFixedWidth(64); }
701 // if 0 <= value < w, return true
702 bool isShlFixedWidth(int w) const {
705 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
708 int64_t Value = CE->getValue();
709 return Value >= 0 && Value < w;
// Left-shift amounts for the vector shift-left immediates, per width.
712 bool isShlImm8() const { return isShlFixedWidth(8); }
714 bool isShlImm16() const { return isShlFixedWidth(16); }
716 bool isShlImm32() const { return isShlFixedWidth(32); }
718 bool isShlImm64() const { return isShlFixedWidth(64); }
// Shift modifiers permitted on vector MOVI/MVNI immediates:
// LSL #0/8/16/24, the halfword-only LSL #0/8, and MSL #8/16.
720 bool isNeonMovImmShiftLSL() const {
721 if (!isShiftOrExtend())
724 if (ShiftExtend.ShiftType != A64SE::LSL)
727 // Valid shift amount is 0, 8, 16 and 24.
728 return ShiftExtend.Amount % 8 == 0 && ShiftExtend.Amount <= 24;
731 bool isNeonMovImmShiftLSLH() const {
732 if (!isShiftOrExtend())
735 if (ShiftExtend.ShiftType != A64SE::LSL)
738 // Valid shift amount is 0 and 8.
739 return ShiftExtend.Amount == 0 || ShiftExtend.Amount == 8;
742 bool isNeonMovImmShiftMSL() const {
743 if (!isShiftOrExtend())
746 if (ShiftExtend.ShiftType != A64SE::MSL)
749 // Valid shift amount is 8 and 16.
750 return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16;
// Vector register list with exactly the requested layout and length.
753 template <A64Layout::VectorLayout Layout, unsigned Count>
754 bool isVectorList() const {
755 return Kind == k_VectorList && VectorList.Layout == Layout &&
756 VectorList.Count == Count;
// Signed 7-bit offset scaled by MemSize (load/store pair addressing).
759 template <int MemSize> bool isSImm7Scaled() const {
763 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764 if (!CE) return false;
766 int64_t Val = CE->getValue();
767 if (Val % MemSize != 0) return false;
771 return Val >= -64 && Val < 64;
// Generic signed immediate fitting in BitWidth bits.
774 template<int BitWidth>
775 bool isSImm() const {
776 if (!isImm()) return false;
778 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779 if (!CE) return false;
781 return CE->getValue() >= -(1LL << (BitWidth - 1))
782 && CE->getValue() < (1LL << (BitWidth - 1));
// Generic unsigned immediate fitting in bitWidth bits, plus an untemplated
// overload that only requires a constant expression.
785 template<int bitWidth>
786 bool isUImm() const {
787 if (!isImm()) return false;
789 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
790 if (!CE) return false;
792 return CE->getValue() >= 0 && CE->getValue() < (1LL << bitWidth);
795 bool isUImm() const {
796 if (!isImm()) return false;
798 return isa<MCConstantExpr>(getImm());
// 64-bit mask for vector MOVI where every byte is 0x00 or 0xff.
801 bool isNeonUImm64Mask() const {
805 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
809 uint64_t Value = CE->getValue();
811 // i64 value with each byte being either 0x00 or 0xff.
812 for (unsigned i = 0; i < 8; ++i, Value >>= 8)
813 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff)
818 // if value == N, return true
820 bool isExactImm() const {
821 if (!isImm()) return false;
823 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
824 if (!CE) return false;
826 return CE->getValue() == N;
// --- Factory functions. Each heap-allocates an AArch64Operand of the
// matching kind and fills in its union payload; the `return Op;` lines are
// elided in this listing. Ownership passes to the caller (the operand
// vector). Note CreateSysReg stores its string through the Tok fields.
829 static AArch64Operand *CreateImmWithLSL(const MCExpr *Val,
830 unsigned ShiftAmount,
833 AArch64Operand *Op = new AArch64Operand(k_ImmWithLSL, S, E);
834 Op->ImmWithLSL.Val = Val;
835 Op->ImmWithLSL.ShiftAmount = ShiftAmount;
836 Op->ImmWithLSL.ImplicitAmount = ImplicitAmount;
840 static AArch64Operand *CreateCondCode(A64CC::CondCodes Code,
842 AArch64Operand *Op = new AArch64Operand(k_CondCode, S, E);
843 Op->CondCode.Code = Code;
847 static AArch64Operand *CreateFPImm(double Val,
849 AArch64Operand *Op = new AArch64Operand(k_FPImmediate, S, E);
854 static AArch64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
855 AArch64Operand *Op = new AArch64Operand(k_Immediate, S, E);
860 static AArch64Operand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
861 AArch64Operand *Op = new AArch64Operand(k_Register, S, E);
862 Op->Reg.RegNum = RegNum;
866 static AArch64Operand *CreateWrappedReg(unsigned RegNum, SMLoc S, SMLoc E) {
867 AArch64Operand *Op = new AArch64Operand(k_WrappedRegister, S, E);
868 Op->Reg.RegNum = RegNum;
872 static AArch64Operand *CreateShiftExtend(A64SE::ShiftExtSpecifiers ShiftTyp,
876 AArch64Operand *Op = new AArch64Operand(k_ShiftExtend, S, E);
877 Op->ShiftExtend.ShiftType = ShiftTyp;
878 Op->ShiftExtend.Amount = Amount;
879 Op->ShiftExtend.ImplicitAmount = ImplicitAmount;
883 static AArch64Operand *CreateSysReg(StringRef Str, SMLoc S) {
884 AArch64Operand *Op = new AArch64Operand(k_SysReg, S, S);
885 Op->Tok.Data = Str.data();
886 Op->Tok.Length = Str.size();
890 static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
891 A64Layout::VectorLayout Layout,
893 AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E);
894 Op->VectorList.RegNum = RegNum;
895 Op->VectorList.Count = Count;
896 Op->VectorList.Layout = Layout;
902 static AArch64Operand *CreateToken(StringRef Str, SMLoc S) {
903 AArch64Operand *Op = new AArch64Operand(k_Token, S, S);
904 Op->Tok.Data = Str.data();
905 Op->Tok.Length = Str.size();
// --- addOperands helpers: convert a validated operand into MCOperand(s)
// on the instruction. N is the expected operand count, asserted in each.
//
// Add an expression, folding constants to plain immediates.
910 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
911 // Add as immediates when possible.
912 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
913 Inst.addOperand(MCOperand::CreateImm(CE->getValue()))
915 Inst.addOperand(MCOperand::CreateExpr(Expr));
// BFI lsb: encoded as (RegWidth - lsb) mod RegWidth.
918 template<unsigned RegWidth>
919 void addBFILSBOperands(MCInst &Inst, unsigned N) const {
920 assert(N == 1 && "Invalid number of operands!");
921 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
922 unsigned EncodedVal = (RegWidth - CE->getValue()) % RegWidth;
923 Inst.addOperand(MCOperand::CreateImm(EncodedVal));
// BFI width: encoded as width - 1.
926 void addBFIWidthOperands(MCInst &Inst, unsigned N) const {
927 assert(N == 1 && "Invalid number of operands!");
928 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
929 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
// BFX width: encoded relative to the lsb operand already on the Inst.
932 void addBFXWidthOperands(MCInst &Inst, unsigned N) const {
933 assert(N == 1 && "Invalid number of operands!");
935 uint64_t LSB = Inst.getOperand(Inst.getNumOperands()-1).getImm();
936 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
938 Inst.addOperand(MCOperand::CreateImm(LSB + CE->getValue() - 1));
941 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
942 assert(N == 1 && "Invalid number of operands!");
943 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
// FP<->fixed conversion position: encoded as 64 - value.
946 void addCVTFixedPosOperands(MCInst &Inst, unsigned N) const {
947 assert(N == 1 && "Invalid number of operands!");
949 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
950 Inst.addOperand(MCOperand::CreateImm(64 - CE->getValue()));
// FMOV immediate: re-derive the 8-bit encoding from the parsed value
// (isFMOVImm already guaranteed it encodes).
953 void addFMOVImmOperands(MCInst &Inst, unsigned N) const {
954 assert(N == 1 && "Invalid number of operands!");
956 APFloat RealVal(FPImm.Val);
958 A64Imms::isFPImm(RealVal, ImmVal);
960 Inst.addOperand(MCOperand::CreateImm(ImmVal));
963 void addFPZeroOperands(MCInst &Inst, unsigned N) const {
964 assert(N == 1 && "Invalid number of operands");
965 Inst.addOperand(MCOperand::CreateImm(0));
// Inverted condition code, for aliases like CSINC's CINC form.
968 void addInvCondCodeOperands(MCInst &Inst, unsigned N) const {
969 assert(N == 1 && "Invalid number of operands!");
970 unsigned Encoded = A64InvertCondCode(getCondCode());
971 Inst.addOperand(MCOperand::CreateImm(Encoded));
974 void addRegOperands(MCInst &Inst, unsigned N) const {
975 assert(N == 1 && "Invalid number of operands!");
976 Inst.addOperand(MCOperand::CreateReg(getReg()));
979 void addImmOperands(MCInst &Inst, unsigned N) const {
980 assert(N == 1 && "Invalid number of operands!");
981 addExpr(Inst, getImm());
// Load/store-pair offset: divide by the access size and truncate to the
// 7-bit signed field.
984 template<int MemSize>
985 void addSImm7ScaledOperands(MCInst &Inst, unsigned N) const {
986 assert(N == 1 && "Invalid number of operands!");
988 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
989 uint64_t Val = CE->getValue() / MemSize;
990 Inst.addOperand(MCOperand::CreateImm(Val & 0x7f));
// Signed immediate: truncated to BitWidth bits for encoding.
993 template<int BitWidth>
994 void addSImmOperands(MCInst &Inst, unsigned N) const {
995 assert(N == 1 && "Invalid number of operands!");
997 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
998 uint64_t Val = CE->getValue();
999 Inst.addOperand(MCOperand::CreateImm(Val & ((1ULL << BitWidth) - 1)));
1002 void addImmWithLSLOperands(MCInst &Inst, unsigned N) const {
1003 assert (N == 1 && "Invalid number of operands!");
1005 addExpr(Inst, ImmWithLSL.Val);
// Label: constants are scaled-range checked at parse time, so emit the low
// field_width bits; non-constants go out as expressions for relocation.
1008 template<unsigned field_width, unsigned scale>
1009 void addLabelOperands(MCInst &Inst, unsigned N) const {
1010 assert(N == 1 && "Invalid number of operands!");
1012 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1015 addExpr(Inst, Imm.Val);
1019 int64_t Val = CE->getValue();
1020 assert(Val % scale == 0 && "Unaligned immediate in instruction");
1023 Inst.addOperand(MCOperand::CreateImm(Val & ((1LL << field_width) - 1)));
// Unsigned 12-bit offset: constants are emitted pre-divided by the access
// size; symbolic offsets are left as expressions for the fixup.
1026 template<int MemSize>
1027 void addOffsetUImm12Operands(MCInst &Inst, unsigned N) const {
1028 assert(N == 1 && "Invalid number of operands!");
1030 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
1031 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / MemSize));
1033 Inst.addOperand(MCOperand::CreateExpr(getImm()));
// Logical immediate: re-encode the constant into its N:immr:imms bits
// (validity was established by isLogicalImm).
1037 template<unsigned RegWidth>
1038 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
1039 assert(N == 1 && "Invalid number of operands");
1040 const MCConstantExpr *CE = cast<MCConstantExpr>(Imm.Val);
1043 A64Imms::isLogicalImm(RegWidth, CE->getValue(), Bits);
1045 Inst.addOperand(MCOperand::CreateImm(Bits));
// System-register operands: map the spelled name to its encoding via the
// appropriate mapper and emit it as an immediate.
1048 void addMRSOperands(MCInst &Inst, unsigned N) const {
1049 assert(N == 1 && "Invalid number of operands!");
1052 StringRef Name(SysReg.Data, SysReg.Length);
1053 uint32_t Bits = A64SysReg::MRSMapper().fromString(Name, Valid);
1055 Inst.addOperand(MCOperand::CreateImm(Bits));
1058 void addMSRWithRegOperands(MCInst &Inst, unsigned N) const {
1059 assert(N == 1 && "Invalid number of operands!");
1062 StringRef Name(SysReg.Data, SysReg.Length);
1063 uint32_t Bits = A64SysReg::MSRMapper().fromString(Name, Valid);
1065 Inst.addOperand(MCOperand::CreateImm(Bits));
1068 void addMSRPStateOperands(MCInst &Inst, unsigned N) const {
1069 assert(N == 1 && "Invalid number of operands!");
1072 StringRef Name(SysReg.Data, SysReg.Length);
1073 uint32_t Bits = A64PState::PStateMapper().fromString(Name, Valid);
1075 Inst.addOperand(MCOperand::CreateImm(Bits));
// Move-wide (MOVZ/MOVN/MOVK) operands: the immediate expression plus the
// hw shift field. For a constant, hw = ShiftAmount/16; for a relocated
// expression, hw is derived from the modifier's Gn group (case fall-through
// structure is elided in this listing along with the break statements).
1078 void addMoveWideImmOperands(MCInst &Inst, unsigned N) const {
1079 assert(N == 2 && "Invalid number of operands!");
1081 addExpr(Inst, ImmWithLSL.Val);
1083 AArch64MCExpr::VariantKind Variant;
1084 if (!isNonConstantExpr(ImmWithLSL.Val, Variant)) {
1085 Inst.addOperand(MCOperand::CreateImm(ImmWithLSL.ShiftAmount / 16));
1089 // We know it's relocated
1091 case AArch64MCExpr::VK_AARCH64_ABS_G0:
1092 case AArch64MCExpr::VK_AARCH64_ABS_G0_NC:
1093 case AArch64MCExpr::VK_AARCH64_SABS_G0:
1094 case AArch64MCExpr::VK_AARCH64_DTPREL_G0:
1095 case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC:
1096 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC:
1097 case AArch64MCExpr::VK_AARCH64_TPREL_G0:
1098 case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC:
1099 Inst.addOperand(MCOperand::CreateImm(0));
1101 case AArch64MCExpr::VK_AARCH64_ABS_G1:
1102 case AArch64MCExpr::VK_AARCH64_ABS_G1_NC:
1103 case AArch64MCExpr::VK_AARCH64_SABS_G1:
1104 case AArch64MCExpr::VK_AARCH64_DTPREL_G1:
1105 case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC:
1106 case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1:
1107 case AArch64MCExpr::VK_AARCH64_TPREL_G1:
1108 case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC:
1109 Inst.addOperand(MCOperand::CreateImm(1));
1111 case AArch64MCExpr::VK_AARCH64_ABS_G2:
1112 case AArch64MCExpr::VK_AARCH64_ABS_G2_NC:
1113 case AArch64MCExpr::VK_AARCH64_SABS_G2:
1114 case AArch64MCExpr::VK_AARCH64_DTPREL_G2:
1115 case AArch64MCExpr::VK_AARCH64_TPREL_G2:
1116 Inst.addOperand(MCOperand::CreateImm(2));
1118 case AArch64MCExpr::VK_AARCH64_ABS_G3:
1119 Inst.addOperand(MCOperand::CreateImm(3));
1121 default: llvm_unreachable("Inappropriate move wide relocation");
// MOV alias: re-run the validity checker to recover the UImm16/Shift pair
// and emit both (validity was established during matching).
1125 template<int RegWidth, bool isValidImm(int, uint64_t, int&, int&)>
1126 void addMoveWideMovAliasOperands(MCInst &Inst, unsigned N) const {
1127 assert(N == 2 && "Invalid number of operands!");
1130 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1131 uint64_t Value = CE->getValue();
1133 if (RegWidth == 32) {
1134 Value &= 0xffffffffULL;
1137 bool Valid = isValidImm(RegWidth, Value, UImm16, Shift);
1139 assert(Valid && "Invalid immediates should have been weeded out by now");
1141 Inst.addOperand(MCOperand::CreateImm(UImm16));
1142 Inst.addOperand(MCOperand::CreateImm(Shift));
// Prefetch operation: emitted directly as its 5-bit value.
1145 void addPRFMOperands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1148 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
1149 assert(CE->getValue() >= 0 && CE->getValue() <= 31
1150 && "PRFM operand should be 5-bits");
1152 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1155 // For Add-sub (extended register) operands.
1156 void addRegExtendOperands(MCInst &Inst, unsigned N) const {
1157 assert(N == 1 && "Invalid number of operands!");
// Only the shift amount is emitted; the extend type itself is implied by
// which instruction the matcher selected.
1159 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1162 // For Vector Immediates shifted imm operands.
// Adds the encoded LSL shift-amount operand for a NEON vector-immediate
// instruction (valid amounts: 0, 8, 16, 24).
1163 void addNeonMovImmShiftLSLOperands(MCInst &Inst, unsigned N) const {
1164 assert(N == 1 && "Invalid number of operands!");
1166 if (ShiftExtend.Amount % 8 != 0 || ShiftExtend.Amount > 24)
1167 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1169 // Encode LSL shift amount 0, 8, 16, 24 as 0, 1, 2, 3.
1170 int64_t Imm = ShiftExtend.Amount / 8;
1171 Inst.addOperand(MCOperand::CreateImm(Imm));
// Adds the encoded LSLH shift-amount operand for a NEON vector-immediate
// instruction (valid amounts: 0 or 8).
1174 void addNeonMovImmShiftLSLHOperands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1177 if (ShiftExtend.Amount != 0 && ShiftExtend.Amount != 8)
1178 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1180 // Encode LSLH shift amount 0, 8 as 0, 1.
1181 int64_t Imm = ShiftExtend.Amount / 8;
1182 Inst.addOperand(MCOperand::CreateImm(Imm));
// Adds the encoded MSL (shift-ones) shift-amount operand for a NEON
// vector-immediate instruction (valid amounts: 8 or 16).
1185 void addNeonMovImmShiftMSLOperands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1188 if (ShiftExtend.Amount != 8 && ShiftExtend.Amount != 16)
1189 llvm_unreachable("Invalid shift amount for vector immediate inst.");
1191 // Encode MSL shift amount 8, 16 as 0, 1.
1192 int64_t Imm = ShiftExtend.Amount / 8 - 1;
1193 Inst.addOperand(MCOperand::CreateImm(Imm));
1196 // For the extend in load-store (register offset) instructions.
// Template shim: forwards the compile-time MemSize (access size in bytes)
// to the runtime overload below, so tablegen'd code can instantiate it.
1197 template<unsigned MemSize>
1198 void addAddrRegExtendOperands(MCInst &Inst, unsigned N) const {
1199 addAddrRegExtendOperands(Inst, N, MemSize);
// Adds the combined (OptionHi << 1) | S operand for a load/store
// register-offset extend, where OptionHi holds the high bits of the extend
// "option" field and S is the shift-present flag.
1202 void addAddrRegExtendOperands(MCInst &Inst, unsigned N,
1203 unsigned MemSize) const {
1204 assert(N == 1 && "Invalid number of operands!");
1206 // First bit of Option is set in instruction classes, the high two bits are
1208 unsigned OptionHi = 0;
// NOTE(review): the extend-type cases of this switch and the computation
// of S (original lines 1210-1227) are elided from this excerpt — confirm
// against the full source before relying on the details below.
1209 switch (ShiftExtend.ShiftType) {
1219 llvm_unreachable("Invalid extend type for register offset");
1223 if (MemSize == 1 && !ShiftExtend.ImplicitAmount)
1225 else if (MemSize != 1 && ShiftExtend.Amount != 0)
1228 Inst.addOperand(MCOperand::CreateImm((OptionHi << 1) | S));
// Adds the plain shift-amount immediate operand.
1230 void addShiftOperands(MCInst &Inst, unsigned N) const {
1231 assert(N == 1 && "Invalid number of operands!");
1233 Inst.addOperand(MCOperand::CreateImm(ShiftExtend.Amount));
1236 void addNeonUImm64MaskOperands(MCInst &Inst, unsigned N) const {
1237 assert(N == 1 && "Invalid number of operands!");
1239 // A bit from each byte in the constant forms the encoded immediate
1240 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1241 uint64_t Value = CE->getValue();
1244 for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1245 Imm |= (Value & 1) << i;
1247 Inst.addOperand(MCOperand::CreateImm(Imm));
// Adds the (super-)register operand representing a parsed vector list.
1250 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1251 assert(N == 1 && "Invalid number of operands!");
1252 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1256 } // end anonymous namespace.
// Parses one instruction operand, appending the AArch64Operand(s) it
// creates to Operands.  Dispatches on the first token: custom tablegen'd
// parsers first, then shift/extend specifiers, registers, expressions,
// '#'-immediates and '['-wrapped memory operands.
1258 AArch64AsmParser::OperandMatchResultTy
1259 AArch64AsmParser::ParseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1260 StringRef Mnemonic) {
1262 // See if the operand has a custom parser
1263 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1265 // It could either succeed, fail or just not care.
1266 if (ResTy != MatchOperand_NoMatch)
1269 switch (getLexer().getKind()) {
1271 Error(Parser.getTok().getLoc(), "unexpected token in operand")
1272 return MatchOperand_ParseFail;
1273 case AsmToken::Identifier: {
1274 // It might be in the LSL/UXTB family ...
1275 OperandMatchResultTy GotShift = ParseShiftExtend(Operands);
1277 // We can only continue if no tokens were eaten.
1278 if (GotShift != MatchOperand_NoMatch)
1281 // ... or it might be a register ...
1282 uint32_t NumLanes = 0;
1283 OperandMatchResultTy GotReg = ParseRegister(Operands, NumLanes);
// ParseRegister consumes either a whole register token or nothing, so a
// ParseFail here would indicate a parser bug.
1284 assert(GotReg != MatchOperand_ParseFail
1285 && "register parsing shouldn't partially succeed")
1287 if (GotReg == MatchOperand_Success) {
// A '[' after a register introduces a NEON lane specifier.
1288 if (Parser.getTok().is(AsmToken::LBrac))
1289 return ParseNEONLane(Operands, NumLanes);
1291 return MatchOperand_Success;
1293 // ... or it might be a symbolish thing
1296 case AsmToken::LParen: // E.g. (strcmp-4)
1297 case AsmToken::Integer: // 1f, 2b labels
1298 case AsmToken::String: // quoted labels
1299 case AsmToken::Dot: // . is Current location
1300 case AsmToken::Dollar: // $ is PC
1301 case AsmToken::Colon: {
1302 SMLoc StartLoc = Parser.getTok().getLoc();
1304 const MCExpr *ImmVal = 0;
1306 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1307 return MatchOperand_ParseFail;
// End location is the character just before the token that follows the
// expression.
1309 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1310 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1311 return MatchOperand_Success;
1313 case AsmToken::Hash: { // Immediates
1314 SMLoc StartLoc = Parser.getTok().getLoc();
1316 const MCExpr *ImmVal = 0;
1319 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1320 return MatchOperand_ParseFail;
1322 EndLoc = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
1323 Operands.push_back(AArch64Operand::CreateImm(ImmVal, StartLoc, EndLoc));
1324 return MatchOperand_Success;
1326 case AsmToken::LBrac: {
1327 SMLoc Loc = Parser.getTok().getLoc();
1328 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1329 Parser.Lex(); // Eat '['
1331 // There's no comma after a '[', so we can parse the next operand
// immediately via recursion.
1333 return ParseOperand(Operands, Mnemonic);
1335 // The following will likely be useful later, but not in very early cases
1336 case AsmToken::LCurly: // SIMD vector list is not parsed here
1337 llvm_unreachable("Don't know how to deal with '{' in operand");
1338 return MatchOperand_ParseFail;
// Parses an immediate expression into ExprVal.  A leading ':' introduces a
// relocation specifier (e.g. ":lo12:"), which is wrapped around the
// sub-expression as an AArch64MCExpr; otherwise a plain MCExpr is parsed.
1342 AArch64AsmParser::OperandMatchResultTy
1343 AArch64AsmParser::ParseImmediate(const MCExpr *&ExprVal) {
1344 if (getLexer().is(AsmToken::Colon)) {
1345 AArch64MCExpr::VariantKind RefKind;
1347 OperandMatchResultTy ResTy = ParseRelocPrefix(RefKind);
1348 if (ResTy != MatchOperand_Success)
1351 const MCExpr *SubExprVal;
1352 if (getParser().parseExpression(SubExprVal))
1353 return MatchOperand_ParseFail;
1355 ExprVal = AArch64MCExpr::Create(RefKind, SubExprVal, getContext());
1356 return MatchOperand_Success;
1359 // No weird AArch64MCExpr prefix
1360 return getParser().parseExpression(ExprVal)
1361 ? MatchOperand_ParseFail : MatchOperand_Success;
1364 // A lane attached to a NEON register. "[N]", which should yield three tokens:
1365 // '[', N, ']'. A hash is not allowed to precede the immediate here.
// NumLanes is the number of lanes implied by the register's layout; the
// parsed index must be strictly below it.
1366 AArch64AsmParser::OperandMatchResultTy
1367 AArch64AsmParser::ParseNEONLane(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1368 uint32_t NumLanes) {
1369 SMLoc Loc = Parser.getTok().getLoc();
1371 assert(Parser.getTok().is(AsmToken::LBrac) && "inappropriate operand");
1372 Operands.push_back(AArch64Operand::CreateToken("[", Loc));
1373 Parser.Lex(); // Eat '['
1375 if (Parser.getTok().isNot(AsmToken::Integer)) {
1376 Error(Parser.getTok().getLoc(), "expected lane number");
1377 return MatchOperand_ParseFail;
1380 if (Parser.getTok().getIntVal() >= NumLanes) {
1381 Error(Parser.getTok().getLoc(), "lane number incompatible with layout");
1382 return MatchOperand_ParseFail;
1385 const MCExpr *Lane = MCConstantExpr::Create(Parser.getTok().getIntVal(),
1387 SMLoc S = Parser.getTok().getLoc();
1388 Parser.Lex(); // Eat actual lane
1389 SMLoc E = Parser.getTok().getLoc();
1390 Operands.push_back(AArch64Operand::CreateImm(Lane, S, E));
1393 if (Parser.getTok().isNot(AsmToken::RBrac)) {
1394 Error(Parser.getTok().getLoc(), "expected ']' after lane");
1395 return MatchOperand_ParseFail;
// NOTE(review): the ']' token reuses Loc (the '[' location) rather than
// its own location — presumably good enough for diagnostics; confirm.
1398 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
1399 Parser.Lex(); // Eat ']'
1401 return MatchOperand_Success;
// Parses a ":specifier:" relocation prefix (e.g. ":got:", ":tprel_lo12:")
// into RefKind.  On entry the lexer sits on the leading ':'; on success the
// identifier and trailing ':' state has been consumed as shown below.
1404 AArch64AsmParser::OperandMatchResultTy
1405 AArch64AsmParser::ParseRelocPrefix(AArch64MCExpr::VariantKind &RefKind) {
1406 assert(getLexer().is(AsmToken::Colon) && "expected a ':'");
1409 if (getLexer().isNot(AsmToken::Identifier)) {
1410 Error(Parser.getTok().getLoc(),
1411 "expected relocation specifier in operand after ':'");
1412 return MatchOperand_ParseFail;
// Specifiers are matched case-insensitively by lowering first.
1415 std::string LowerCase = Parser.getTok().getIdentifier().lower();
1416 RefKind = StringSwitch<AArch64MCExpr::VariantKind>(LowerCase)
1417 .Case("got", AArch64MCExpr::VK_AARCH64_GOT)
1418 .Case("got_lo12", AArch64MCExpr::VK_AARCH64_GOT_LO12)
1419 .Case("lo12", AArch64MCExpr::VK_AARCH64_LO12)
1420 .Case("abs_g0", AArch64MCExpr::VK_AARCH64_ABS_G0)
1421 .Case("abs_g0_nc", AArch64MCExpr::VK_AARCH64_ABS_G0_NC)
1422 .Case("abs_g1", AArch64MCExpr::VK_AARCH64_ABS_G1)
1423 .Case("abs_g1_nc", AArch64MCExpr::VK_AARCH64_ABS_G1_NC)
1424 .Case("abs_g2", AArch64MCExpr::VK_AARCH64_ABS_G2)
1425 .Case("abs_g2_nc", AArch64MCExpr::VK_AARCH64_ABS_G2_NC)
1426 .Case("abs_g3", AArch64MCExpr::VK_AARCH64_ABS_G3)
1427 .Case("abs_g0_s", AArch64MCExpr::VK_AARCH64_SABS_G0)
1428 .Case("abs_g1_s", AArch64MCExpr::VK_AARCH64_SABS_G1)
1429 .Case("abs_g2_s", AArch64MCExpr::VK_AARCH64_SABS_G2)
1430 .Case("dtprel_g2", AArch64MCExpr::VK_AARCH64_DTPREL_G2)
1431 .Case("dtprel_g1", AArch64MCExpr::VK_AARCH64_DTPREL_G1)
1432 .Case("dtprel_g1_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC)
1433 .Case("dtprel_g0", AArch64MCExpr::VK_AARCH64_DTPREL_G0)
1434 .Case("dtprel_g0_nc", AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC)
1435 .Case("dtprel_hi12", AArch64MCExpr::VK_AARCH64_DTPREL_HI12)
1436 .Case("dtprel_lo12", AArch64MCExpr::VK_AARCH64_DTPREL_LO12)
1437 .Case("dtprel_lo12_nc", AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC)
1438 .Case("gottprel_g1", AArch64MCExpr::VK_AARCH64_GOTTPREL_G1)
1439 .Case("gottprel_g0_nc", AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC)
1440 .Case("gottprel", AArch64MCExpr::VK_AARCH64_GOTTPREL)
1441 .Case("gottprel_lo12", AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12)
1442 .Case("tprel_g2", AArch64MCExpr::VK_AARCH64_TPREL_G2)
1443 .Case("tprel_g1", AArch64MCExpr::VK_AARCH64_TPREL_G1)
1444 .Case("tprel_g1_nc", AArch64MCExpr::VK_AARCH64_TPREL_G1_NC)
1445 .Case("tprel_g0", AArch64MCExpr::VK_AARCH64_TPREL_G0)
1446 .Case("tprel_g0_nc", AArch64MCExpr::VK_AARCH64_TPREL_G0_NC)
1447 .Case("tprel_hi12", AArch64MCExpr::VK_AARCH64_TPREL_HI12)
1448 .Case("tprel_lo12", AArch64MCExpr::VK_AARCH64_TPREL_LO12)
1449 .Case("tprel_lo12_nc", AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC)
1450 .Case("tlsdesc", AArch64MCExpr::VK_AARCH64_TLSDESC)
1451 .Case("tlsdesc_lo12", AArch64MCExpr::VK_AARCH64_TLSDESC_LO12)
1452 .Default(AArch64MCExpr::VK_AARCH64_None)
1454 if (RefKind == AArch64MCExpr::VK_AARCH64_None) {
1455 Error(Parser.getTok().getLoc(),
1456 "expected relocation specifier in operand after ':'");
1457 return MatchOperand_ParseFail;
1459 Parser.Lex(); // Eat identifier
1461 if (getLexer().isNot(AsmToken::Colon)) {
1462 Error(Parser.getTok().getLoc(),
1463 "expected ':' after relocation specifier");
1464 return MatchOperand_ParseFail;
1467 return MatchOperand_Success;
// Parses a '#imm' optionally followed by ', lsl #N' (N >= 0), producing a
// single ImmWithLSL operand.  With no comma the shift defaults to 0 and is
// marked implicit.
1470 AArch64AsmParser::OperandMatchResultTy
1471 AArch64AsmParser::ParseImmWithLSLOperand(
1472 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1473 // FIXME?: I want to live in a world where immediates must start with
1474 // #. Please don't dash my hopes (well, do if you have a good reason).
1475 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1477 SMLoc S = Parser.getTok().getLoc();
1478 Parser.Lex(); // Eat '#'
1481 if (ParseImmediate(Imm) != MatchOperand_Success)
1482 return MatchOperand_ParseFail;
1483 else if (Parser.getTok().isNot(AsmToken::Comma)) {
// No shift given: record an implicit LSL #0.
1484 SMLoc E = Parser.getTok().getLoc();
1485 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, 0, true, S, E));
1486 return MatchOperand_Success;
1492 // The optional operand must be "lsl #N" where N is non-negative.
1493 if (Parser.getTok().is(AsmToken::Identifier)
1494 && Parser.getTok().getIdentifier().equals_lower("lsl")) {
1497 if (Parser.getTok().is(AsmToken::Hash)) {
1500 if (Parser.getTok().isNot(AsmToken::Integer)) {
1501 Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
1502 return MatchOperand_ParseFail;
1507 int64_t ShiftAmount = Parser.getTok().getIntVal();
1509 if (ShiftAmount < 0) {
1510 Error(Parser.getTok().getLoc(), "positive shift amount required");
1511 return MatchOperand_ParseFail;
1513 Parser.Lex(); // Eat the number
1515 SMLoc E = Parser.getTok().getLoc();
1516 Operands.push_back(AArch64Operand::CreateImmWithLSL(Imm, ShiftAmount,
1518 return MatchOperand_Success;
// Parses a condition-code identifier (e.g. "eq", "lt") into a CondCode
// operand; yields NoMatch (without consuming tokens) if the identifier is
// not a valid condition code.
1522 AArch64AsmParser::OperandMatchResultTy
1523 AArch64AsmParser::ParseCondCodeOperand(
1524 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1525 if (Parser.getTok().isNot(AsmToken::Identifier))
1526 return MatchOperand_NoMatch;
1528 StringRef Tok = Parser.getTok().getIdentifier();
1529 A64CC::CondCodes CondCode = A64StringToCondCode(Tok);
1531 if (CondCode == A64CC::Invalid)
1532 return MatchOperand_NoMatch;
1534 SMLoc S = Parser.getTok().getLoc();
1535 Parser.Lex(); // Eat condition code
1536 SMLoc E = Parser.getTok().getLoc();
1538 Operands.push_back(AArch64Operand::CreateCondCode(CondCode, S, E));
1539 return MatchOperand_Success;
// Parses a control-register name of the form cN / CN (0 <= N <= 15), as
// used by SYS-family instructions, into a constant immediate operand.
1542 AArch64AsmParser::OperandMatchResultTy
1543 AArch64AsmParser::ParseCRxOperand(
1544 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1545 SMLoc S = Parser.getTok().getLoc();
1546 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1547 Error(S, "Expected cN operand where 0 <= N <= 15");
1548 return MatchOperand_ParseFail;
1551 StringRef Tok = Parser.getTok().getIdentifier();
1552 if (Tok[0] != 'c' && Tok[0] != 'C') {
1553 Error(S, "Expected cN operand where 0 <= N <= 15");
1554 return MatchOperand_ParseFail;
// Parse the digits after the leading 'c'/'C' in base 10.
1558 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
1559 if (BadNum || CRNum > 15) {
1560 Error(S, "Expected cN operand where 0 <= N <= 15");
1561 return MatchOperand_ParseFail;
1564 const MCExpr *CRImm = MCConstantExpr::Create(CRNum, getContext());
1567 SMLoc E = Parser.getTok().getLoc();
1569 Operands.push_back(AArch64Operand::CreateImm(CRImm, S, E));
1570 return MatchOperand_Success;
// Parses a '#'-prefixed floating-point literal (with optional sign) into
// an FPImm operand holding the value as a double.
1573 AArch64AsmParser::OperandMatchResultTy
1574 AArch64AsmParser::ParseFPImmOperand(
1575 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1577 // FIXME?: I want to live in a world where immediates must start with
1578 // #. Please don't dash my hopes (well, do if you have a good reason).
1579 if (Parser.getTok().isNot(AsmToken::Hash)) return MatchOperand_NoMatch;
1581 SMLoc S = Parser.getTok().getLoc();
1582 Parser.Lex(); // Eat '#'
1584 bool Negative = false;
1585 if (Parser.getTok().is(AsmToken::Minus)) {
1587 Parser.Lex(); // Eat '-'
1588 } else if (Parser.getTok().is(AsmToken::Plus)) {
1589 Parser.Lex(); // Eat '+'
1592 if (Parser.getTok().isNot(AsmToken::Real)) {
1593 Error(S, "Expected floating-point immediate");
1594 return MatchOperand_ParseFail;
// Parse the literal at double precision, then apply the sign we consumed.
1597 APFloat RealVal(APFloat::IEEEdouble, Parser.getTok().getString());
1598 if (Negative) RealVal.changeSign();
1599 double DblVal = RealVal.convertToDouble();
1601 Parser.Lex(); // Eat real number
1602 SMLoc E = Parser.getTok().getLoc();
1604 Operands.push_back(AArch64Operand::CreateFPImm(DblVal, S, E));
1605 return MatchOperand_Success;
1609 // Automatically generated
1610 static unsigned MatchRegisterName(StringRef Name);
// Inspects the current identifier token and, without consuming it, splits
// it into a register number plus an optional ".layout" suffix (e.g.
// "v0.4s" -> V-register + ".4s").  RegEndLoc/LayoutLoc report where each
// part starts; Layout is set to a permanent string (or empty if absent).
1613 AArch64AsmParser::IdentifyRegister(unsigned &RegNum, SMLoc &RegEndLoc,
1615 SMLoc &LayoutLoc) const {
1616 const AsmToken &Tok = Parser.getTok();
1618 if (Tok.isNot(AsmToken::Identifier))
// Register names are matched case-insensitively.
1621 std::string LowerReg = Tok.getString().lower();
1622 size_t DotPos = LowerReg.find('.');
1624 bool IsVec128 = false;
1625 SMLoc S = Tok.getLoc();
1626 RegEndLoc = SMLoc::getFromPointer(S.getPointer() + DotPos);
1628 if (DotPos == std::string::npos) {
1629 Layout = StringRef();
1631 // Everything afterwards needs to be a literal token, expected to be
1632 // '.2d','.b' etc for vector registers.
1634 // This StringSwitch validates the input and (perhaps more importantly)
1635 // gives us a permanent string to use in the token (a pointer into LowerReg
1636 // would go out of scope when we return).
1637 LayoutLoc = SMLoc::getFromPointer(S.getPointer() + DotPos + 1);
1638 StringRef LayoutText = StringRef(LowerReg).substr(DotPos);
1640 // See if it's a 128-bit layout first.
1641 Layout = StringSwitch<const char *>(LayoutText)
1642 .Case(".q", ".q").Case(".1q", ".1q")
1643 .Case(".d", ".d").Case(".2d", ".2d")
1644 .Case(".s", ".s").Case(".4s", ".4s")
1645 .Case(".h", ".h").Case(".8h", ".8h")
1646 .Case(".b", ".b").Case(".16b", ".16b")
1649 if (Layout.size() != 0)
// NOTE(review): the 64-bit-layout cases of this second StringSwitch and
// the point where IsVec128 is set (original lines 1647-1659) are elided
// from this excerpt.
1652 Layout = StringSwitch<const char *>(LayoutText)
1660 if (Layout.size() == 0) {
1661 // If we've still not pinned it down the register is malformed.
// First try the tablegen'd matcher; fall back to aliases and V-register
// names, which map to Q or D registers depending on the layout width.
1666 RegNum = MatchRegisterName(LowerReg.substr(0, DotPos));
1667 if (RegNum == AArch64::NoRegister) {
1668 RegNum = StringSwitch<unsigned>(LowerReg.substr(0, DotPos))
1669 .Case("ip0", AArch64::X16)
1670 .Case("ip1", AArch64::X17)
1671 .Case("fp", AArch64::X29)
1672 .Case("lr", AArch64::X30)
1673 .Case("v0", IsVec128 ? AArch64::Q0 : AArch64::D0)
1674 .Case("v1", IsVec128 ? AArch64::Q1 : AArch64::D1)
1675 .Case("v2", IsVec128 ? AArch64::Q2 : AArch64::D2)
1676 .Case("v3", IsVec128 ? AArch64::Q3 : AArch64::D3)
1677 .Case("v4", IsVec128 ? AArch64::Q4 : AArch64::D4)
1678 .Case("v5", IsVec128 ? AArch64::Q5 : AArch64::D5)
1679 .Case("v6", IsVec128 ? AArch64::Q6 : AArch64::D6)
1680 .Case("v7", IsVec128 ? AArch64::Q7 : AArch64::D7)
1681 .Case("v8", IsVec128 ? AArch64::Q8 : AArch64::D8)
1682 .Case("v9", IsVec128 ? AArch64::Q9 : AArch64::D9)
1683 .Case("v10", IsVec128 ? AArch64::Q10 : AArch64::D10)
1684 .Case("v11", IsVec128 ? AArch64::Q11 : AArch64::D11)
1685 .Case("v12", IsVec128 ? AArch64::Q12 : AArch64::D12)
1686 .Case("v13", IsVec128 ? AArch64::Q13 : AArch64::D13)
1687 .Case("v14", IsVec128 ? AArch64::Q14 : AArch64::D14)
1688 .Case("v15", IsVec128 ? AArch64::Q15 : AArch64::D15)
1689 .Case("v16", IsVec128 ? AArch64::Q16 : AArch64::D16)
1690 .Case("v17", IsVec128 ? AArch64::Q17 : AArch64::D17)
1691 .Case("v18", IsVec128 ? AArch64::Q18 : AArch64::D18)
1692 .Case("v19", IsVec128 ? AArch64::Q19 : AArch64::D19)
1693 .Case("v20", IsVec128 ? AArch64::Q20 : AArch64::D20)
1694 .Case("v21", IsVec128 ? AArch64::Q21 : AArch64::D21)
1695 .Case("v22", IsVec128 ? AArch64::Q22 : AArch64::D22)
1696 .Case("v23", IsVec128 ? AArch64::Q23 : AArch64::D23)
1697 .Case("v24", IsVec128 ? AArch64::Q24 : AArch64::D24)
1698 .Case("v25", IsVec128 ? AArch64::Q25 : AArch64::D25)
1699 .Case("v26", IsVec128 ? AArch64::Q26 : AArch64::D26)
1700 .Case("v27", IsVec128 ? AArch64::Q27 : AArch64::D27)
1701 .Case("v28", IsVec128 ? AArch64::Q28 : AArch64::D28)
1702 .Case("v29", IsVec128 ? AArch64::Q29 : AArch64::D29)
1703 .Case("v30", IsVec128 ? AArch64::Q30 : AArch64::D30)
1704 .Case("v31", IsVec128 ? AArch64::Q31 : AArch64::D31)
1705 .Default(AArch64::NoRegister)
1707 if (RegNum == AArch64::NoRegister)
// Parses a register operand (possibly with a NEON ".layout" suffix),
// appending a register operand — and a layout token, if present — to
// Operands.  NumLanes is set from the layout for later lane checking.
1713 AArch64AsmParser::OperandMatchResultTy
1714 AArch64AsmParser::ParseRegister(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
1715 uint32_t &NumLanes) {
1718 SMLoc RegEndLoc, LayoutLoc;
1719 SMLoc S = Parser.getTok().getLoc();
1721 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1722 return MatchOperand_NoMatch;
1724 Operands.push_back(AArch64Operand::CreateReg(RegNum, S, RegEndLoc));
1726 if (Layout.size() != 0) {
1727 unsigned long long TmpLanes = 0;
// An explicit lane count like ".4s" parses as an integer prefix; a bare
// element specifier like ".s" leaves TmpLanes at 0.
1728 llvm::getAsUnsignedInteger(Layout.substr(1), 10, TmpLanes);
1729 if (TmpLanes != 0) {
1730 NumLanes = TmpLanes;
1732 // If the number of lanes isn't specified explicitly, a valid instruction
1733 // will have an element specifier and be capable of acting on the entire
1735 switch (Layout.back()) {
1736 default: llvm_unreachable("Invalid layout specifier");
1737 case 'b': NumLanes = 16; break;
1738 case 'h': NumLanes = 8; break;
1739 case 's': NumLanes = 4; break;
1740 case 'd': NumLanes = 2; break;
1741 case 'q': NumLanes = 1; break;
1745 Operands.push_back(AArch64Operand::CreateToken(Layout, LayoutLoc));
1749 return MatchOperand_Success;
// MCTargetAsmParser callback used by generic directives (e.g. DWARF CFI):
// identifies a register name and reports its number and source range.
1753 AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1755 // This callback is used for things like DWARF frame directives in
1756 // assembly. They don't care about things like NEON layouts or lanes, they
1757 // just want to be able to produce the DWARF register number.
1758 StringRef LayoutSpec;
1759 SMLoc RegEndLoc, LayoutLoc;
1760 StartLoc = Parser.getTok().getLoc();
1762 if (!IdentifyRegister(RegNo, RegEndLoc, LayoutSpec, LayoutLoc))
1766 EndLoc = Parser.getTok().getLoc();
// Parses a named immediate (e.g. a barrier or PState name) via the given
// Mapper, or a raw '#imm' that the Mapper accepts as valid.  Produces an
// immediate operand holding the mapped/parsed value.
1771 AArch64AsmParser::OperandMatchResultTy
1772 AArch64AsmParser::ParseNamedImmOperand(const NamedImmMapper &Mapper,
1773 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1774 // Since these operands occur in very limited circumstances, without
1775 // alternatives, we actually signal an error if there is no match. If relaxing
1776 // this, beware of unintended consequences: an immediate will be accepted
1777 // during matching, no matter how it gets into the AArch64Operand.
1778 const AsmToken &Tok = Parser.getTok();
1779 SMLoc S = Tok.getLoc();
1781 if (Tok.is(AsmToken::Identifier)) {
1783 uint32_t Code = Mapper.fromString(Tok.getString().lower(), ValidName);
1786 Error(S, "operand specifier not recognised");
1787 return MatchOperand_ParseFail;
1790 Parser.Lex(); // We're done with the identifier. Eat it
1792 SMLoc E = Parser.getTok().getLoc();
1793 const MCExpr *Imm = MCConstantExpr::Create(Code, getContext());
1794 Operands.push_back(AArch64Operand::CreateImm(Imm, S, E));
1795 return MatchOperand_Success;
1796 } else if (Tok.is(AsmToken::Hash)) {
1799 const MCExpr *ImmVal;
1800 if (ParseImmediate(ImmVal) != MatchOperand_Success)
1801 return MatchOperand_ParseFail;
// Only constant, non-negative immediates the Mapper recognises are legal.
1803 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
1804 if (!CE || CE->getValue() < 0 || !Mapper.validImm(CE->getValue())) {
1805 Error(S, "Invalid immediate for instruction");
1806 return MatchOperand_ParseFail;
1809 SMLoc E = Parser.getTok().getLoc();
1810 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E));
1811 return MatchOperand_Success;
1814 Error(S, "unexpected operand for instruction");
1815 return MatchOperand_ParseFail;
// Parses an MSR/MRS system-register operand; the name is kept as a string
// because the same name can encode differently per instruction form.
1818 AArch64AsmParser::OperandMatchResultTy
1819 AArch64AsmParser::ParseSysRegOperand(
1820 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1821 const AsmToken &Tok = Parser.getTok();
1823 // Any MSR/MRS operand will be an identifier, and we want to store it as some
1824 // kind of string: SPSel is valid for two different forms of MSR with two
1825 // different encodings. There's no collision at the moment, but the potential
1827 if (!Tok.is(AsmToken::Identifier)) {
1828 return MatchOperand_NoMatch;
1831 SMLoc S = Tok.getLoc();
1832 Operands.push_back(AArch64Operand::CreateSysReg(Tok.getString(), S));
1833 Parser.Lex(); // Eat identifier
1835 return MatchOperand_Success;
// Parses the address operand of a load/store-exclusive: a plain 64-bit
// base register (xN or sp), optionally followed by ", #0".  No layout
// suffix and no other offset is permitted.
1838 AArch64AsmParser::OperandMatchResultTy
1839 AArch64AsmParser::ParseLSXAddressOperand(
1840 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1841 SMLoc S = Parser.getTok().getLoc();
1844 SMLoc RegEndLoc, LayoutLoc;
1846 if(!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc)
1847 || !AArch64MCRegisterClasses[AArch64::GPR64xspRegClassID].contains(RegNum)
1848 || Layout.size() != 0) {
1849 // Check Layout.size because we don't want to let "x3.4s" or similar
1851 return MatchOperand_NoMatch;
1853 Parser.Lex(); // Eat register
1855 if (Parser.getTok().is(AsmToken::RBrac)) {
1857 SMLoc E = Parser.getTok().getLoc();
1858 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1859 return MatchOperand_Success;
1862 // Otherwise, only ", #0" is valid
1864 if (Parser.getTok().isNot(AsmToken::Comma)) {
1865 Error(Parser.getTok().getLoc(), "expected ',' or ']' after register");
1866 return MatchOperand_ParseFail;
1868 Parser.Lex(); // Eat ','
1870 if (Parser.getTok().isNot(AsmToken::Hash)) {
1871 Error(Parser.getTok().getLoc(), "expected '#0'");
1872 return MatchOperand_ParseFail;
1874 Parser.Lex(); // Eat '#'
1876 if (Parser.getTok().isNot(AsmToken::Integer)
1877 || Parser.getTok().getIntVal() != 0 ) {
1878 Error(Parser.getTok().getLoc(), "expected '#0'");
1879 return MatchOperand_ParseFail;
1881 Parser.Lex(); // Eat '0'
1883 SMLoc E = Parser.getTok().getLoc();
1884 Operands.push_back(AArch64Operand::CreateWrappedReg(RegNum, S, E));
1885 return MatchOperand_Success;
// Parses a shift/extend specifier (lsl/lsr/asr/ror/msl or the uxt*/sxt*
// family) with an optional '#imm' amount, producing a ShiftExtend operand.
// Extending forms may omit the amount (implicitly 0); shift forms may not.
1888 AArch64AsmParser::OperandMatchResultTy
1889 AArch64AsmParser::ParseShiftExtend(
1890 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
1891 StringRef IDVal = Parser.getTok().getIdentifier();
1892 std::string LowerID = IDVal.lower();
1894 A64SE::ShiftExtSpecifiers Spec =
1895 StringSwitch<A64SE::ShiftExtSpecifiers>(LowerID)
1896 .Case("lsl", A64SE::LSL)
1897 .Case("msl", A64SE::MSL)
1898 .Case("lsr", A64SE::LSR)
1899 .Case("asr", A64SE::ASR)
1900 .Case("ror", A64SE::ROR)
1901 .Case("uxtb", A64SE::UXTB)
1902 .Case("uxth", A64SE::UXTH)
1903 .Case("uxtw", A64SE::UXTW)
1904 .Case("uxtx", A64SE::UXTX)
1905 .Case("sxtb", A64SE::SXTB)
1906 .Case("sxth", A64SE::SXTH)
1907 .Case("sxtw", A64SE::SXTW)
1908 .Case("sxtx", A64SE::SXTX)
1909 .Default(A64SE::Invalid)
1911 if (Spec == A64SE::Invalid)
1912 return MatchOperand_NoMatch;
1916 S = Parser.getTok().getLoc();
1919 if (Spec != A64SE::LSL && Spec != A64SE::LSR && Spec != A64SE::ASR &&
1920 Spec != A64SE::ROR && Spec != A64SE::MSL) {
1921 // The shift amount can be omitted for the extending versions, but not real
1923 // add x0, x0, x0, uxtb
1924 // is valid, and equivalent to
1925 // add x0, x0, x0, uxtb #0
1927 if (Parser.getTok().is(AsmToken::Comma) ||
1928 Parser.getTok().is(AsmToken::EndOfStatement) ||
1929 Parser.getTok().is(AsmToken::RBrac)) {
// Implicit amount of 0 for an extend with no '#imm'.
1930 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, 0, true,
1932 return MatchOperand_Success;
1936 // Eat # at beginning of immediate
1937 if (!Parser.getTok().is(AsmToken::Hash)) {
1938 Error(Parser.getTok().getLoc(),
1939 "expected #imm after shift specifier");
1940 return MatchOperand_ParseFail;
1944 // Make sure we do actually have a number
1945 if (!Parser.getTok().is(AsmToken::Integer)) {
1946 Error(Parser.getTok().getLoc(),
1947 "expected integer shift amount");
1948 return MatchOperand_ParseFail;
1950 unsigned Amount = Parser.getTok().getIntVal();
1952 E = Parser.getTok().getLoc();
1954 Operands.push_back(AArch64Operand::CreateShiftExtend(Spec, Amount, false,
1957 return MatchOperand_Success;
1960 /// Try to parse a vector register token, If it is a vector register,
1961 /// the token is eaten and return true. Otherwise return false.
/// A valid vector register must be in FPR64 or FPR128 and carry a layout
/// suffix; on failure an error diagnostic is emitted.
1962 bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
1963 StringRef &Layout, SMLoc &LayoutLoc) {
1964 bool IsVector = true;
1966 if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
1968 else if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID]
1969 .contains(RegNum) &&
1970 !AArch64MCRegisterClasses[AArch64::FPR128RegClassID]
1973 else if (Layout.size() == 0)
1977 Error(Parser.getTok().getLoc(), "expected vector type register");
1979 Parser.Lex(); // Eat this token.
1984 // A vector list contains 1-4 consecutive registers.
1985 // Now there are two kinds of vector list when number of vector > 1:
1986 // (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
1987 // (2) {Vn.layout - Vm.layout}
1988 // If the layout is like .b/.h/.s/.d, also parse the lane.
1989 AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
1990 SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
1991 if (Parser.getTok().isNot(AsmToken::LCurly)) {
1992 Error(Parser.getTok().getLoc(), "'{' expected");
1993 return MatchOperand_ParseFail;
1995 SMLoc SLoc = Parser.getTok().getLoc();
1996 Parser.Lex(); // Eat '{' token.
1998 unsigned Reg, Count = 1;
1999 StringRef LayoutStr;
2000 SMLoc RegEndLoc, LayoutLoc;
2001 if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
2002 return MatchOperand_ParseFail;
// Range form: {Vn.layout - Vm.layout}.
2004 if (Parser.getTok().is(AsmToken::Minus)) {
2005 Parser.Lex(); // Eat the minus.
2008 StringRef LayoutStr2;
2009 SMLoc RegEndLoc2, LayoutLoc2;
2010 SMLoc RegLoc2 = Parser.getTok().getLoc();
2012 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2013 return MatchOperand_ParseFail;
// Register numbers wrap modulo 32, so {v30 - v1} is a legal span.
2014 unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
2016 if (LayoutStr != LayoutStr2) {
2017 Error(LayoutLoc2, "expected the same vector layout");
2018 return MatchOperand_ParseFail;
2020 if (Space == 0 || Space > 3) {
2021 Error(RegLoc2, "invalid number of vectors");
2022 return MatchOperand_ParseFail;
// List form: {Vn.layout, Vn+1.layout, ...} — registers must be
// consecutive (again modulo 32).
2027 unsigned LastReg = Reg;
2028 while (Parser.getTok().is(AsmToken::Comma)) {
2029 Parser.Lex(); // Eat the comma.
2031 StringRef LayoutStr2;
2032 SMLoc RegEndLoc2, LayoutLoc2;
2033 SMLoc RegLoc2 = Parser.getTok().getLoc();
2035 if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
2036 return MatchOperand_ParseFail;
2037 unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
2038 : (Reg2 + 32 - LastReg);
2041 // The space between two vectors should be 1. And they should have the same layout.
2042 // Total count shouldn't be great than 4
2044 Error(RegLoc2, "invalid space between two vectors");
2045 return MatchOperand_ParseFail;
2047 if (LayoutStr != LayoutStr2) {
2048 Error(LayoutLoc2, "expected the same vector layout");
2049 return MatchOperand_ParseFail;
2052 Error(RegLoc2, "invalid number of vectors");
2053 return MatchOperand_ParseFail;
2060 if (Parser.getTok().isNot(AsmToken::RCurly)) {
2061 Error(Parser.getTok().getLoc(), "'}' expected");
2062 return MatchOperand_ParseFail;
2064 SMLoc ELoc = Parser.getTok().getLoc();
2065 Parser.Lex(); // Eat '}' token.
2067 A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
2068 if (Count > 1) { // If count > 1, create vector list using super register.
2069 bool IsVec64 = (Layout < A64Layout::VL_16B);
2070 static unsigned SupRegIDs[3][2] = {
2071 { AArch64::QPairRegClassID, AArch64::DPairRegClassID },
2072 { AArch64::QTripleRegClassID, AArch64::DTripleRegClassID },
2073 { AArch64::QQuadRegClassID, AArch64::DQuadRegClassID }
// Index by (Count - 2) rows and 64-vs-128-bit columns to pick the
// matching tuple register class.
2075 unsigned SupRegID = SupRegIDs[Count - 2][static_cast<int>(IsVec64)];
2076 unsigned Sub0 = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0;
2077 const MCRegisterInfo *MRI = getContext().getRegisterInfo();
2078 Reg = MRI->getMatchingSuperReg(Reg, Sub0,
2079 &AArch64MCRegisterClasses[SupRegID]);
2082 AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc));
// An immediately following '[' introduces a lane index, legal only for
// per-element layouts (.b/.h/.s/.d).
2084 if (Parser.getTok().is(AsmToken::LBrac)) {
2085 uint32_t NumLanes = 0;
2087 case A64Layout::VL_B : NumLanes = 16; break;
2088 case A64Layout::VL_H : NumLanes = 8; break;
2089 case A64Layout::VL_S : NumLanes = 4; break;
2090 case A64Layout::VL_D : NumLanes = 2; break;
2092 SMLoc Loc = getLexer().getLoc();
2093 Error(Loc, "expected comma before next operand");
2094 return MatchOperand_ParseFail;
2096 return ParseNEONLane(Operands, NumLanes);
2098 return MatchOperand_Success;
2102 // FIXME: We would really like to be able to tablegen'erate this.
// Post-match semantic checks that the tablegen'd matcher cannot express:
// bitfield insert/extract range relationships and whether IC/TLBI ops
// take a register argument.  Returns true (with a diagnostic) on error.
2103 bool AArch64AsmParser::
2104 validateInstruction(MCInst &Inst,
2105 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2106 switch (Inst.getOpcode()) {
2107 case AArch64::BFIwwii:
2108 case AArch64::BFIxxii:
2109 case AArch64::SBFIZwwii:
2110 case AArch64::SBFIZxxii:
2111 case AArch64::UBFIZwwii:
2112 case AArch64::UBFIZxxii: {
// The immediates are always the last two operands (BFI also carries the
// tied input register).
2113 unsigned ImmOps = Inst.getNumOperands() - 2;
2114 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2115 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
2117 if (ImmR != 0 && ImmS >= ImmR) {
2118 return Error(Operands[4]->getStartLoc(),
2119 "requested insert overflows register");
2123 case AArch64::BFXILwwii:
2124 case AArch64::BFXILxxii:
2125 case AArch64::SBFXwwii:
2126 case AArch64::SBFXxxii:
2127 case AArch64::UBFXwwii:
2128 case AArch64::UBFXxxii: {
2129 unsigned ImmOps = Inst.getNumOperands() - 2;
2130 int64_t ImmR = Inst.getOperand(ImmOps).getImm();
2131 int64_t ImmS = Inst.getOperand(ImmOps+1).getImm();
2132 int64_t RegWidth = 0;
2133 switch (Inst.getOpcode()) {
2134 case AArch64::SBFXxxii: case AArch64::UBFXxxii: case AArch64::BFXILxxii:
2137 case AArch64::SBFXwwii: case AArch64::UBFXwwii: case AArch64::BFXILwwii:
2142 if (ImmS >= RegWidth || ImmS < ImmR) {
2143 return Error(Operands[4]->getStartLoc(),
2144 "requested extract overflows register");
2148 case AArch64::ICix: {
2149 int64_t ImmVal = Inst.getOperand(0).getImm();
2150 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2151 if (!A64IC::NeedsRegister(ICOp)) {
2152 return Error(Operands[1]->getStartLoc(),
2153 "specified IC op does not use a register");
2157 case AArch64::ICi: {
2158 int64_t ImmVal = Inst.getOperand(0).getImm();
2159 A64IC::ICValues ICOp = static_cast<A64IC::ICValues>(ImmVal);
2160 if (A64IC::NeedsRegister(ICOp)) {
2161 return Error(Operands[1]->getStartLoc(),
2162 "specified IC op requires a register");
2166 case AArch64::TLBIix: {
2167 int64_t ImmVal = Inst.getOperand(0).getImm();
2168 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2169 if (!A64TLBI::NeedsRegister(TLBIOp)) {
2170 return Error(Operands[1]->getStartLoc(),
2171 "specified TLBI op does not use a register");
2175 case AArch64::TLBIi: {
2176 int64_t ImmVal = Inst.getOperand(0).getImm();
2177 A64TLBI::TLBIValues TLBIOp = static_cast<A64TLBI::TLBIValues>(ImmVal);
2178 if (A64TLBI::NeedsRegister(TLBIOp)) {
2179 return Error(Operands[1]->getStartLoc(),
2180 "specified TLBI op requires a register");
2190 // Parses the instruction *together with* all operands, appending each parsed
2191 // operand to the "Operands" list
// NOTE(review): listing is elided (embedded line numbers jump); the 'return'
// statements after the eatToEndOfStatement() calls and the final Lex/return
// are not visible here.
2192 bool AArch64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
2193 StringRef Name, SMLoc NameLoc,
2194 SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
// A '.' in the mnemonic separates an optional condition-code suffix
// (e.g. "b.eq"); everything before the dot is the mnemonic proper.
2195 size_t CondCodePos = Name.find('.');
2197 StringRef Mnemonic = Name.substr(0, CondCodePos);
2198 Operands.push_back(AArch64Operand::CreateToken(Mnemonic, NameLoc));
2200 if (CondCodePos != StringRef::npos) {
2201 // We have a condition code
2202 SMLoc S = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 1);
2203 StringRef CondStr = Name.substr(CondCodePos + 1, StringRef::npos);
2204 A64CC::CondCodes Code;
2206 Code = A64StringToCondCode(CondStr);
2208 if (Code == A64CC::Invalid) {
2209 Error(S, "invalid condition code");
2210 Parser.eatToEndOfStatement();
// The '.' and the condition code are pushed as separate operands so the
// matcher sees them individually.
2214 SMLoc DotL = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos);
2216 Operands.push_back(AArch64Operand::CreateToken(".", DotL));
// End = dot position + 3, i.e. the suffix is treated as two characters long.
2217 SMLoc E = SMLoc::getFromPointer(NameLoc.getPointer() + CondCodePos + 3);
2218 Operands.push_back(AArch64Operand::CreateCondCode(Code, S, E));
2221 // Now we parse the operands of this instruction
2222 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2223 // Read the first operand.
2224 if (ParseOperand(Operands, Mnemonic)) {
2225 Parser.eatToEndOfStatement();
// Remaining operands are comma-separated.
2229 while (getLexer().is(AsmToken::Comma)) {
2230 Parser.Lex(); // Eat the comma.
2232 // Parse and remember the operand.
2233 if (ParseOperand(Operands, Mnemonic)) {
2234 Parser.eatToEndOfStatement();
2239 // After successfully parsing some operands there are two special cases to
2240 // consider (i.e. notional operands not separated by commas). Both are due
2241 // to memory specifiers:
2242 // + An RBrac will end an address for load/store/prefetch
2243 // + An '!' will indicate a pre-indexed operation.
2245 // It's someone else's responsibility to make sure these tokens are sane
2246 // in the given context!
2247 if (Parser.getTok().is(AsmToken::RBrac)) {
2248 SMLoc Loc = Parser.getTok().getLoc();
2249 Operands.push_back(AArch64Operand::CreateToken("]", Loc));
2253 if (Parser.getTok().is(AsmToken::Exclaim)) {
2254 SMLoc Loc = Parser.getTok().getLoc();
2255 Operands.push_back(AArch64Operand::CreateToken("!", Loc));
// Anything left on the line at this point is a parse error.
2261 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2262 SMLoc Loc = getLexer().getLoc();
2263 Parser.eatToEndOfStatement();
2264 return Error(Loc, "expected comma before next operand");
2267 // Eat the EndOfStatement
// Top-level directive dispatcher: recognizes the AArch64-specific directives
// and forwards to the specialized handlers. The result for unrecognized
// directives (original lines 2283+) is elided from this listing.
2273 bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
2274 StringRef IDVal = DirectiveID.getIdentifier();
// .hword/.word/.xword emit 2-, 4- and 8-byte data values respectively.
2275 if (IDVal == ".hword")
2276 return ParseDirectiveWord(2, DirectiveID.getLoc());
2277 else if (IDVal == ".word")
2278 return ParseDirectiveWord(4, DirectiveID.getLoc());
2279 else if (IDVal == ".xword")
2280 return ParseDirectiveWord(8, DirectiveID.getLoc());
2281 else if (IDVal == ".tlsdesccall")
2282 return ParseDirectiveTLSDescCall(DirectiveID.getLoc());
2287 /// parseDirectiveWord
2288 /// ::= .word [ expression (, expression)* ]
// Emits each comma-separated expression as a Size-byte value via the
// streamer. NOTE(review): the loop construct and several returns are elided
// from this listing (embedded line numbers jump 2290->2292, 2293->2296, ...).
2289 bool AArch64AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
2290 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2292 const MCExpr *Value;
2293 if (getParser().parseExpression(Value))
2296 getParser().getStreamer().EmitValue(Value, Size);
2298 if (getLexer().is(AsmToken::EndOfStatement))
2301 // FIXME: Improve diagnostic.
// The diagnostic points at the directive location L, not the offending token.
2302 if (getLexer().isNot(AsmToken::Comma))
2303 return Error(L, "unexpected token in directive");
2312 // parseDirectiveTLSDescCall:
2313 // ::= .tlsdesccall symbol
// Emits a TLSDESCCALL pseudo-instruction whose operand is a symbol-ref
// expression for the named symbol. NOTE(review): the declarations of 'Name'
// and 'Inst' fall in elided lines (2315, 2321-2322) of this listing.
2314 bool AArch64AsmParser::ParseDirectiveTLSDescCall(SMLoc L) {
2316 if (getParser().parseIdentifier(Name))
2317 return Error(L, "expected symbol after directive");
2319 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
2320 const MCSymbolRefExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
2323 Inst.setOpcode(AArch64::TLSDESCCALL);
2324 Inst.addOperand(MCOperand::CreateExpr(Expr));
2326 getParser().getStreamer().EmitInstruction(Inst);
// Runs the tablegen'erated matcher (MatchInstructionImpl) over the parsed
// Operands, then performs the target-specific validateInstruction() check and
// emits the MCInst on success. On failure, maps the match result to a
// diagnostic. NOTE(review): this listing is elided — the Match_Success case
// label, braces/'break's, the MCInst declaration, and several Match_* case
// labels fall in lines not visible here (embedded line numbers jump).
2331 bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2332 SmallVectorImpl<MCParsedAsmOperand*> &Operands,
2333 MCStreamer &Out, unsigned &ErrorInfo,
2334 bool MatchingInlineAsm) {
2336 unsigned MatchResult;
2337 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
// Guard: when ErrorInfo names an operand it must be a valid index — the
// diagnostic cases below index Operands[ErrorInfo] without re-checking.
2340 if (ErrorInfo != ~0U && ErrorInfo >= Operands.size())
2341 return Error(IDLoc, "too few operands for instruction");
2343 switch (MatchResult) {
// Success path (case label elided): validate, then emit to the streamer.
2346 if (validateInstruction(Inst, Operands))
2349 Out.EmitInstruction(Inst);
2351 case Match_MissingFeature:
2352 Error(IDLoc, "instruction requires a CPU feature not currently enabled");
2354 case Match_InvalidOperand: {
2355 SMLoc ErrorLoc = IDLoc;
2356 if (ErrorInfo != ~0U) {
2357 ErrorLoc = ((AArch64Operand*)Operands[ErrorInfo])->getStartLoc();
// Fall back to the instruction location if the operand has no location.
2358 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
2361 return Error(ErrorLoc, "invalid operand for instruction");
2363 case Match_MnemonicFail:
2364 return Error(IDLoc, "invalid instruction");
// Remaining cases are per-operand-class diagnostics whose enumerators come
// from GET_OPERAND_DIAGNOSTIC_TYPES (AArch64GenAsmMatcher.inc); each points
// at the offending operand. Some case labels are in elided lines (e.g. for
// the FPImm, label/offset and lane-specifier messages below).
2366 case Match_AddSubRegExtendSmall:
2367 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2368 "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
2369 case Match_AddSubRegExtendLarge:
2370 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2371 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
2372 case Match_AddSubRegShift32:
2373 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2374 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
2375 case Match_AddSubRegShift64:
2376 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2377 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
2378 case Match_AddSubSecondSource:
2379 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2380 "expected compatible register, symbol or integer in range [0, 4095]");
2381 case Match_CVTFixedPos32:
2382 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2383 "expected integer in range [1, 32]");
2384 case Match_CVTFixedPos64:
2385 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2386 "expected integer in range [1, 64]");
2387 case Match_CondCode:
2388 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2389 "expected AArch64 condition code");
2391 // Any situation which allows a nontrivial floating-point constant also
2392 // allows a register.
2393 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2394 "expected compatible register or floating-point constant");
2396 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2397 "expected floating-point constant #0.0 or invalid register type");
2399 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2400 "expected label or encodable integer pc offset");
2402 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2403 "expected lane specifier '[1]'");
2404 case Match_LoadStoreExtend32_1:
2405 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2406 "expected 'uxtw' or 'sxtw' with optional shift of #0");
2407 case Match_LoadStoreExtend32_2:
2408 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2409 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
2410 case Match_LoadStoreExtend32_4:
2411 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2412 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
2413 case Match_LoadStoreExtend32_8:
2414 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2415 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
2416 case Match_LoadStoreExtend32_16:
2417 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2418 "expected 'lsl' or 'sxtw' with optional shift of #0 or #4");
2419 case Match_LoadStoreExtend64_1:
2420 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2421 "expected 'lsl' or 'sxtx' with optional shift of #0");
2422 case Match_LoadStoreExtend64_2:
2423 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2424 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
2425 case Match_LoadStoreExtend64_4:
2426 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2427 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
2428 case Match_LoadStoreExtend64_8:
2429 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2430 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
2431 case Match_LoadStoreExtend64_16:
2432 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2433 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
2434 case Match_LoadStoreSImm7_4:
2435 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2436 "expected integer multiple of 4 in range [-256, 252]");
2437 case Match_LoadStoreSImm7_8:
2438 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2439 "expected integer multiple of 8 in range [-512, 508]");
2440 case Match_LoadStoreSImm7_16:
2441 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2442 "expected integer multiple of 16 in range [-1024, 1016]");
2443 case Match_LoadStoreSImm9:
2444 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2445 "expected integer in range [-256, 255]");
2446 case Match_LoadStoreUImm12_1:
2447 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2448 "expected symbolic reference or integer in range [0, 4095]");
2449 case Match_LoadStoreUImm12_2:
2450 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2451 "expected symbolic reference or integer in range [0, 8190]");
2452 case Match_LoadStoreUImm12_4:
2453 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2454 "expected symbolic reference or integer in range [0, 16380]");
2455 case Match_LoadStoreUImm12_8:
2456 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2457 "expected symbolic reference or integer in range [0, 32760]");
2458 case Match_LoadStoreUImm12_16:
2459 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2460 "expected symbolic reference or integer in range [0, 65520]");
2461 case Match_LogicalSecondSource:
2462 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2463 "expected compatible register or logical immediate");
2464 case Match_MOVWUImm16:
2465 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2466 "expected relocated symbol or integer in range [0, 65535]");
2468 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2469 "expected readable system register");
2471 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2472 "expected writable system register or pstate");
2473 case Match_NamedImm_at:
2474 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2475 "expected symbolic 'at' operand: s1e[0-3][rw] or s12e[01][rw]");
2476 case Match_NamedImm_dbarrier:
2477 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2478 "expected integer in range [0, 15] or symbolic barrier operand");
2479 case Match_NamedImm_dc:
2480 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2481 "expected symbolic 'dc' operand");
2482 case Match_NamedImm_ic:
2483 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2484 "expected 'ic' operand: 'ialluis', 'iallu' or 'ivau'");
2485 case Match_NamedImm_isb:
2486 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2487 "expected integer in range [0, 15] or 'sy'");
2488 case Match_NamedImm_prefetch:
2489 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2490 "expected prefetch hint: p(ld|st|i)l[123](strm|keep)");
2491 case Match_NamedImm_tlbi:
2492 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2493 "expected translation buffer invalidation operand");
2495 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2496 "expected integer in range [0, 65535]");
2498 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2499 "expected integer in range [0, 7]");
2501 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2502 "expected integer in range [0, 15]");
2504 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2505 "expected integer in range [0, 31]");
2507 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2508 "expected integer in range [0, 63]");
2510 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2511 "expected integer in range [0, 127]");
2513 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2514 "expected integer in range [<lsb>, 31]");
2516 return Error(((AArch64Operand*)Operands[ErrorInfo])->getStartLoc(),
2517 "expected integer in range [<lsb>, 63]");
2519 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2520 "expected integer in range [1, 8]");
2521 case Match_ShrImm16:
2522 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2523 "expected integer in range [1, 16]");
2524 case Match_ShrImm32:
2525 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2526 "expected integer in range [1, 32]");
2527 case Match_ShrImm64:
2528 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2529 "expected integer in range [1, 64]");
2531 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2532 "expected integer in range [0, 7]");
2533 case Match_ShlImm16:
2534 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2535 "expected integer in range [0, 15]");
2536 case Match_ShlImm32:
2537 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2538 "expected integer in range [0, 31]");
2539 case Match_ShlImm64:
2540 return Error(((AArch64Operand *)Operands[ErrorInfo])->getStartLoc(),
2541 "expected integer in range [0, 63]");
// Reaching here means a new match result type was added to the matcher
// without a corresponding diagnostic case above.
2544 llvm_unreachable("Implement any new match types added!");
// Debug pretty-printer for a parsed operand. NOTE(review): the switch header
// and the per-kind case labels fall in elided lines of this listing
// (embedded numbers jump 2548 -> 2551, ...); only the print statements for
// each operand kind are visible.
2548 void AArch64Operand::print(raw_ostream &OS) const {
2551 OS << "<CondCode: " << CondCode.Code << ">";
2554 OS << "<fpimm: " << FPImm.Val << ">";
2557 OS << "<immwithlsl: imm=" << ImmWithLSL.Val
2558 << ", shift=" << ImmWithLSL.ShiftAmount << ">";
2561 getImm()->print(OS);
2564 OS << "<register " << getReg() << '>';
2567 OS << '\'' << getToken() << '\'';
2570 OS << "<shift: type=" << ShiftExtend.ShiftType
2571 << ", amount=" << ShiftExtend.Amount << ">";
2574 StringRef Name(SysReg.Data, SysReg.Length);
2575 OS << "<sysreg: " << Name << '>';
// Fallback for an operand kind with no printer implemented.
2579 llvm_unreachable("No idea how to print this kind of operand");
// Debug helper; the body (original lines 2585+) is elided from this listing —
// presumably it forwards to print(); confirm against the full source.
2584 void AArch64Operand::dump() const {
2589 /// Force static initialization.
// Registers this asm parser with the MC target registry so it is selected
// for TheAArch64Target; the closing brace (line 2592) is elided here.
2590 extern "C" void LLVMInitializeAArch64AsmParser() {
2591 RegisterMCAsmParser<AArch64AsmParser> X(TheAArch64Target);
2594 #define GET_REGISTER_MATCHER
2595 #define GET_MATCHER_IMPLEMENTATION
2596 #include "AArch64GenAsmMatcher.inc"