1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64BaseInfo.h"
12 #include "MCTargetDesc/ARM64MCExpr.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 unsigned parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind);
59 bool parseOptionalShift(OperandVector &Operands);
60 bool parseOptionalExtend(OperandVector &Operands);
61 bool parseRegister(OperandVector &Operands);
62 bool parseMemory(OperandVector &Operands);
63 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64 bool parseVectorList(OperandVector &Operands);
65 bool parseOperand(OperandVector &Operands, bool isCondCode,
68 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70 bool showMatchError(SMLoc Loc, unsigned ErrCode);
72 bool parseDirectiveWord(unsigned Size, SMLoc L);
73 bool parseDirectiveTLSDescCall(SMLoc L);
75 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
80 unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
89 OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91 OperandMatchResultTy tryParseSystemRegister(OperandVector &Operands);
92 OperandMatchResultTy tryParseCPSRField(OperandVector &Operands);
93 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
94 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
95 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
98 bool tryParseVectorRegister(OperandVector &Operands);
101 enum ARM64MatchResultTy {
102 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
106 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107 const MCInstrInfo &MII)
108 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
109 MCAsmParserExtension::Initialize(_Parser);
112 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
113 SMLoc NameLoc, OperandVector &Operands);
114 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
115 virtual bool ParseDirective(AsmToken DirectiveID);
116 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
118 static bool classifySymbolRef(const MCExpr *Expr,
119 ARM64MCExpr::VariantKind &ELFRefKind,
120 MCSymbolRefExpr::VariantKind &DarwinRefKind,
121 const MCConstantExpr *&Addend);
123 } // end anonymous namespace
127 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
129 class ARM64Operand : public MCParsedAsmOperand {
132 ImmediateOffset, // pre-indexed, no writeback
133 RegisterOffset // register offset, with optional extend
154 SMLoc StartLoc, EndLoc, OffsetLoc;
159 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
167 struct VectorListOp {
170 unsigned NumElements;
171 unsigned ElementKind;
174 struct VectorIndexOp {
183 unsigned Val; // Encoded 8-bit representation.
187 unsigned Val; // Not the enum since not all values have names.
190 struct SystemRegisterOp {
191 // 16-bit immediate, usually from the ARM64SYS::SystemRegister enum,
192 // but not limited to those values.
197 ARM64SYS::CPSRField Field;
216 // This is for all forms of ARM64 address expressions
218 unsigned BaseRegNum, OffsetRegNum;
219 ARM64_AM::ExtendType ExtType;
222 const MCExpr *OffsetImm;
229 struct VectorListOp VectorList;
230 struct VectorIndexOp VectorIndex;
232 struct FPImmOp FPImm;
233 struct BarrierOp Barrier;
234 struct SystemRegisterOp SystemRegister;
235 struct CPSRFieldOp CPSRField;
236 struct SysCRImmOp SysCRImm;
237 struct PrefetchOp Prefetch;
238 struct ShifterOp Shifter;
239 struct ExtendOp Extend;
243 // Keep the MCContext around as the MCExprs may need manipulated during
244 // the add<>Operands() calls.
247 ARM64Operand(KindTy K, MCContext &_Ctx)
248 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
251 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
253 StartLoc = o.StartLoc;
268 case k_SystemRegister:
269 SystemRegister = o.SystemRegister;
272 CPSRField = o.CPSRField;
278 VectorList = o.VectorList;
281 VectorIndex = o.VectorIndex;
284 SysCRImm = o.SysCRImm;
287 Prefetch = o.Prefetch;
301 /// getStartLoc - Get the location of the first token of this operand.
302 SMLoc getStartLoc() const { return StartLoc; }
303 /// getEndLoc - Get the location of the last token of this operand.
304 SMLoc getEndLoc() const { return EndLoc; }
305 /// getOffsetLoc - Get the location of the offset of this memory operand.
306 SMLoc getOffsetLoc() const { return OffsetLoc; }
308 StringRef getToken() const {
309 assert(Kind == k_Token && "Invalid access!");
310 return StringRef(Tok.Data, Tok.Length);
313 bool isTokenSuffix() const {
314 assert(Kind == k_Token && "Invalid access!");
318 const MCExpr *getImm() const {
319 assert(Kind == k_Immediate && "Invalid access!");
323 unsigned getFPImm() const {
324 assert(Kind == k_FPImm && "Invalid access!");
328 unsigned getBarrier() const {
329 assert(Kind == k_Barrier && "Invalid access!");
333 uint16_t getSystemRegister() const {
334 assert(Kind == k_SystemRegister && "Invalid access!");
335 return SystemRegister.Val;
338 ARM64SYS::CPSRField getCPSRField() const {
339 assert(Kind == k_CPSRField && "Invalid access!");
340 return CPSRField.Field;
343 unsigned getReg() const {
344 assert(Kind == k_Register && "Invalid access!");
348 unsigned getVectorListStart() const {
349 assert(Kind == k_VectorList && "Invalid access!");
350 return VectorList.RegNum;
353 unsigned getVectorListCount() const {
354 assert(Kind == k_VectorList && "Invalid access!");
355 return VectorList.Count;
358 unsigned getVectorIndex() const {
359 assert(Kind == k_VectorIndex && "Invalid access!");
360 return VectorIndex.Val;
363 unsigned getSysCR() const {
364 assert(Kind == k_SysCR && "Invalid access!");
368 unsigned getPrefetch() const {
369 assert(Kind == k_Prefetch && "Invalid access!");
373 unsigned getShifter() const {
374 assert(Kind == k_Shifter && "Invalid access!");
378 unsigned getExtend() const {
379 assert(Kind == k_Extend && "Invalid access!");
383 bool isImm() const { return Kind == k_Immediate; }
384 bool isSImm9() const {
387 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
390 int64_t Val = MCE->getValue();
391 return (Val >= -256 && Val < 256);
393 bool isSImm7s4() const {
396 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
399 int64_t Val = MCE->getValue();
400 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
402 bool isSImm7s8() const {
405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
408 int64_t Val = MCE->getValue();
409 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
411 bool isSImm7s16() const {
414 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
417 int64_t Val = MCE->getValue();
418 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
420 bool isImm0_7() const {
423 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
426 int64_t Val = MCE->getValue();
427 return (Val >= 0 && Val < 8);
429 bool isImm1_8() const {
432 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
435 int64_t Val = MCE->getValue();
436 return (Val > 0 && Val < 9);
438 bool isImm0_15() const {
441 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
444 int64_t Val = MCE->getValue();
445 return (Val >= 0 && Val < 16);
447 bool isImm1_16() const {
450 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
453 int64_t Val = MCE->getValue();
454 return (Val > 0 && Val < 17);
456 bool isImm0_31() const {
459 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
462 int64_t Val = MCE->getValue();
463 return (Val >= 0 && Val < 32);
465 bool isImm1_31() const {
468 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
471 int64_t Val = MCE->getValue();
472 return (Val >= 1 && Val < 32);
474 bool isImm1_32() const {
477 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
480 int64_t Val = MCE->getValue();
481 return (Val >= 1 && Val < 33);
483 bool isImm0_63() const {
486 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
489 int64_t Val = MCE->getValue();
490 return (Val >= 0 && Val < 64);
492 bool isImm1_63() const {
495 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
498 int64_t Val = MCE->getValue();
499 return (Val >= 1 && Val < 64);
501 bool isImm1_64() const {
504 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
507 int64_t Val = MCE->getValue();
508 return (Val >= 1 && Val < 65);
510 bool isImm0_127() const {
513 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
516 int64_t Val = MCE->getValue();
517 return (Val >= 0 && Val < 128);
519 bool isImm0_255() const {
522 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
525 int64_t Val = MCE->getValue();
526 return (Val >= 0 && Val < 256);
528 bool isImm0_65535() const {
531 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
534 int64_t Val = MCE->getValue();
535 return (Val >= 0 && Val < 65536);
537 bool isLogicalImm32() const {
540 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
543 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
545 bool isLogicalImm64() const {
548 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
551 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
553 bool isSIMDImmType10() const {
556 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
559 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
561 bool isBranchTarget26() const {
564 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567 int64_t Val = MCE->getValue();
570 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
572 bool isBranchTarget19() const {
575 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578 int64_t Val = MCE->getValue();
581 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
583 bool isBranchTarget14() const {
586 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
589 int64_t Val = MCE->getValue();
592 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
595 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
599 ARM64MCExpr::VariantKind ELFRefKind;
600 MCSymbolRefExpr::VariantKind DarwinRefKind;
601 const MCConstantExpr *Addend;
602 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
606 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
609 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
610 if (ELFRefKind == AllowedModifiers[i])
617 bool isMovZSymbolG3() const {
618 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
619 return isMovWSymbol(Variants);
622 bool isMovZSymbolG2() const {
623 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
624 ARM64MCExpr::VK_TPREL_G2,
625 ARM64MCExpr::VK_DTPREL_G2 };
626 return isMovWSymbol(Variants);
629 bool isMovZSymbolG1() const {
630 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
631 ARM64MCExpr::VK_GOTTPREL_G1,
632 ARM64MCExpr::VK_TPREL_G1,
633 ARM64MCExpr::VK_DTPREL_G1, };
634 return isMovWSymbol(Variants);
637 bool isMovZSymbolG0() const {
638 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
639 ARM64MCExpr::VK_TPREL_G0,
640 ARM64MCExpr::VK_DTPREL_G0 };
641 return isMovWSymbol(Variants);
644 bool isMovKSymbolG2() const {
645 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
646 return isMovWSymbol(Variants);
649 bool isMovKSymbolG1() const {
650 static ARM64MCExpr::VariantKind Variants[] = {
651 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
652 ARM64MCExpr::VK_DTPREL_G1_NC
654 return isMovWSymbol(Variants);
657 bool isMovKSymbolG0() const {
658 static ARM64MCExpr::VariantKind Variants[] = {
659 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
660 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
662 return isMovWSymbol(Variants);
665 bool isFPImm() const { return Kind == k_FPImm; }
666 bool isBarrier() const { return Kind == k_Barrier; }
667 bool isSystemRegister() const {
668 if (Kind == k_SystemRegister)
670 // SPSel is legal for both the system register and the CPSR-field
671 // variants of MSR, so special case that. Fugly.
672 return (Kind == k_CPSRField && getCPSRField() == ARM64SYS::cpsr_SPSel);
674 bool isSystemCPSRField() const { return Kind == k_CPSRField; }
675 bool isReg() const { return Kind == k_Register && !Reg.isVector; }
676 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
678 /// Is this a vector list with the type implicit (presumably attached to the
679 /// instruction itself)?
680 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
681 return Kind == k_VectorList && VectorList.Count == NumRegs &&
682 !VectorList.ElementKind;
685 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
686 bool isTypedVectorList() const {
687 if (Kind != k_VectorList)
689 if (VectorList.Count != NumRegs)
691 if (VectorList.ElementKind != ElementKind)
693 return VectorList.NumElements == NumElements;
696 bool isVectorIndexB() const {
697 return Kind == k_VectorIndex && VectorIndex.Val < 16;
699 bool isVectorIndexH() const {
700 return Kind == k_VectorIndex && VectorIndex.Val < 8;
702 bool isVectorIndexS() const {
703 return Kind == k_VectorIndex && VectorIndex.Val < 4;
705 bool isVectorIndexD() const {
706 return Kind == k_VectorIndex && VectorIndex.Val < 2;
708 bool isToken() const { return Kind == k_Token; }
709 bool isTokenEqual(StringRef Str) const {
710 return Kind == k_Token && getToken() == Str;
712 bool isMem() const { return Kind == k_Memory; }
713 bool isSysCR() const { return Kind == k_SysCR; }
714 bool isPrefetch() const { return Kind == k_Prefetch; }
715 bool isShifter() const { return Kind == k_Shifter; }
716 bool isExtend() const {
717 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
719 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
720 return ST == ARM64_AM::LSL;
722 return Kind == k_Extend;
724 bool isExtend64() const {
725 if (Kind != k_Extend)
727 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
728 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
729 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
731 bool isExtendLSL64() const {
732 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
734 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
735 return ST == ARM64_AM::LSL;
737 if (Kind != k_Extend)
739 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
740 return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
743 bool isArithmeticShifter() const {
747 // An arithmetic shifter is LSL, LSR, or ASR.
748 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
749 return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
752 bool isMovImm32Shifter() const {
756 // A 32-bit MOVi shifter is LSL of 0 or 16 (the comment previously
756b // described the 64-bit form; the code below is authoritative).
757 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
758 if (ST != ARM64_AM::LSL)
760 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
761 return (Val == 0 || Val == 16);
764 bool isMovImm64Shifter() const {
768 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48 (the comment
768b // previously described the 32-bit form; the code below is authoritative).
769 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
770 if (ST != ARM64_AM::LSL)
772 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
773 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
776 bool isAddSubShifter() const {
780 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
781 unsigned Val = Shifter.Val;
782 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
783 (ARM64_AM::getShiftValue(Val) == 0 ||
784 ARM64_AM::getShiftValue(Val) == 12);
787 bool isLogicalVecShifter() const {
791 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
792 unsigned Val = Shifter.Val;
793 unsigned Shift = ARM64_AM::getShiftValue(Val);
794 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
795 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
798 bool isLogicalVecHalfWordShifter() const {
799 if (!isLogicalVecShifter())
802 // A logical vector half-word shifter is a left shift by 0 or 8 only.
803 unsigned Val = Shifter.Val;
804 unsigned Shift = ARM64_AM::getShiftValue(Val);
805 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
806 (Shift == 0 || Shift == 8);
809 bool isMoveVecShifter() const {
813 // A move vector shifter is an MSL (shift-ones) shift of 8 or 16,
813b // not an LSL — note the ARM64_AM::MSL check below.
814 unsigned Val = Shifter.Val;
815 unsigned Shift = ARM64_AM::getShiftValue(Val);
816 return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
817 (Shift == 8 || Shift == 16);
820 bool isMemoryRegisterOffset8() const {
821 return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
824 bool isMemoryRegisterOffset16() const {
825 return isMem() && Mem.Mode == RegisterOffset &&
826 (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
829 bool isMemoryRegisterOffset32() const {
830 return isMem() && Mem.Mode == RegisterOffset &&
831 (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
834 bool isMemoryRegisterOffset64() const {
835 return isMem() && Mem.Mode == RegisterOffset &&
836 (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
839 bool isMemoryRegisterOffset128() const {
840 return isMem() && Mem.Mode == RegisterOffset &&
841 (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
844 bool isMemoryUnscaled() const {
847 if (Mem.Mode != ImmediateOffset)
851 // Make sure the immediate value is valid.
852 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
855 // The offset must fit in a signed 9-bit unscaled immediate.
856 int64_t Value = CE->getValue();
857 return (Value >= -256 && Value < 256);
859 // Fallback unscaled operands are for aliases of LDR/STR that fall back
860 // to LDUR/STUR when the offset is not legal for the former but is for
861 // the latter. As such, in addition to checking for being a legal unscaled
862 // address, also check that it is not a legal scaled address. This avoids
863 // ambiguity in the matcher.
864 bool isMemoryUnscaledFB8() const {
865 return isMemoryUnscaled() && !isMemoryIndexed8();
867 bool isMemoryUnscaledFB16() const {
868 return isMemoryUnscaled() && !isMemoryIndexed16();
870 bool isMemoryUnscaledFB32() const {
871 return isMemoryUnscaled() && !isMemoryIndexed32();
873 bool isMemoryUnscaledFB64() const {
874 return isMemoryUnscaled() && !isMemoryIndexed64();
876 bool isMemoryUnscaledFB128() const {
877 return isMemoryUnscaled() && !isMemoryIndexed128();
879 bool isMemoryIndexed(unsigned Scale) const {
882 if (Mem.Mode != ImmediateOffset)
886 // Make sure the immediate value is valid.
887 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
890 // The offset must be a positive multiple of the scale and in range of
891 // encoding with a 12-bit immediate.
892 int64_t Value = CE->getValue();
893 return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
896 // If it's not a constant, check for some expressions we know.
897 const MCExpr *Expr = Mem.OffsetImm;
898 ARM64MCExpr::VariantKind ELFRefKind;
899 MCSymbolRefExpr::VariantKind DarwinRefKind;
900 const MCConstantExpr *Addend;
901 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
903 // If we don't understand the expression, assume the best and
904 // let the fixup and relocation code deal with it.
908 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
909 ELFRefKind == ARM64MCExpr::VK_LO12 ||
910 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
911 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
912 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
913 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
914 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
915 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
916 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
917 // Note that we don't range-check the addend. It's adjusted modulo page
918 // size when converted, so there is no "out of range" condition when using
920 int64_t Value = Addend ? Addend->getValue() : 0;
921 return Value >= 0 && (Value % Scale) == 0;
922 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
923 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
924 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
930 bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
931 bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
932 bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
933 bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
934 bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
935 bool isMemoryNoIndex() const {
938 if (Mem.Mode != ImmediateOffset)
943 // Make sure the immediate value is valid. Only zero is allowed.
944 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
945 if (!CE || CE->getValue() != 0)
949 bool isMemorySIMDNoIndex() const {
952 if (Mem.Mode != ImmediateOffset)
954 return Mem.OffsetImm == 0;
956 bool isMemoryIndexedSImm9() const {
957 if (!isMem() || Mem.Mode != ImmediateOffset)
961 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
962 assert(CE && "Non-constant pre-indexed offset!");
963 int64_t Value = CE->getValue();
964 return Value >= -256 && Value <= 255;
966 bool isMemoryIndexed32SImm7() const {
967 if (!isMem() || Mem.Mode != ImmediateOffset)
971 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
972 assert(CE && "Non-constant pre-indexed offset!");
973 int64_t Value = CE->getValue();
974 return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
976 bool isMemoryIndexed64SImm7() const {
977 if (!isMem() || Mem.Mode != ImmediateOffset)
981 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
982 assert(CE && "Non-constant pre-indexed offset!");
983 int64_t Value = CE->getValue();
984 return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
986 bool isMemoryIndexed128SImm7() const {
987 if (!isMem() || Mem.Mode != ImmediateOffset)
991 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
992 assert(CE && "Non-constant pre-indexed offset!");
993 int64_t Value = CE->getValue();
994 return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
997 bool isAdrpLabel() const {
998 // Validation was handled during parsing, so we just sanity check that
999 // something didn't go haywire.
1003 bool isAdrLabel() const {
1004 // Validation was handled during parsing, so we just sanity check that
1005 // something didn't go haywire.
1009 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1010 // Add as immediates when possible. Null MCExpr = 0.
1012 Inst.addOperand(MCOperand::CreateImm(0));
1013 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1014 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1016 Inst.addOperand(MCOperand::CreateExpr(Expr));
1019 void addRegOperands(MCInst &Inst, unsigned N) const {
1020 assert(N == 1 && "Invalid number of operands!");
1021 Inst.addOperand(MCOperand::CreateReg(getReg()));
1024 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1025 assert(N == 1 && "Invalid number of operands!");
1026 Inst.addOperand(MCOperand::CreateReg(getReg()));
1029 template <unsigned NumRegs>
1030 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1031 assert(N == 1 && "Invalid number of operands!");
1032 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1033 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1034 unsigned FirstReg = FirstRegs[NumRegs - 1];
1037 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1040 template <unsigned NumRegs>
1041 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1042 assert(N == 1 && "Invalid number of operands!");
1043 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1044 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1045 unsigned FirstReg = FirstRegs[NumRegs - 1];
1048 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1051 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1052 assert(N == 1 && "Invalid number of operands!");
1053 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1056 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1057 assert(N == 1 && "Invalid number of operands!");
1058 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1061 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1062 assert(N == 1 && "Invalid number of operands!");
1063 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1066 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1067 assert(N == 1 && "Invalid number of operands!");
1068 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1071 void addImmOperands(MCInst &Inst, unsigned N) const {
1072 assert(N == 1 && "Invalid number of operands!");
1073 // If this is a pageoff symrefexpr with an addend, adjust the addend
1074 // to be only the page-offset portion. Otherwise, just add the expr
1076 addExpr(Inst, getImm());
1079 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1080 addImmOperands(Inst, N);
1083 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1084 addImmOperands(Inst, N);
1087 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1088 assert(N == 1 && "Invalid number of operands!");
1089 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1090 assert(MCE && "Invalid constant immediate operand!");
1091 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1094 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1095 assert(N == 1 && "Invalid number of operands!");
1096 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1097 assert(MCE && "Invalid constant immediate operand!");
1098 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1101 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1102 assert(N == 1 && "Invalid number of operands!");
1103 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1104 assert(MCE && "Invalid constant immediate operand!");
1105 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1108 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1109 assert(N == 1 && "Invalid number of operands!");
1110 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1111 assert(MCE && "Invalid constant immediate operand!");
1112 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1115 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1116 assert(N == 1 && "Invalid number of operands!");
1117 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1118 assert(MCE && "Invalid constant immediate operand!");
1119 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1122 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1123 assert(N == 1 && "Invalid number of operands!");
1124 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1125 assert(MCE && "Invalid constant immediate operand!");
1126 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1129 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1130 assert(N == 1 && "Invalid number of operands!");
1131 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1132 assert(MCE && "Invalid constant immediate operand!");
1133 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1136 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1137 assert(N == 1 && "Invalid number of operands!");
1138 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1139 assert(MCE && "Invalid constant immediate operand!");
1140 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1143 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1144 assert(N == 1 && "Invalid number of operands!");
1145 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1146 assert(MCE && "Invalid constant immediate operand!");
1147 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// --- Immediate operand adders --------------------------------------------
// Each helper appends this operand's immediate value to \p Inst as a single
// MCOperand. The numeric range in each name (e.g. 1..31) is presumably
// validated earlier by the matcher's corresponding is*() predicate — TODO
// confirm against the full file; here the value is only asserted to be a
// constant expression.
// NOTE(review): gaps in the embedded original line numbers (1155-1156,
// 1162-1163, ...) indicate the closing brace of each method was dropped
// from this excerpt — confirm against the full file.
1150 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1151 assert(N == 1 && "Invalid number of operands!");
1152 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1153 assert(MCE && "Invalid constant immediate operand!");
1154 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1157 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1158 assert(N == 1 && "Invalid number of operands!");
1159 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1160 assert(MCE && "Invalid constant immediate operand!");
1161 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1164 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1166 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1167 assert(MCE && "Invalid constant immediate operand!");
1168 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1171 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1172 assert(N == 1 && "Invalid number of operands!");
1173 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1174 assert(MCE && "Invalid constant immediate operand!");
1175 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1178 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1179 assert(N == 1 && "Invalid number of operands!");
1180 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1181 assert(MCE && "Invalid constant immediate operand!");
1182 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1185 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1186 assert(N == 1 && "Invalid number of operands!");
1187 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1188 assert(MCE && "Invalid constant immediate operand!");
1189 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1192 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1193 assert(N == 1 && "Invalid number of operands!");
1194 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1195 assert(MCE && "Invalid constant immediate operand!");
1196 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1199 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1200 assert(N == 1 && "Invalid number of operands!");
1201 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1202 assert(MCE && "Invalid constant immediate operand!");
1203 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical immediates are re-encoded from the raw bit pattern into the
// hardware N:immr:imms form before being attached to the instruction.
1206 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1207 assert(N == 1 && "Invalid number of operands!");
1208 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1209 assert(MCE && "Invalid logical immediate operand!");
1210 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1211 Inst.addOperand(MCOperand::CreateImm(encoding));
1214 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1215 assert(N == 1 && "Invalid number of operands!");
1216 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1217 assert(MCE && "Invalid logical immediate operand!");
1218 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1219 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate, "type 10" encoding (see ARM64AddressingModes).
1222 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1223 assert(N == 1 && "Invalid number of operands!");
1224 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1225 assert(MCE && "Invalid immediate operand!");
1226 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1227 Inst.addOperand(MCOperand::CreateImm(encoding));
// --- Branch-target operand adders ----------------------------------------
// Constant branch targets are stored shifted right by 2 (the instruction
// encoding omits the low bits); symbolic targets are added as expressions
// for later fixup resolution.
// NOTE(review): in each method the embedded line numbers jump (e.g.
// 1235 -> 1237 -> 1240), so the guard between the dyn_cast and the
// addExpr() call — presumably `if (!MCE) { ... return; }` — was dropped
// from this excerpt. Confirm against the full file.
1230 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1231 // Branch operands don't encode the low bits, so shift them off
1232 // here. If it's a label, however, just put it on directly as there's
1233 // not enough information now to do anything.
1234 assert(N == 1 && "Invalid number of operands!");
1235 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1237 addExpr(Inst, getImm());
1240 assert(MCE && "Invalid constant immediate operand!");
1241 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1244 void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
1245 // Branch operands don't encode the low bits, so shift them off
1246 // here. If it's a label, however, just put it on directly as there's
1247 // not enough information now to do anything.
1248 assert(N == 1 && "Invalid number of operands!");
1249 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1251 addExpr(Inst, getImm());
1254 assert(MCE && "Invalid constant immediate operand!");
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1258 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1259 // Branch operands don't encode the low bits, so shift them off
1260 // here. If it's a label, however, just put it on directly as there's
1261 // not enough information now to do anything.
1262 assert(N == 1 && "Invalid number of operands!");
1263 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1265 addExpr(Inst, getImm());
1268 assert(MCE && "Invalid constant immediate operand!");
1269 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// --- Misc. single-immediate operand adders -------------------------------
// Each appends one already-encoded value (FP immediate, barrier option,
// system register id, CPSR field, CRn/CRm index, prefetch op) as an
// MCOperand immediate.
1272 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1273 assert(N == 1 && "Invalid number of operands!");
1274 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1277 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1278 assert(N == 1 && "Invalid number of operands!");
1279 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1282 void addSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1283 assert(N == 1 && "Invalid number of operands!");
1284 if (Kind == k_SystemRegister)
1285 Inst.addOperand(MCOperand::CreateImm(getSystemRegister()));
// Special case: a parsed CPSR-field operand is accepted here only for
// SPSel, which is emitted under its system-register encoding.
1287 assert(Kind == k_CPSRField && getCPSRField() == ARM64SYS::cpsr_SPSel);
1288 Inst.addOperand(MCOperand::CreateImm(ARM64SYS::SPSel));
1292 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1293 assert(N == 1 && "Invalid number of operands!");
1294 Inst.addOperand(MCOperand::CreateImm(getCPSRField()));
1297 void addSysCROperands(MCInst &Inst, unsigned N) const {
1298 assert(N == 1 && "Invalid number of operands!");
1299 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1302 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1304 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// --- Shifter / extend operand adders -------------------------------------
// The shifter variants all emit the pre-packed shifter immediate verbatim;
// the distinct names exist so the auto-generated matcher can apply
// per-instruction range checks on the same stored value.
1307 void addShifterOperands(MCInst &Inst, unsigned N) const {
1308 assert(N == 1 && "Invalid number of operands!");
1309 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1312 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1313 assert(N == 1 && "Invalid number of operands!");
1314 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1317 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1318 assert(N == 1 && "Invalid number of operands!");
1319 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1322 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1323 assert(N == 1 && "Invalid number of operands!");
1324 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1327 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1328 assert(N == 1 && "Invalid number of operands!");
1329 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1332 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1333 assert(N == 1 && "Invalid number of operands!");
1334 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1337 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1338 assert(N == 1 && "Invalid number of operands!");
1339 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1342 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1343 assert(N == 1 && "Invalid number of operands!");
1344 Inst.addOperand(MCOperand::CreateImm(getShifter()));
// The extend adders accept an "lsl #n" shifter operand as an alias for
// UXTX #n and re-encode it as an arithmetic-extend immediate.
// NOTE(review): line-number gaps (1350, 1355, 1367, 1372) suggest the
// `if (isShifter()) { ... } else`-style branches around the alias handling
// were elided from this excerpt — confirm against the full file.
1347 void addExtendOperands(MCInst &Inst, unsigned N) const {
1348 assert(N == 1 && "Invalid number of operands!");
1349 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1351 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1352 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1353 ARM64_AM::getShiftValue(getShifter()));
1354 Inst.addOperand(MCOperand::CreateImm(imm));
1356 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1359 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1360 assert(N == 1 && "Invalid number of operands!");
1361 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1364 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1365 assert(N == 1 && "Invalid number of operands!");
1366 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1368 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1369 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1370 ARM64_AM::getShiftValue(getShifter()));
1371 Inst.addOperand(MCOperand::CreateImm(imm));
1373 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// --- Register-offset memory operands -------------------------------------
// Emits three MCOperands: base register, offset register, and a packed
// extend/shift immediate. \p DoShift selects whether the access-size scaling
// shift is applied.
1376 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1377 assert(N == 3 && "Invalid number of operands!");
1379 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1380 Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
1381 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1382 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
// Per-access-size wrappers: for an N-byte access the explicit shift amount
// must equal log2(N) for the scaled form to be used; the 1-byte form only
// distinguishes whether a shift was written at all.
1385 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1386 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1389 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1390 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1393 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1394 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1397 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1398 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1401 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1402 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
// Emit a scaled immediate-offset memory operand: base register plus an
// offset divided by \p Scale (the encoding drops the low bits). Constant
// offsets are divided here; symbolic offsets either pass through untouched
// (pageoff symref + addend, which the linker scales) or are wrapped in a
// divide expression for the fixup evaluator.
// NOTE(review): line-number gaps (1409, 1413-1415, 1420, 1424-1425, 1428,
// 1435, 1437, 1440-1442) show that blank/brace lines and parts of the
// classifySymbolRef condition were elided from this excerpt — in particular
// the condition at 1436-1438 is visibly incomplete. Confirm against the
// full file before modifying.
1405 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1406 unsigned Scale) const {
1407 // Add the base register operand.
1408 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1410 if (!Mem.OffsetImm) {
1411 // There isn't an offset.
1412 Inst.addOperand(MCOperand::CreateImm(0));
1416 // Add the offset operand.
1417 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1418 assert(CE->getValue() % Scale == 0 &&
1419 "Offset operand must be multiple of the scale!");
1421 // The MCInst offset operand doesn't include the low bits (like the
1422 // instruction encoding).
1423 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1426 // If this is a pageoff symrefexpr with an addend, the linker will
1427 // do the scaling of the addend.
1429 // Otherwise we don't know what this is, so just add the scaling divide to
1430 // the expression and let the MC fixup evaluation code deal with it.
1431 const MCExpr *Expr = Mem.OffsetImm;
1432 ARM64MCExpr::VariantKind ELFRefKind;
1433 MCSymbolRefExpr::VariantKind DarwinRefKind;
1434 const MCConstantExpr *Addend;
1436 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1438 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1439 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1443 Inst.addOperand(MCOperand::CreateExpr(Expr));
// --- Remaining memory operand adders --------------------------------------
// Unscaled (ldur/stur-style): base register + raw signed offset.
// NOTE(review): the gap at lines 1450/1452/1454 suggests an
// `if (!Mem.OffsetImm)` branch around the zero-offset case was elided from
// this excerpt — confirm against the full file.
1446 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1447 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1448 // Add the base register operand.
1449 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1451 // Add the offset operand.
1453 Inst.addOperand(MCOperand::CreateImm(0));
1455 // Only constant offsets supported.
1456 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1457 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// Scaled immediate-offset wrappers; the scale equals the access size in
// bytes (16/8/4/2/1).
1461 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1462 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1463 addMemoryIndexedOperands(Inst, N, 16);
1466 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1467 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1468 addMemoryIndexedOperands(Inst, N, 8);
1471 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1472 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1473 addMemoryIndexedOperands(Inst, N, 4);
1476 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1477 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1478 addMemoryIndexedOperands(Inst, N, 2);
1481 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1482 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1483 addMemoryIndexedOperands(Inst, N, 1);
// "[Xn]"-only forms: just the base register; the implied offset is zero.
1486 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1487 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1488 // Add the base register operand (the offset is always zero, so ignore it).
1489 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1492 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1493 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1494 // Add the base register operand (the offset is always zero, so ignore it).
1495 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
// Pre/post-indexed (writeback) forms: base register + constant offset
// divided by \p Scale. Only constant offsets are legal here.
// NOTE(review): the jump from 1510 to 1514/1519 suggests the `Offset`
// declaration/initialization and surrounding braces were elided from this
// excerpt — confirm against the full file.
1498 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1499 unsigned Scale) const {
1500 assert(N == 2 && "Invalid number of operands!");
1502 // Add the base register operand.
1503 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1505 // Add the offset operand.
1507 if (Mem.OffsetImm) {
1508 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1509 assert(CE && "Non-constant indexed offset operand!");
1510 Offset = CE->getValue();
1514 assert(Offset % Scale == 0 &&
1515 "Offset operand must be a multiple of the scale!");
1519 Inst.addOperand(MCOperand::CreateImm(Offset));
1522 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1523 addMemoryWritebackIndexedOperands(Inst, N, 1);
1526 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1527 addMemoryWritebackIndexedOperands(Inst, N, 4);
1530 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1531 addMemoryWritebackIndexedOperands(Inst, N, 8);
1534 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1535 addMemoryWritebackIndexedOperands(Inst, N, 16);
1538 virtual void print(raw_ostream &OS) const;
// --- Static factory methods -----------------------------------------------
// Each Create* allocates an ARM64Operand of the matching kind and fills in
// its payload union. Ownership passes to the caller (operands are raw
// `new`-allocated, per the MCParsedAsmOperand convention of this era).
// NOTE(review): line-number gaps after each factory (e.g. 1546-1550,
// 1556-1560) indicate the `Op->StartLoc = ...; Op->EndLoc = ...; return Op;`
// tails and closing braces were dropped from this excerpt — confirm against
// the full file.
1540 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1542 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1543 Op->Tok.Data = Str.data();
1544 Op->Tok.Length = Str.size();
1545 Op->Tok.IsSuffix = IsSuffix;
1551 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1552 SMLoc E, MCContext &Ctx) {
1553 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1554 Op->Reg.RegNum = RegNum;
1555 Op->Reg.isVector = isVector;
1561 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1562 unsigned NumElements, char ElementKind,
1563 SMLoc S, SMLoc E, MCContext &Ctx) {
1564 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1565 Op->VectorList.RegNum = RegNum;
1566 Op->VectorList.Count = Count;
1567 Op->VectorList.NumElements = NumElements;
1568 Op->VectorList.ElementKind = ElementKind;
1574 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1576 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1577 Op->VectorIndex.Val = Idx;
1583 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1585 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1592 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1593 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1594 Op->FPImm.Val = Val;
1600 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1601 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1602 Op->Barrier.Val = Val;
1608 static ARM64Operand *CreateSystemRegister(uint16_t Val, SMLoc S,
1610 ARM64Operand *Op = new ARM64Operand(k_SystemRegister, Ctx);
1611 Op->SystemRegister.Val = Val;
1617 static ARM64Operand *CreateCPSRField(ARM64SYS::CPSRField Field, SMLoc S,
1619 ARM64Operand *Op = new ARM64Operand(k_CPSRField, Ctx);
1620 Op->CPSRField.Field = Field;
// Immediate-offset memory operand; extend/shift fields get neutral
// defaults (UXTX, no shift).
1626 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1627 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1629 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1630 Op->Mem.BaseRegNum = BaseRegNum;
1631 Op->Mem.OffsetRegNum = 0;
1632 Op->Mem.OffsetImm = Off;
1633 Op->Mem.ExtType = ARM64_AM::UXTX;
1634 Op->Mem.ShiftVal = 0;
1635 Op->Mem.ExplicitShift = false;
1636 Op->Mem.Mode = ImmediateOffset;
1637 Op->OffsetLoc = OffsetLoc;
// Register-offset memory operand ([base, offreg {, ext {#shift}}]).
1643 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1644 ARM64_AM::ExtendType ExtType,
1645 unsigned ShiftVal, bool ExplicitShift,
1646 SMLoc S, SMLoc E, MCContext &Ctx) {
1647 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1648 Op->Mem.BaseRegNum = BaseReg;
1649 Op->Mem.OffsetRegNum = OffsetReg;
1650 Op->Mem.OffsetImm = 0;
1651 Op->Mem.ExtType = ExtType;
1652 Op->Mem.ShiftVal = ShiftVal;
1653 Op->Mem.ExplicitShift = ExplicitShift;
1654 Op->Mem.Mode = RegisterOffset;
1660 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1662 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1663 Op->SysCRImm.Val = Val;
1669 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1670 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1671 Op->Prefetch.Val = Val;
// Shifter/extend operands store the packed immediate form directly.
1677 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1678 SMLoc S, SMLoc E, MCContext &Ctx) {
1679 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1680 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1686 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1687 SMLoc S, SMLoc E, MCContext &Ctx) {
1688 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1689 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1696 } // end anonymous namespace.
// Debug dump of an operand to \p OS, one format per operand kind.
// NOTE(review): this excerpt is missing most of the dispatch scaffolding —
// the `switch (Kind)`, most `case`/`break` lines, and several stream
// statements (large line-number gaps: 1699-1700, 1702-1705, 1707-1714,
// 1719-1721, 1723-1726, ...). Presumably each fragment below sits in its
// own `case k_*:` arm of a switch on Kind — confirm against the full file;
// do not edit this function from the excerpt alone.
1698 void ARM64Operand::print(raw_ostream &OS) const {
1701 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
1706 ARM64SYS::getBarrierOptName((ARM64SYS::BarrierOption)getBarrier());
1715 case k_SystemRegister: {
1716 const char *Name = ARM64SYS::getSystemRegisterName(
1717 (ARM64SYS::SystemRegister)getSystemRegister());
1718 OS << "<systemreg ";
// Fallback: unnamed system registers are printed numerically.
1722 OS << "#" << getSystemRegister();
1727 const char *Name = ARM64SYS::getCPSRFieldName(getCPSRField());
1728 OS << "<cpsrfield " << Name << ">";
1732 getImm()->print(OS);
1738 OS << "<register " << getReg() << ">";
1740 case k_VectorList: {
1741 OS << "<vectorlist ";
1742 unsigned Reg = getVectorListStart();
1743 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1744 OS << Reg + i << " ";
1749 OS << "<vectorindex " << getVectorIndex() << ">";
1752 OS << "'" << getToken() << "'";
1755 OS << "c" << getSysCR();
// Named prefetch hints print symbolically, others numerically.
1759 if (ARM64_AM::isNamedPrefetchOp(getPrefetch()))
1760 OS << ARM64_AM::getPrefetchOpName((ARM64_AM::PrefetchOp)getPrefetch());
1762 OS << "#" << getPrefetch();
1766 unsigned Val = getShifter();
1767 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1768 << ARM64_AM::getShiftValue(Val) << ">";
1772 unsigned Val = getExtend();
1773 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1774 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1780 /// @name Auto-generated Match Functions
1783 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector register name to its ARM64::QN register number.
// NOTE(review): the `.Default(...)` terminator and closing brace
// (lines 1821-1823) were elided from this excerpt.
1787 static unsigned matchVectorRegName(StringRef Name) {
1788 return StringSwitch<unsigned>(Name)
1789 .Case("v0", ARM64::Q0)
1790 .Case("v1", ARM64::Q1)
1791 .Case("v2", ARM64::Q2)
1792 .Case("v3", ARM64::Q3)
1793 .Case("v4", ARM64::Q4)
1794 .Case("v5", ARM64::Q5)
1795 .Case("v6", ARM64::Q6)
1796 .Case("v7", ARM64::Q7)
1797 .Case("v8", ARM64::Q8)
1798 .Case("v9", ARM64::Q9)
1799 .Case("v10", ARM64::Q10)
1800 .Case("v11", ARM64::Q11)
1801 .Case("v12", ARM64::Q12)
1802 .Case("v13", ARM64::Q13)
1803 .Case("v14", ARM64::Q14)
1804 .Case("v15", ARM64::Q15)
1805 .Case("v16", ARM64::Q16)
1806 .Case("v17", ARM64::Q17)
1807 .Case("v18", ARM64::Q18)
1808 .Case("v19", ARM64::Q19)
1809 .Case("v20", ARM64::Q20)
1810 .Case("v21", ARM64::Q21)
1811 .Case("v22", ARM64::Q22)
1812 .Case("v23", ARM64::Q23)
1813 .Case("v24", ARM64::Q24)
1814 .Case("v25", ARM64::Q25)
1815 .Case("v26", ARM64::Q26)
1816 .Case("v27", ARM64::Q27)
1817 .Case("v28", ARM64::Q28)
1818 .Case("v29", ARM64::Q29)
1819 .Case("v30", ARM64::Q30)
1820 .Case("v31", ARM64::Q31)
// Validate a ".<kind>" vector layout suffix (case-insensitive).
// NOTE(review): the actual .Case lines listing the accepted kinds
// (1826-1834) and the .Default/closing lines were elided from this excerpt.
1824 static bool isValidVectorKind(StringRef Name) {
1825 return StringSwitch<bool>(Name.lower())
1835 // Accept the width neutral ones, too, for verbose syntax. If those
1836 // aren't used in the right places, the token operand won't match so
1837 // all will work out.
// Decompose an already-validated vector kind suffix (e.g. ".4s") into a
// lane count and element-kind character. A two-character suffix like ".s"
// has no lane count.
// NOTE(review): the early `NumElements = 0; return;` path implied by the
// gap at 1848/1850-1854 was elided from this excerpt — confirm.
1845 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1846 char &ElementKind) {
1847 assert(isValidVectorKind(Name));
1849 ElementKind = Name.lower()[Name.size() - 1];
1852 if (Name.size() == 2)
1855 // Parse the lane count
1856 Name = Name.drop_front();
1857 while (isdigit(Name.front())) {
1858 NumElements = 10 * NumElements + (Name.front() - '0');
1859 Name = Name.drop_front();
// MCTargetAsmParser entry point: parse a register, reporting its source
// range. Returns true (failure) when no register was recognized
// (tryParseRegister yields -1).
1863 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1865 StartLoc = getLoc();
1866 RegNo = tryParseRegister();
1867 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1868 return (RegNo == (unsigned)-1);
1871 /// tryParseRegister - Try to parse a register name. The token must be an
1872 /// Identifier when called, and if it is a register name the token is eaten and
1873 /// the register is added to the operand list.
// Returns the register number, or presumably -1 on no match — the final
// return and the condition guarding the alias StringSwitch (lines 1877,
// 1881, 1887-1891, 1893-1895) were elided from this excerpt; confirm.
1874 int ARM64AsmParser::tryParseRegister() {
1875 const AsmToken &Tok = Parser.getTok();
1876 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Matching is done on the lowercased spelling, so register names are
// case-insensitive.
1878 std::string lowerCase = Tok.getString().lower();
1879 unsigned RegNum = MatchRegisterName(lowerCase);
1880 // Also handle a few aliases of registers.
1882 RegNum = StringSwitch<unsigned>(lowerCase)
1883 .Case("x29", ARM64::FP)
1884 .Case("x30", ARM64::LR)
1885 .Case("x31", ARM64::XZR)
1886 .Case("w31", ARM64::WZR)
1892 Parser.Lex(); // Eat identifier token.
1896 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1897 /// kind specifier. If it is a register specifier, eat the token and return it.
// Splits "vN.kind" at the dot, matches the "vN" head, validates the
// ".kind" tail, and returns the register (error/return tails at lines
// 1901-1903, 1906, 1910, 1915-1917, 1919-1921 were elided; confirm).
1898 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind) {
1899 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1900 TokError("vector register expected");
1904 StringRef Name = Parser.getTok().getString();
1905 // If there is a kind specifier, it's separated from the register name by
1907 size_t Start = 0, Next = Name.find('.');
1908 StringRef Head = Name.slice(Start, Next);
1909 unsigned RegNum = matchVectorRegName(Head);
1911 if (Next != StringRef::npos) {
1912 Kind = Name.slice(Next, StringRef::npos);
1913 if (!isValidVectorKind(Kind)) {
1914 TokError("invalid vector kind qualifier");
1918 Parser.Lex(); // Eat the register token.
// Map a "cN"/"CN" system-control-register name to its index.
// NOTE(review): almost the entire body was elided from this excerpt
// (lines 1926, 1928-1930, 1932-1958, 1960-1979 missing) — only the
// length-switch skeleton survives. Do not modify from this view.
1924 static int MatchSysCRName(StringRef Name) {
1925 // Use the same layout as the tablegen'erated register name matcher. Ugly,
1927 switch (Name.size()) {
1931 if (Name[0] != 'c' && Name[0] != 'C')
1959 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
1980 llvm_unreachable("Unhandled SysCR operand string!");
1984 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Consumes an identifier like "c7" and pushes a k_SysCR operand.
// NOTE(review): the declaration of S (presumably `SMLoc S = getLoc();`,
// line 1987) and the `Num == -1` guard around line 1993 were elided from
// this excerpt — confirm against the full file.
1985 ARM64AsmParser::OperandMatchResultTy
1986 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1988 const AsmToken &Tok = Parser.getTok();
1989 if (Tok.isNot(AsmToken::Identifier))
1990 return MatchOperand_NoMatch;
1992 int Num = MatchSysCRName(Tok.getString());
1994 return MatchOperand_NoMatch;
1996 Parser.Lex(); // Eat identifier token.
1997 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
1998 return MatchOperand_Success;
2001 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either "#<imm>" (must be a constant in [0,31]) or one of the
// named prefetch hints (pldl1keep, pstl3strm, ...). Pushes a k_Prefetch
// operand on success.
// NOTE(review): the declaration of S (presumably `SMLoc S = getLoc();`,
// line 2004), several guard/brace lines (2012, 2014, 2017, 2019, 2022-2023,
// 2026-2027, 2031-2032), and the StringSwitch `.Default(0xff)` (line 2046)
// were elided from this excerpt — the 0xff sentinel check below implies the
// default; confirm against the full file.
2002 ARM64AsmParser::OperandMatchResultTy
2003 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2005 const AsmToken &Tok = Parser.getTok();
2006 // Either an identifier for named values or a 5-bit immediate.
2007 if (Tok.is(AsmToken::Hash)) {
2008 Parser.Lex(); // Eat hash token.
2009 const MCExpr *ImmVal;
2010 if (getParser().parseExpression(ImmVal))
2011 return MatchOperand_ParseFail;
2013 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2015 TokError("immediate value expected for prefetch operand");
2016 return MatchOperand_ParseFail;
2018 unsigned prfop = MCE->getValue();
2020 TokError("prefetch operand out of range, [0,31] expected");
2021 return MatchOperand_ParseFail;
2024 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2025 return MatchOperand_Success;
2028 if (Tok.isNot(AsmToken::Identifier)) {
2029 TokError("pre-fetch hint expected");
2030 return MatchOperand_ParseFail;
2033 unsigned prfop = StringSwitch<unsigned>(Tok.getString())
2034 .Case("pldl1keep", ARM64_AM::PLDL1KEEP)
2035 .Case("pldl1strm", ARM64_AM::PLDL1STRM)
2036 .Case("pldl2keep", ARM64_AM::PLDL2KEEP)
2037 .Case("pldl2strm", ARM64_AM::PLDL2STRM)
2038 .Case("pldl3keep", ARM64_AM::PLDL3KEEP)
2039 .Case("pldl3strm", ARM64_AM::PLDL3STRM)
2040 .Case("pstl1keep", ARM64_AM::PSTL1KEEP)
2041 .Case("pstl1strm", ARM64_AM::PSTL1STRM)
2042 .Case("pstl2keep", ARM64_AM::PSTL2KEEP)
2043 .Case("pstl2strm", ARM64_AM::PSTL2STRM)
2044 .Case("pstl3keep", ARM64_AM::PSTL3KEEP)
2045 .Case("pstl3strm", ARM64_AM::PSTL3STRM)
// 0xff is the sentinel for "no named hint matched".
2047 if (prfop == 0xff) {
2048 TokError("pre-fetch hint expected");
2049 return MatchOperand_ParseFail;
2052 Parser.Lex(); // Eat identifier token.
2053 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2054 return MatchOperand_Success;
2057 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// Classifies the symbolic expression's modifier: a bare symbol becomes an
// implicit @page (ELF ABS_PAGE); gotpage/tlvppage must not carry an addend;
// anything that is not a page-like modifier is rejected.
// NOTE(review): declarations of S and Expr (presumably
// `SMLoc S = getLoc(); const MCExpr *Expr;`, lines 2061-2062), several
// closing braces, and part of the gotpage addend condition (line 2081) were
// elided from this excerpt — confirm against the full file.
2059 ARM64AsmParser::OperandMatchResultTy
2060 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2063 if (parseSymbolicImmVal(Expr))
2064 return MatchOperand_ParseFail;
2066 ARM64MCExpr::VariantKind ELFRefKind;
2067 MCSymbolRefExpr::VariantKind DarwinRefKind;
2068 const MCConstantExpr *Addend;
2069 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2070 Error(S, "modified label reference + constant expected");
2071 return MatchOperand_ParseFail;
2074 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2075 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2076 // No modifier was specified at all; this is the syntax for an ELF basic
2077 // ADRP relocation (unfortunately).
2078 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2079 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2080 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2082 Error(S, "gotpage label reference not allowed an addend");
2083 return MatchOperand_ParseFail;
2084 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2085 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2086 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2087 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2088 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2089 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2090 // The operand must be an @page or @gotpage qualified symbolref.
2091 Error(S, "page or gotpage label reference expected")
2092 return MatchOperand_ParseFail;
2095 // We have a label reference possibly with addend. The addend is a raw value
2096 // here. The linker will adjust it to only reference the page.
2097 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2098 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2100 return MatchOperand_Success;
2103 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// Unlike ADRP, ADR takes an unqualified assembler-local ("L"-prefixed)
// label with no modifier.
// NOTE(review): declarations of S and Expr (lines 2107-2108) and closing
// braces were elided from this excerpt — confirm.
2105 ARM64AsmParser::OperandMatchResultTy
2106 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2109 if (getParser().parseExpression(Expr))
2110 return MatchOperand_ParseFail;
2112 // The operand must be an un-qualified assembler local symbolref.
2113 // FIXME: wrong for ELF.
2114 if (const MCSymbolRefExpr *SRE = dyn_cast<const MCSymbolRefExpr>(Expr)) {
2115 // FIXME: Should reference the MachineAsmInfo to get the private prefix.
2116 bool isTemporary = SRE->getSymbol().getName().startswith("L");
2117 if (!isTemporary || SRE->getKind() != MCSymbolRefExpr::VK_None) {
2118 Error(S, "unqualified, assembler-local label name expected");
2119 return MatchOperand_ParseFail;
2123 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2124 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2126 return MatchOperand_Success;
2129 /// tryParseFPImm - A floating point immediate expression operand.
// Accepts "#<real>" (must be FP8-encodable unless exactly zero, which is
// special-cased later to use the zero register), "#0x<byte>" (a raw
// pre-encoded value in [0,255]), or "#<int>" treated as a real number.
// NOTE(review): the declaration of S (presumably `SMLoc S = getLoc();`,
// line 2132/2133), the Minus-consuming lines after 2140 (2141-2143), an
// `int Val;` declaration and else-branch braces around 2163/2169-2170/2176,
// and closing braces were elided from this excerpt — confirm against the
// full file.
2130 ARM64AsmParser::OperandMatchResultTy
2131 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2134 if (Parser.getTok().isNot(AsmToken::Hash))
2135 return MatchOperand_NoMatch;
2136 Parser.Lex(); // Eat the '#'.
2138 // Handle negation, as that still comes through as a separate token.
2139 bool isNegative = false;
2140 if (Parser.getTok().is(AsmToken::Minus)) {
2144 const AsmToken &Tok = Parser.getTok();
2145 if (Tok.is(AsmToken::Real)) {
2146 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2147 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2148 // If we had a '-' in front, toggle the sign bit.
2149 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns -1 when the value is not representable as an
// 8-bit floating-point immediate.
2150 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2151 Parser.Lex(); // Eat the token.
2152 // Check for out of range values. As an exception, we let Zero through,
2153 // as we handle that special case in post-processing before matching in
2154 // order to use the zero register for it.
2155 if (Val == -1 && !RealVal.isZero()) {
2156 TokError("floating point value out of range");
2157 return MatchOperand_ParseFail;
2159 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2160 return MatchOperand_Success;
2162 if (Tok.is(AsmToken::Integer)) {
// Hex literals are taken as the already-encoded 8-bit immediate.
2164 if (!isNegative && Tok.getString().startswith("0x")) {
2165 Val = Tok.getIntVal();
2166 if (Val > 255 || Val < 0) {
2167 TokError("encoded floating point value out of range");
2168 return MatchOperand_ParseFail;
// Decimal integers are re-parsed as a real value and encoded like the
// AsmToken::Real path above.
2171 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2172 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2173 // If we had a '-' in front, toggle the sign bit.
2174 IntVal ^= (uint64_t)isNegative << 63;
2175 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2177 Parser.Lex(); // Eat the token.
2178 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2179 return MatchOperand_Success;
2182 TokError("invalid floating point immediate");
2183 return MatchOperand_ParseFail;
2186 /// parseCondCodeString - Parse a Condition Code string.
// Maps a condition-code mnemonic (lower- or upper-case, not mixed) to its
// ARM64CC value. "hs"/"lo" are aliases for "cs"/"cc".
// NOTE(review): the StringSwitch `.Default(...)` (invalid sentinel) and the
// final return (lines 2224-2227) were elided from this excerpt — the
// invalid-CC check in parseCondCode below implies a sentinel; confirm.
2187 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2188 unsigned CC = StringSwitch<unsigned>(Cond)
2189 .Case("eq", ARM64CC::EQ)
2190 .Case("ne", ARM64CC::NE)
2191 .Case("cs", ARM64CC::CS)
2192 .Case("hs", ARM64CC::CS)
2193 .Case("cc", ARM64CC::CC)
2194 .Case("lo", ARM64CC::CC)
2195 .Case("mi", ARM64CC::MI)
2196 .Case("pl", ARM64CC::PL)
2197 .Case("vs", ARM64CC::VS)
2198 .Case("vc", ARM64CC::VC)
2199 .Case("hi", ARM64CC::HI)
2200 .Case("ls", ARM64CC::LS)
2201 .Case("ge", ARM64CC::GE)
2202 .Case("lt", ARM64CC::LT)
2203 .Case("gt", ARM64CC::GT)
2204 .Case("le", ARM64CC::LE)
2205 .Case("al", ARM64CC::AL)
2206 // Upper case works too. Not mixed case, though.
2207 .Case("EQ", ARM64CC::EQ)
2208 .Case("NE", ARM64CC::NE)
2209 .Case("CS", ARM64CC::CS)
2210 .Case("HS", ARM64CC::CS)
2211 .Case("CC", ARM64CC::CC)
2212 .Case("LO", ARM64CC::CC)
2213 .Case("MI", ARM64CC::MI)
2214 .Case("PL", ARM64CC::PL)
2215 .Case("VS", ARM64CC::VS)
2216 .Case("VC", ARM64CC::VC)
2217 .Case("HI", ARM64CC::HI)
2218 .Case("LS", ARM64CC::LS)
2219 .Case("GE", ARM64CC::GE)
2220 .Case("LT", ARM64CC::LT)
2221 .Case("GT", ARM64CC::GT)
2222 .Case("LE", ARM64CC::LE)
2223 .Case("AL", ARM64CC::AL)
2228 /// parseCondCode - Parse a Condition Code operand.
// Consumes the CC identifier, optionally inverts it (for aliases such as
// conditional-select-with-inverted-CC), and pushes it as a constant-
// immediate operand.
// NOTE(review): the declaration of S, the invalid-CC comparison before the
// TokError (line 2237), the `if (invertCondCode)` guard at 2240-2241, and
// the final `return false;` (2247-2248) were elided from this excerpt —
// confirm against the full file.
2229 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2230 bool invertCondCode) {
2232 const AsmToken &Tok = Parser.getTok();
2233 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2235 StringRef Cond = Tok.getString();
2236 unsigned CC = parseCondCodeString(Cond);
2238 return TokError("invalid condition code");
2239 Parser.Lex(); // Eat identifier token.
2242 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2244 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2246 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2250 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2251 /// them if present.
// Recognizes lsl/lsr/asr/ror/msl (either case) followed by "#<imm>" where
// the immediate must fit in 6 bits, and pushes a k_Shifter operand.
// NOTE(review): the shift-name Lex(), no-match return, and several
// brace/return lines (2267-2268, 2270-2271, 2276, 2280-2281, 2283, 2285,
// 2288, 2290, 2292-2293) were elided from this excerpt — confirm against
// the full file.
2252 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2253 const AsmToken &Tok = Parser.getTok();
2254 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2255 .Case("lsl", ARM64_AM::LSL)
2256 .Case("lsr", ARM64_AM::LSR)
2257 .Case("asr", ARM64_AM::ASR)
2258 .Case("ror", ARM64_AM::ROR)
2259 .Case("msl", ARM64_AM::MSL)
2260 .Case("LSL", ARM64_AM::LSL)
2261 .Case("LSR", ARM64_AM::LSR)
2262 .Case("ASR", ARM64_AM::ASR)
2263 .Case("ROR", ARM64_AM::ROR)
2264 .Case("MSL", ARM64_AM::MSL)
2265 .Default(ARM64_AM::InvalidShift);
2266 if (ShOp == ARM64_AM::InvalidShift)
2269 SMLoc S = Tok.getLoc();
2272 // We expect a number here.
2273 if (getLexer().isNot(AsmToken::Hash))
2274 return TokError("immediate value expected for shifter operand");
2275 Parser.Lex(); // Eat the '#'.
2277 SMLoc ExprLoc = getLoc();
2278 const MCExpr *ImmVal;
2279 if (getParser().parseExpression(ImmVal))
2282 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2284 return TokError("immediate value expected for shifter operand");
// Shift amounts are encoded in 6 bits; reject anything wider.
2286 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2287 return Error(ExprLoc, "immediate value too large for shifter operand");
2289 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2291 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2295 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2296 /// them if present.
///
/// Recognizes the uxt*/sxt* extend mnemonics (upper- or lower-case), with
/// "lsl" accepted as an alias for UXTX. The extend amount is optional: when
/// the extend keyword is followed by end-of-statement, a comma, or anything
/// other than '#', an amount of 0 is assumed. Returns true on error.
2297 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2298 const AsmToken &Tok = Parser.getTok();
2299 ARM64_AM::ExtendType ExtOp =
2300 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2301 .Case("uxtb", ARM64_AM::UXTB)
2302 .Case("uxth", ARM64_AM::UXTH)
2303 .Case("uxtw", ARM64_AM::UXTW)
2304 .Case("uxtx", ARM64_AM::UXTX)
2305 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2306 .Case("sxtb", ARM64_AM::SXTB)
2307 .Case("sxth", ARM64_AM::SXTH)
2308 .Case("sxtw", ARM64_AM::SXTW)
2309 .Case("sxtx", ARM64_AM::SXTX)
2310 .Case("UXTB", ARM64_AM::UXTB)
2311 .Case("UXTH", ARM64_AM::UXTH)
2312 .Case("UXTW", ARM64_AM::UXTW)
2313 .Case("UXTX", ARM64_AM::UXTX)
2314 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2315 .Case("SXTB", ARM64_AM::SXTB)
2316 .Case("SXTH", ARM64_AM::SXTH)
2317 .Case("SXTW", ARM64_AM::SXTW)
2318 .Case("SXTX", ARM64_AM::SXTX)
2319 .Default(ARM64_AM::InvalidExtend);
2320 if (ExtOp == ARM64_AM::InvalidExtend)
2323 SMLoc S = Tok.getLoc();
// "uxtw" / "sxtx" etc. with no amount at all: implicit amount of 0.
2326 if (getLexer().is(AsmToken::EndOfStatement) ||
2327 getLexer().is(AsmToken::Comma)) {
2328 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2330 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
// Next token is not '#': also treat as an implicit zero amount.
2334 if (getLexer().isNot(AsmToken::Hash)) {
2335 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2337 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2341 Parser.Lex(); // Eat the '#'.
2343 const MCExpr *ImmVal;
2344 if (getParser().parseExpression(ImmVal))
// The amount must fold to a compile-time constant.
2347 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2349 return TokError("immediate value expected for extend operand");
2351 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2353 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2357 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2358 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Rewrites "<mnemonic> <op>[, Xt]" into an equivalent "sys #op1, Cn, Cm,
/// #op2[, Xt]" operand list. Operand names are matched case-insensitively
/// (compare_lower). Returns true (with a diagnostic) on error.
2359 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2360 OperandVector &Operands) {
// These mnemonics never take a size/type suffix, so any '.' is bogus.
2361 if (Name.find('.') != StringRef::npos)
2362 return TokError("invalid operand");
// All aliases lower to the "sys" mnemonic token.
2366 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2368 const AsmToken &Tok = Parser.getTok();
2369 StringRef Op = Tok.getString();
2370 SMLoc S = Tok.getLoc();
2372 const MCExpr *Expr = 0;
// Helper: append the four SYS sub-operands (#op1, Cn, Cm, #op2).
2374 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2376 Expr = MCConstantExpr::Create(op1, getContext()); \
2377 Operands.push_back( \
2378 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2379 Operands.push_back( \
2380 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2381 Operands.push_back( \
2382 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2383 Expr = MCConstantExpr::Create(op2, getContext()); \
2384 Operands.push_back( \
2385 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2388 if (Mnemonic == "ic") {
2389 if (!Op.compare_lower("ialluis")) {
2390 // SYS #0, C7, C1, #0
2391 SYS_ALIAS(0, 7, 1, 0);
2392 } else if (!Op.compare_lower("iallu")) {
2393 // SYS #0, C7, C5, #0
2394 SYS_ALIAS(0, 7, 5, 0);
2395 } else if (!Op.compare_lower("ivau")) {
2396 // SYS #3, C7, C5, #1
2397 SYS_ALIAS(3, 7, 5, 1);
2399 return TokError("invalid operand for IC instruction");
2401 } else if (Mnemonic == "dc") {
2402 if (!Op.compare_lower("zva")) {
2403 // SYS #3, C7, C4, #1
2404 SYS_ALIAS(3, 7, 4, 1);
2405 } else if (!Op.compare_lower("ivac")) {
2406 // SYS #0, C7, C6, #1
2407 SYS_ALIAS(0, 7, 6, 1);
2408 } else if (!Op.compare_lower("isw")) {
2409 // SYS #0, C7, C6, #2
2410 SYS_ALIAS(0, 7, 6, 2);
2411 } else if (!Op.compare_lower("cvac")) {
2412 // SYS #3, C7, C10, #1
2413 SYS_ALIAS(3, 7, 10, 1);
2414 } else if (!Op.compare_lower("csw")) {
2415 // SYS #0, C7, C10, #2
2416 SYS_ALIAS(0, 7, 10, 2);
2417 } else if (!Op.compare_lower("cvau")) {
2418 // SYS #3, C7, C11, #1
2419 SYS_ALIAS(3, 7, 11, 1);
2420 } else if (!Op.compare_lower("civac")) {
2421 // SYS #3, C7, C14, #1
2422 SYS_ALIAS(3, 7, 14, 1);
2423 } else if (!Op.compare_lower("cisw")) {
2424 // SYS #0, C7, C14, #2
2425 SYS_ALIAS(0, 7, 14, 2);
2427 return TokError("invalid operand for DC instruction");
2429 } else if (Mnemonic == "at") {
2430 if (!Op.compare_lower("s1e1r")) {
2431 // SYS #0, C7, C8, #0
2432 SYS_ALIAS(0, 7, 8, 0);
2433 } else if (!Op.compare_lower("s1e2r")) {
2434 // SYS #4, C7, C8, #0
2435 SYS_ALIAS(4, 7, 8, 0);
2436 } else if (!Op.compare_lower("s1e3r")) {
2437 // SYS #6, C7, C8, #0
2438 SYS_ALIAS(6, 7, 8, 0);
2439 } else if (!Op.compare_lower("s1e1w")) {
2440 // SYS #0, C7, C8, #1
2441 SYS_ALIAS(0, 7, 8, 1);
2442 } else if (!Op.compare_lower("s1e2w")) {
2443 // SYS #4, C7, C8, #1
2444 SYS_ALIAS(4, 7, 8, 1);
2445 } else if (!Op.compare_lower("s1e3w")) {
2446 // SYS #6, C7, C8, #1
2447 SYS_ALIAS(6, 7, 8, 1);
2448 } else if (!Op.compare_lower("s1e0r")) {
2449 // SYS #0, C7, C8, #2
2450 SYS_ALIAS(0, 7, 8, 2);
2451 } else if (!Op.compare_lower("s1e0w")) {
2452 // SYS #0, C7, C8, #3
2453 SYS_ALIAS(0, 7, 8, 3);
2454 } else if (!Op.compare_lower("s12e1r")) {
2455 // SYS #4, C7, C8, #4
2456 SYS_ALIAS(4, 7, 8, 4);
2457 } else if (!Op.compare_lower("s12e1w")) {
2458 // SYS #4, C7, C8, #5
2459 SYS_ALIAS(4, 7, 8, 5);
2460 } else if (!Op.compare_lower("s12e0r")) {
2461 // SYS #4, C7, C8, #6
2462 SYS_ALIAS(4, 7, 8, 6);
2463 } else if (!Op.compare_lower("s12e0w")) {
2464 // SYS #4, C7, C8, #7
2465 SYS_ALIAS(4, 7, 8, 7);
2467 return TokError("invalid operand for AT instruction");
2469 } else if (Mnemonic == "tlbi") {
2470 if (!Op.compare_lower("vmalle1is")) {
2471 // SYS #0, C8, C3, #0
2472 SYS_ALIAS(0, 8, 3, 0);
2473 } else if (!Op.compare_lower("alle2is")) {
2474 // SYS #4, C8, C3, #0
2475 SYS_ALIAS(4, 8, 3, 0);
2476 } else if (!Op.compare_lower("alle3is")) {
2477 // SYS #6, C8, C3, #0
2478 SYS_ALIAS(6, 8, 3, 0);
2479 } else if (!Op.compare_lower("vae1is")) {
2480 // SYS #0, C8, C3, #1
2481 SYS_ALIAS(0, 8, 3, 1);
2482 } else if (!Op.compare_lower("vae2is")) {
2483 // SYS #4, C8, C3, #1
2484 SYS_ALIAS(4, 8, 3, 1);
2485 } else if (!Op.compare_lower("vae3is")) {
2486 // SYS #6, C8, C3, #1
2487 SYS_ALIAS(6, 8, 3, 1);
2488 } else if (!Op.compare_lower("aside1is")) {
2489 // SYS #0, C8, C3, #2
2490 SYS_ALIAS(0, 8, 3, 2);
2491 } else if (!Op.compare_lower("vaae1is")) {
2492 // SYS #0, C8, C3, #3
2493 SYS_ALIAS(0, 8, 3, 3);
2494 } else if (!Op.compare_lower("alle1is")) {
2495 // SYS #4, C8, C3, #4
2496 SYS_ALIAS(4, 8, 3, 4);
2497 } else if (!Op.compare_lower("vale1is")) {
2498 // SYS #0, C8, C3, #5
2499 SYS_ALIAS(0, 8, 3, 5);
2500 } else if (!Op.compare_lower("vaale1is")) {
2501 // SYS #0, C8, C3, #7
2502 SYS_ALIAS(0, 8, 3, 7);
2503 } else if (!Op.compare_lower("vmalle1")) {
2504 // SYS #0, C8, C7, #0
2505 SYS_ALIAS(0, 8, 7, 0);
2506 } else if (!Op.compare_lower("alle2")) {
2507 // SYS #4, C8, C7, #0
2508 SYS_ALIAS(4, 8, 7, 0);
2509 } else if (!Op.compare_lower("vale2is")) {
2510 // SYS #4, C8, C3, #5
2511 SYS_ALIAS(4, 8, 3, 5);
2512 } else if (!Op.compare_lower("vale3is")) {
2513 // SYS #6, C8, C3, #5
2514 SYS_ALIAS(6, 8, 3, 5);
2515 } else if (!Op.compare_lower("alle3")) {
2516 // SYS #6, C8, C7, #0
2517 SYS_ALIAS(6, 8, 7, 0);
2518 } else if (!Op.compare_lower("vae1")) {
2519 // SYS #0, C8, C7, #1
2520 SYS_ALIAS(0, 8, 7, 1);
2521 } else if (!Op.compare_lower("vae2")) {
2522 // SYS #4, C8, C7, #1
2523 SYS_ALIAS(4, 8, 7, 1);
2524 } else if (!Op.compare_lower("vae3")) {
2525 // SYS #6, C8, C7, #1
2526 SYS_ALIAS(6, 8, 7, 1);
2527 } else if (!Op.compare_lower("aside1")) {
2528 // SYS #0, C8, C7, #2
2529 SYS_ALIAS(0, 8, 7, 2);
2530 } else if (!Op.compare_lower("vaae1")) {
2531 // SYS #0, C8, C7, #3
2532 SYS_ALIAS(0, 8, 7, 3);
2533 } else if (!Op.compare_lower("alle1")) {
2534 // SYS #4, C8, C7, #4
2535 SYS_ALIAS(4, 8, 7, 4);
2536 } else if (!Op.compare_lower("vale1")) {
2537 // SYS #0, C8, C7, #5
2538 SYS_ALIAS(0, 8, 7, 5);
2539 } else if (!Op.compare_lower("vale2")) {
2540 // SYS #4, C8, C7, #5
2541 SYS_ALIAS(4, 8, 7, 5);
2542 } else if (!Op.compare_lower("vale3")) {
2543 // SYS #6, C8, C7, #5
2544 SYS_ALIAS(6, 8, 7, 5);
2545 } else if (!Op.compare_lower("vaale1")) {
2546 // SYS #0, C8, C7, #7
2547 SYS_ALIAS(0, 8, 7, 7);
2548 } else if (!Op.compare_lower("ipas2e1")) {
2549 // SYS #4, C8, C4, #1
2550 SYS_ALIAS(4, 8, 4, 1);
2551 } else if (!Op.compare_lower("ipas2le1")) {
2552 // SYS #4, C8, C4, #5
2553 SYS_ALIAS(4, 8, 4, 5);
2554 } else if (!Op.compare_lower("vmalls12e1")) {
2555 // SYS #4, C8, C7, #6
2556 SYS_ALIAS(4, 8, 7, 6);
2557 } else if (!Op.compare_lower("vmalls12e1is")) {
2558 // SYS #4, C8, C3, #6
2559 SYS_ALIAS(4, 8, 3, 6);
2561 return TokError("invalid operand for TLBI instruction");
2567 Parser.Lex(); // Eat operand.
2569 // Check for the optional register operand.
2570 if (getLexer().is(AsmToken::Comma)) {
2571 Parser.Lex(); // Eat comma.
2573 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2574 return TokError("expected register operand");
// Anything left on the line after the (optional) register is an error.
2577 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2578 Parser.eatToEndOfStatement();
2579 return TokError("unexpected token in argument list");
2582 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a barrier instruction
/// (DMB/DSB/ISB): either a '#'-prefixed immediate in [0, 15] or one of the
/// symbolic option names (lower-case only). For ISB the only valid named
/// option is "sy".
2586 ARM64AsmParser::OperandMatchResultTy
2587 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2588 const AsmToken &Tok = Parser.getTok();
2590 // Can be either a #imm style literal or an option name
2591 if (Tok.is(AsmToken::Hash)) {
2592 // Immediate operand.
2593 Parser.Lex(); // Eat the '#'
2594 const MCExpr *ImmVal;
2595 SMLoc ExprLoc = getLoc();
2596 if (getParser().parseExpression(ImmVal))
2597 return MatchOperand_ParseFail;
// The immediate must fold to a compile-time constant.
2598 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2600 Error(ExprLoc, "immediate value expected for barrier operand");
2601 return MatchOperand_ParseFail;
// Barrier option field is 4 bits wide.
2603 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2604 Error(ExprLoc, "barrier operand out of range");
2605 return MatchOperand_ParseFail;
2608 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2609 return MatchOperand_Success;
2612 if (Tok.isNot(AsmToken::Identifier)) {
2613 TokError("invalid operand for instruction");
2614 return MatchOperand_ParseFail;
// Note: names are matched case-sensitively (lower-case spellings only).
2617 unsigned Opt = StringSwitch<unsigned>(Tok.getString())
2618 .Case("oshld", ARM64SYS::OSHLD)
2619 .Case("oshst", ARM64SYS::OSHST)
2620 .Case("osh", ARM64SYS::OSH)
2621 .Case("nshld", ARM64SYS::NSHLD)
2622 .Case("nshst", ARM64SYS::NSHST)
2623 .Case("nsh", ARM64SYS::NSH)
2624 .Case("ishld", ARM64SYS::ISHLD)
2625 .Case("ishst", ARM64SYS::ISHST)
2626 .Case("ish", ARM64SYS::ISH)
2627 .Case("ld", ARM64SYS::LD)
2628 .Case("st", ARM64SYS::ST)
2629 .Case("sy", ARM64SYS::SY)
2630 .Default(ARM64SYS::InvalidBarrier);
2631 if (Opt == ARM64SYS::InvalidBarrier) {
2632 TokError("invalid barrier option name");
2633 return MatchOperand_ParseFail;
2636 // The only valid named option for ISB is 'sy'
2637 if (Mnemonic == "isb" && Opt != ARM64SYS::SY) {
2638 TokError("'sy' or #imm operand expected");
2639 return MatchOperand_ParseFail;
2642 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2643 Parser.Lex(); // Consume the option
2645 return MatchOperand_Success;
/// tryParseSystemRegister - Parse a system register operand (MRS/MSR).
///
/// First tries the table of known symbolic names (matched case-insensitively
/// by lowering the token before the StringSwitch). If that fails, falls back
/// to the generic "s<op0>_<op1>_c<CRn>_c<CRm>_<op2>" spelling and packs the
/// fields into the encoded register value directly.
2648 ARM64AsmParser::OperandMatchResultTy
2649 ARM64AsmParser::tryParseSystemRegister(OperandVector &Operands) {
2650 const AsmToken &Tok = Parser.getTok();
2652 // It can be specified as a symbolic name.
2653 if (Tok.isNot(AsmToken::Identifier))
2654 return MatchOperand_NoMatch;
// Lower-case copy of the token so matching is case-insensitive.
2656 auto ID = Tok.getString().lower();
2657 ARM64SYS::SystemRegister Reg =
2658 StringSwitch<ARM64SYS::SystemRegister>(ID)
2659 .Case("spsr_el1", ARM64SYS::SPSR_svc)
2660 .Case("spsr_svc", ARM64SYS::SPSR_svc)
2661 .Case("elr_el1", ARM64SYS::ELR_EL1)
2662 .Case("sp_el0", ARM64SYS::SP_EL0)
2663 .Case("spsel", ARM64SYS::SPSel)
2664 .Case("daif", ARM64SYS::DAIF)
2665 .Case("currentel", ARM64SYS::CurrentEL)
2666 .Case("nzcv", ARM64SYS::NZCV)
2667 .Case("fpcr", ARM64SYS::FPCR)
2668 .Case("fpsr", ARM64SYS::FPSR)
2669 .Case("dspsr", ARM64SYS::DSPSR)
2670 .Case("dlr", ARM64SYS::DLR)
2671 .Case("spsr_el2", ARM64SYS::SPSR_hyp)
2672 .Case("spsr_hyp", ARM64SYS::SPSR_hyp)
2673 .Case("elr_el2", ARM64SYS::ELR_EL2)
2674 .Case("sp_el1", ARM64SYS::SP_EL1)
2675 .Case("spsr_irq", ARM64SYS::SPSR_irq)
2676 .Case("spsr_abt", ARM64SYS::SPSR_abt)
2677 .Case("spsr_und", ARM64SYS::SPSR_und)
2678 .Case("spsr_fiq", ARM64SYS::SPSR_fiq)
2679 .Case("spsr_el3", ARM64SYS::SPSR_EL3)
2680 .Case("elr_el3", ARM64SYS::ELR_EL3)
2681 .Case("sp_el2", ARM64SYS::SP_EL2)
2682 .Case("midr_el1", ARM64SYS::MIDR_EL1)
2683 .Case("ctr_el0", ARM64SYS::CTR_EL0)
2684 .Case("mpidr_el1", ARM64SYS::MPIDR_EL1)
2685 .Case("ecoidr_el1", ARM64SYS::ECOIDR_EL1)
2686 .Case("dczid_el0", ARM64SYS::DCZID_EL0)
2687 .Case("mvfr0_el1", ARM64SYS::MVFR0_EL1)
2688 .Case("mvfr1_el1", ARM64SYS::MVFR1_EL1)
2689 .Case("id_aa64pfr0_el1", ARM64SYS::ID_AA64PFR0_EL1)
2690 .Case("id_aa64pfr1_el1", ARM64SYS::ID_AA64PFR1_EL1)
2691 .Case("id_aa64dfr0_el1", ARM64SYS::ID_AA64DFR0_EL1)
2692 .Case("id_aa64dfr1_el1", ARM64SYS::ID_AA64DFR1_EL1)
2693 .Case("id_aa64isar0_el1", ARM64SYS::ID_AA64ISAR0_EL1)
2694 .Case("id_aa64isar1_el1", ARM64SYS::ID_AA64ISAR1_EL1)
2695 .Case("id_aa64mmfr0_el1", ARM64SYS::ID_AA64MMFR0_EL1)
2696 .Case("id_aa64mmfr1_el1", ARM64SYS::ID_AA64MMFR1_EL1)
2697 .Case("ccsidr_el1", ARM64SYS::CCSIDR_EL1)
2698 .Case("clidr_el1", ARM64SYS::CLIDR_EL1)
2699 .Case("aidr_el1", ARM64SYS::AIDR_EL1)
2700 .Case("csselr_el1", ARM64SYS::CSSELR_EL1)
2701 .Case("vpidr_el2", ARM64SYS::VPIDR_EL2)
2702 .Case("vmpidr_el2", ARM64SYS::VMPIDR_EL2)
2703 .Case("sctlr_el1", ARM64SYS::SCTLR_EL1)
2704 .Case("sctlr_el2", ARM64SYS::SCTLR_EL2)
2705 .Case("sctlr_el3", ARM64SYS::SCTLR_EL3)
2706 .Case("actlr_el1", ARM64SYS::ACTLR_EL1)
2707 .Case("actlr_el2", ARM64SYS::ACTLR_EL2)
2708 .Case("actlr_el3", ARM64SYS::ACTLR_EL3)
2709 .Case("cpacr_el1", ARM64SYS::CPACR_EL1)
2710 .Case("cptr_el2", ARM64SYS::CPTR_EL2)
2711 .Case("cptr_el3", ARM64SYS::CPTR_EL3)
2712 .Case("scr_el3", ARM64SYS::SCR_EL3)
2713 .Case("hcr_el2", ARM64SYS::HCR_EL2)
2714 .Case("mdcr_el2", ARM64SYS::MDCR_EL2)
2715 .Case("mdcr_el3", ARM64SYS::MDCR_EL3)
2716 .Case("hstr_el2", ARM64SYS::HSTR_EL2)
2717 .Case("hacr_el2", ARM64SYS::HACR_EL2)
2718 .Case("ttbr0_el1", ARM64SYS::TTBR0_EL1)
2719 .Case("ttbr1_el1", ARM64SYS::TTBR1_EL1)
2720 .Case("ttbr0_el2", ARM64SYS::TTBR0_EL2)
2721 .Case("ttbr0_el3", ARM64SYS::TTBR0_EL3)
2722 .Case("vttbr_el2", ARM64SYS::VTTBR_EL2)
2723 .Case("tcr_el1", ARM64SYS::TCR_EL1)
2724 .Case("tcr_el2", ARM64SYS::TCR_EL2)
2725 .Case("tcr_el3", ARM64SYS::TCR_EL3)
2726 .Case("vtcr_el2", ARM64SYS::VTCR_EL2)
2727 .Case("adfsr_el1", ARM64SYS::ADFSR_EL1)
2728 .Case("aifsr_el1", ARM64SYS::AIFSR_EL1)
2729 .Case("adfsr_el2", ARM64SYS::ADFSR_EL2)
2730 .Case("aifsr_el2", ARM64SYS::AIFSR_EL2)
2731 .Case("adfsr_el3", ARM64SYS::ADFSR_EL3)
2732 .Case("aifsr_el3", ARM64SYS::AIFSR_EL3)
2733 .Case("esr_el1", ARM64SYS::ESR_EL1)
2734 .Case("esr_el2", ARM64SYS::ESR_EL2)
2735 .Case("esr_el3", ARM64SYS::ESR_EL3)
2736 .Case("far_el1", ARM64SYS::FAR_EL1)
2737 .Case("far_el2", ARM64SYS::FAR_EL2)
2738 .Case("far_el3", ARM64SYS::FAR_EL3)
2739 .Case("hpfar_el2", ARM64SYS::HPFAR_EL2)
2740 .Case("par_el1", ARM64SYS::PAR_EL1)
2741 .Case("mair_el1", ARM64SYS::MAIR_EL1)
2742 .Case("mair_el2", ARM64SYS::MAIR_EL2)
2743 .Case("mair_el3", ARM64SYS::MAIR_EL3)
2744 .Case("amair_el1", ARM64SYS::AMAIR_EL1)
2745 .Case("amair_el2", ARM64SYS::AMAIR_EL2)
2746 .Case("amair_el3", ARM64SYS::AMAIR_EL3)
2747 .Case("vbar_el1", ARM64SYS::VBAR_EL1)
2748 .Case("vbar_el2", ARM64SYS::VBAR_EL2)
2749 .Case("vbar_el3", ARM64SYS::VBAR_EL3)
2750 .Case("rvbar_el1", ARM64SYS::RVBAR_EL1)
2751 .Case("rvbar_el2", ARM64SYS::RVBAR_EL2)
2752 .Case("rvbar_el3", ARM64SYS::RVBAR_EL3)
2753 .Case("isr_el1", ARM64SYS::ISR_EL1)
2754 .Case("contextidr_el1", ARM64SYS::CONTEXTIDR_EL1)
2755 .Case("tpidr_el0", ARM64SYS::TPIDR_EL0)
2756 .Case("tpidrro_el0", ARM64SYS::TPIDRRO_EL0)
2757 .Case("tpidr_el1", ARM64SYS::TPIDR_EL1)
2758 .Case("tpidr_el2", ARM64SYS::TPIDR_EL2)
2759 .Case("tpidr_el3", ARM64SYS::TPIDR_EL3)
2760 .Case("teecr32_el1", ARM64SYS::TEECR32_EL1)
2761 .Case("cntfrq_el0", ARM64SYS::CNTFRQ_EL0)
2762 .Case("cntpct_el0", ARM64SYS::CNTPCT_EL0)
2763 .Case("cntvct_el0", ARM64SYS::CNTVCT_EL0)
2764 .Case("cntvoff_el2", ARM64SYS::CNTVOFF_EL2)
2765 .Case("cntkctl_el1", ARM64SYS::CNTKCTL_EL1)
2766 .Case("cnthctl_el2", ARM64SYS::CNTHCTL_EL2)
2767 .Case("cntp_tval_el0", ARM64SYS::CNTP_TVAL_EL0)
2768 .Case("cntp_ctl_el0", ARM64SYS::CNTP_CTL_EL0)
2769 .Case("cntp_cval_el0", ARM64SYS::CNTP_CVAL_EL0)
2770 .Case("cntv_tval_el0", ARM64SYS::CNTV_TVAL_EL0)
2771 .Case("cntv_ctl_el0", ARM64SYS::CNTV_CTL_EL0)
2772 .Case("cntv_cval_el0", ARM64SYS::CNTV_CVAL_EL0)
2773 .Case("cnthp_tval_el2", ARM64SYS::CNTHP_TVAL_EL2)
2774 .Case("cnthp_ctl_el2", ARM64SYS::CNTHP_CTL_EL2)
2775 .Case("cnthp_cval_el2", ARM64SYS::CNTHP_CVAL_EL2)
2776 .Case("cntps_tval_el1", ARM64SYS::CNTPS_TVAL_EL1)
2777 .Case("cntps_ctl_el1", ARM64SYS::CNTPS_CTL_EL1)
2778 .Case("cntps_cval_el1", ARM64SYS::CNTPS_CVAL_EL1)
2779 .Case("dacr32_el2", ARM64SYS::DACR32_EL2)
2780 .Case("ifsr32_el2", ARM64SYS::IFSR32_EL2)
2781 .Case("teehbr32_el1", ARM64SYS::TEEHBR32_EL1)
2782 .Case("sder32_el3", ARM64SYS::SDER32_EL3)
2783 .Case("fpexc32_el2", ARM64SYS::FPEXC32_EL2)
2784 .Case("current_el", ARM64SYS::CurrentEL)
2785 .Case("pmevcntr0_el0", ARM64SYS::PMEVCNTR0_EL0)
2786 .Case("pmevcntr1_el0", ARM64SYS::PMEVCNTR1_EL0)
2787 .Case("pmevcntr2_el0", ARM64SYS::PMEVCNTR2_EL0)
2788 .Case("pmevcntr3_el0", ARM64SYS::PMEVCNTR3_EL0)
2789 .Case("pmevcntr4_el0", ARM64SYS::PMEVCNTR4_EL0)
2790 .Case("pmevcntr5_el0", ARM64SYS::PMEVCNTR5_EL0)
2791 .Case("pmevcntr6_el0", ARM64SYS::PMEVCNTR6_EL0)
2792 .Case("pmevcntr7_el0", ARM64SYS::PMEVCNTR7_EL0)
2793 .Case("pmevcntr8_el0", ARM64SYS::PMEVCNTR8_EL0)
2794 .Case("pmevcntr9_el0", ARM64SYS::PMEVCNTR9_EL0)
2795 .Case("pmevcntr10_el0", ARM64SYS::PMEVCNTR10_EL0)
2796 .Case("pmevcntr11_el0", ARM64SYS::PMEVCNTR11_EL0)
2797 .Case("pmevcntr12_el0", ARM64SYS::PMEVCNTR12_EL0)
2798 .Case("pmevcntr13_el0", ARM64SYS::PMEVCNTR13_EL0)
2799 .Case("pmevcntr14_el0", ARM64SYS::PMEVCNTR14_EL0)
2800 .Case("pmevcntr15_el0", ARM64SYS::PMEVCNTR15_EL0)
2801 .Case("pmevcntr16_el0", ARM64SYS::PMEVCNTR16_EL0)
2802 .Case("pmevcntr17_el0", ARM64SYS::PMEVCNTR17_EL0)
2803 .Case("pmevcntr18_el0", ARM64SYS::PMEVCNTR18_EL0)
2804 .Case("pmevcntr19_el0", ARM64SYS::PMEVCNTR19_EL0)
2805 .Case("pmevcntr20_el0", ARM64SYS::PMEVCNTR20_EL0)
2806 .Case("pmevcntr21_el0", ARM64SYS::PMEVCNTR21_EL0)
2807 .Case("pmevcntr22_el0", ARM64SYS::PMEVCNTR22_EL0)
2808 .Case("pmevcntr23_el0", ARM64SYS::PMEVCNTR23_EL0)
2809 .Case("pmevcntr24_el0", ARM64SYS::PMEVCNTR24_EL0)
2810 .Case("pmevcntr25_el0", ARM64SYS::PMEVCNTR25_EL0)
2811 .Case("pmevcntr26_el0", ARM64SYS::PMEVCNTR26_EL0)
2812 .Case("pmevcntr27_el0", ARM64SYS::PMEVCNTR27_EL0)
2813 .Case("pmevcntr28_el0", ARM64SYS::PMEVCNTR28_EL0)
2814 .Case("pmevcntr29_el0", ARM64SYS::PMEVCNTR29_EL0)
2815 .Case("pmevcntr30_el0", ARM64SYS::PMEVCNTR30_EL0)
2816 .Case("pmevtyper0_el0", ARM64SYS::PMEVTYPER0_EL0)
2817 .Case("pmevtyper1_el0", ARM64SYS::PMEVTYPER1_EL0)
2818 .Case("pmevtyper2_el0", ARM64SYS::PMEVTYPER2_EL0)
2819 .Case("pmevtyper3_el0", ARM64SYS::PMEVTYPER3_EL0)
2820 .Case("pmevtyper4_el0", ARM64SYS::PMEVTYPER4_EL0)
2821 .Case("pmevtyper5_el0", ARM64SYS::PMEVTYPER5_EL0)
2822 .Case("pmevtyper6_el0", ARM64SYS::PMEVTYPER6_EL0)
2823 .Case("pmevtyper7_el0", ARM64SYS::PMEVTYPER7_EL0)
2824 .Case("pmevtyper8_el0", ARM64SYS::PMEVTYPER8_EL0)
2825 .Case("pmevtyper9_el0", ARM64SYS::PMEVTYPER9_EL0)
2826 .Case("pmevtyper10_el0", ARM64SYS::PMEVTYPER10_EL0)
2827 .Case("pmevtyper11_el0", ARM64SYS::PMEVTYPER11_EL0)
2828 .Case("pmevtyper12_el0", ARM64SYS::PMEVTYPER12_EL0)
2829 .Case("pmevtyper13_el0", ARM64SYS::PMEVTYPER13_EL0)
2830 .Case("pmevtyper14_el0", ARM64SYS::PMEVTYPER14_EL0)
2831 .Case("pmevtyper15_el0", ARM64SYS::PMEVTYPER15_EL0)
2832 .Case("pmevtyper16_el0", ARM64SYS::PMEVTYPER16_EL0)
2833 .Case("pmevtyper17_el0", ARM64SYS::PMEVTYPER17_EL0)
2834 .Case("pmevtyper18_el0", ARM64SYS::PMEVTYPER18_EL0)
2835 .Case("pmevtyper19_el0", ARM64SYS::PMEVTYPER19_EL0)
2836 .Case("pmevtyper20_el0", ARM64SYS::PMEVTYPER20_EL0)
2837 .Case("pmevtyper21_el0", ARM64SYS::PMEVTYPER21_EL0)
2838 .Case("pmevtyper22_el0", ARM64SYS::PMEVTYPER22_EL0)
2839 .Case("pmevtyper23_el0", ARM64SYS::PMEVTYPER23_EL0)
2840 .Case("pmevtyper24_el0", ARM64SYS::PMEVTYPER24_EL0)
2841 .Case("pmevtyper25_el0", ARM64SYS::PMEVTYPER25_EL0)
2842 .Case("pmevtyper26_el0", ARM64SYS::PMEVTYPER26_EL0)
2843 .Case("pmevtyper27_el0", ARM64SYS::PMEVTYPER27_EL0)
2844 .Case("pmevtyper28_el0", ARM64SYS::PMEVTYPER28_EL0)
2845 .Case("pmevtyper29_el0", ARM64SYS::PMEVTYPER29_EL0)
2846 .Case("pmevtyper30_el0", ARM64SYS::PMEVTYPER30_EL0)
2847 .Case("pmccfiltr_el0", ARM64SYS::PMCCFILTR_EL0)
2848 .Case("rmr_el3", ARM64SYS::RMR_EL3)
2849 .Case("rmr_el2", ARM64SYS::RMR_EL2)
2850 .Case("rmr_el1", ARM64SYS::RMR_EL1)
2851 .Case("cpm_ioacc_ctl_el3", ARM64SYS::CPM_IOACC_CTL_EL3)
2852 .Case("mdccsr_el0", ARM64SYS::MDCCSR_EL0)
2853 .Case("mdccint_el1", ARM64SYS::MDCCINT_EL1)
2854 .Case("dbgdtr_el0", ARM64SYS::DBGDTR_EL0)
2855 .Case("dbgdtrrx_el0", ARM64SYS::DBGDTRRX_EL0)
2856 .Case("dbgdtrtx_el0", ARM64SYS::DBGDTRTX_EL0)
2857 .Case("dbgvcr32_el2", ARM64SYS::DBGVCR32_EL2)
2858 .Case("osdtrrx_el1", ARM64SYS::OSDTRRX_EL1)
2859 .Case("mdscr_el1", ARM64SYS::MDSCR_EL1)
2860 .Case("osdtrtx_el1", ARM64SYS::OSDTRTX_EL1)
// NOTE(review): "oseccr_el11" looks like a typo for "oseccr_el1" (the enum
// is spelled OSECCR_EL11 too) — confirm against the ARM ARM before changing.
2861 .Case("oseccr_el11", ARM64SYS::OSECCR_EL11)
2862 .Case("dbgbvr0_el1", ARM64SYS::DBGBVR0_EL1)
2863 .Case("dbgbvr1_el1", ARM64SYS::DBGBVR1_EL1)
2864 .Case("dbgbvr2_el1", ARM64SYS::DBGBVR2_EL1)
2865 .Case("dbgbvr3_el1", ARM64SYS::DBGBVR3_EL1)
2866 .Case("dbgbvr4_el1", ARM64SYS::DBGBVR4_EL1)
2867 .Case("dbgbvr5_el1", ARM64SYS::DBGBVR5_EL1)
2868 .Case("dbgbvr6_el1", ARM64SYS::DBGBVR6_EL1)
2869 .Case("dbgbvr7_el1", ARM64SYS::DBGBVR7_EL1)
2870 .Case("dbgbvr8_el1", ARM64SYS::DBGBVR8_EL1)
2871 .Case("dbgbvr9_el1", ARM64SYS::DBGBVR9_EL1)
2872 .Case("dbgbvr10_el1", ARM64SYS::DBGBVR10_EL1)
2873 .Case("dbgbvr11_el1", ARM64SYS::DBGBVR11_EL1)
2874 .Case("dbgbvr12_el1", ARM64SYS::DBGBVR12_EL1)
2875 .Case("dbgbvr13_el1", ARM64SYS::DBGBVR13_EL1)
2876 .Case("dbgbvr14_el1", ARM64SYS::DBGBVR14_EL1)
2877 .Case("dbgbvr15_el1", ARM64SYS::DBGBVR15_EL1)
2878 .Case("dbgbcr0_el1", ARM64SYS::DBGBCR0_EL1)
2879 .Case("dbgbcr1_el1", ARM64SYS::DBGBCR1_EL1)
2880 .Case("dbgbcr2_el1", ARM64SYS::DBGBCR2_EL1)
2881 .Case("dbgbcr3_el1", ARM64SYS::DBGBCR3_EL1)
2882 .Case("dbgbcr4_el1", ARM64SYS::DBGBCR4_EL1)
2883 .Case("dbgbcr5_el1", ARM64SYS::DBGBCR5_EL1)
2884 .Case("dbgbcr6_el1", ARM64SYS::DBGBCR6_EL1)
2885 .Case("dbgbcr7_el1", ARM64SYS::DBGBCR7_EL1)
2886 .Case("dbgbcr8_el1", ARM64SYS::DBGBCR8_EL1)
2887 .Case("dbgbcr9_el1", ARM64SYS::DBGBCR9_EL1)
2888 .Case("dbgbcr10_el1", ARM64SYS::DBGBCR10_EL1)
2889 .Case("dbgbcr11_el1", ARM64SYS::DBGBCR11_EL1)
2890 .Case("dbgbcr12_el1", ARM64SYS::DBGBCR12_EL1)
2891 .Case("dbgbcr13_el1", ARM64SYS::DBGBCR13_EL1)
2892 .Case("dbgbcr14_el1", ARM64SYS::DBGBCR14_EL1)
2893 .Case("dbgbcr15_el1", ARM64SYS::DBGBCR15_EL1)
2894 .Case("dbgwvr0_el1", ARM64SYS::DBGWVR0_EL1)
2895 .Case("dbgwvr1_el1", ARM64SYS::DBGWVR1_EL1)
2896 .Case("dbgwvr2_el1", ARM64SYS::DBGWVR2_EL1)
2897 .Case("dbgwvr3_el1", ARM64SYS::DBGWVR3_EL1)
2898 .Case("dbgwvr4_el1", ARM64SYS::DBGWVR4_EL1)
2899 .Case("dbgwvr5_el1", ARM64SYS::DBGWVR5_EL1)
2900 .Case("dbgwvr6_el1", ARM64SYS::DBGWVR6_EL1)
2901 .Case("dbgwvr7_el1", ARM64SYS::DBGWVR7_EL1)
2902 .Case("dbgwvr8_el1", ARM64SYS::DBGWVR8_EL1)
2903 .Case("dbgwvr9_el1", ARM64SYS::DBGWVR9_EL1)
2904 .Case("dbgwvr10_el1", ARM64SYS::DBGWVR10_EL1)
2905 .Case("dbgwvr11_el1", ARM64SYS::DBGWVR11_EL1)
2906 .Case("dbgwvr12_el1", ARM64SYS::DBGWVR12_EL1)
2907 .Case("dbgwvr13_el1", ARM64SYS::DBGWVR13_EL1)
2908 .Case("dbgwvr14_el1", ARM64SYS::DBGWVR14_EL1)
2909 .Case("dbgwvr15_el1", ARM64SYS::DBGWVR15_EL1)
2910 .Case("dbgwcr0_el1", ARM64SYS::DBGWCR0_EL1)
2911 .Case("dbgwcr1_el1", ARM64SYS::DBGWCR1_EL1)
2912 .Case("dbgwcr2_el1", ARM64SYS::DBGWCR2_EL1)
2913 .Case("dbgwcr3_el1", ARM64SYS::DBGWCR3_EL1)
2914 .Case("dbgwcr4_el1", ARM64SYS::DBGWCR4_EL1)
2915 .Case("dbgwcr5_el1", ARM64SYS::DBGWCR5_EL1)
2916 .Case("dbgwcr6_el1", ARM64SYS::DBGWCR6_EL1)
2917 .Case("dbgwcr7_el1", ARM64SYS::DBGWCR7_EL1)
2918 .Case("dbgwcr8_el1", ARM64SYS::DBGWCR8_EL1)
2919 .Case("dbgwcr9_el1", ARM64SYS::DBGWCR9_EL1)
2920 .Case("dbgwcr10_el1", ARM64SYS::DBGWCR10_EL1)
2921 .Case("dbgwcr11_el1", ARM64SYS::DBGWCR11_EL1)
2922 .Case("dbgwcr12_el1", ARM64SYS::DBGWCR12_EL1)
2923 .Case("dbgwcr13_el1", ARM64SYS::DBGWCR13_EL1)
2924 .Case("dbgwcr14_el1", ARM64SYS::DBGWCR14_EL1)
2925 .Case("dbgwcr15_el1", ARM64SYS::DBGWCR15_EL1)
2926 .Case("mdrar_el1", ARM64SYS::MDRAR_EL1)
2927 .Case("oslar_el1", ARM64SYS::OSLAR_EL1)
2928 .Case("oslsr_el1", ARM64SYS::OSLSR_EL1)
2929 .Case("osdlr_el1", ARM64SYS::OSDLR_EL1)
2930 .Case("dbgprcr_el1", ARM64SYS::DBGPRCR_EL1)
2931 .Case("dbgclaimset_el1", ARM64SYS::DBGCLAIMSET_EL1)
2932 .Case("dbgclaimclr_el1", ARM64SYS::DBGCLAIMCLR_EL1)
2933 .Case("dbgauthstatus_el1", ARM64SYS::DBGAUTHSTATUS_EL1)
2934 .Case("dbgdevid2", ARM64SYS::DBGDEVID2)
2935 .Case("dbgdevid1", ARM64SYS::DBGDEVID1)
2936 .Case("dbgdevid0", ARM64SYS::DBGDEVID0)
2937 .Case("id_pfr0_el1", ARM64SYS::ID_PFR0_EL1)
2938 .Case("id_pfr1_el1", ARM64SYS::ID_PFR1_EL1)
2939 .Case("id_dfr0_el1", ARM64SYS::ID_DFR0_EL1)
2940 .Case("id_afr0_el1", ARM64SYS::ID_AFR0_EL1)
2941 .Case("id_isar0_el1", ARM64SYS::ID_ISAR0_EL1)
2942 .Case("id_isar1_el1", ARM64SYS::ID_ISAR1_EL1)
2943 .Case("id_isar2_el1", ARM64SYS::ID_ISAR2_EL1)
2944 .Case("id_isar3_el1", ARM64SYS::ID_ISAR3_EL1)
2945 .Case("id_isar4_el1", ARM64SYS::ID_ISAR4_EL1)
2946 .Case("id_isar5_el1", ARM64SYS::ID_ISAR5_EL1)
2947 .Case("afsr1_el1", ARM64SYS::AFSR1_EL1)
2948 .Case("afsr0_el1", ARM64SYS::AFSR0_EL1)
2949 .Case("revidr_el1", ARM64SYS::REVIDR_EL1)
2950 .Default(ARM64SYS::InvalidSystemReg);
2951 if (Reg != ARM64SYS::InvalidSystemReg) {
2952 // We matched a reg name, so create the operand.
2954 ARM64Operand::CreateSystemRegister(Reg, getLoc(), getContext()));
2955 Parser.Lex(); // Consume the register name.
2956 return MatchOperand_Success;
2959 // Or we may have an identifier that encodes the sub-operands.
2960 // For example, s3_2_c15_c0_0.
2961 unsigned op0, op1, CRn, CRm, op2;
2962 std::string Desc = ID;
2963 if (std::sscanf(Desc.c_str(), "s%u_%u_c%u_c%u_%u", &op0, &op1, &CRn, &CRm,
2965 return MatchOperand_NoMatch;
// Field range checks: op0 may only be 2 or 3; the other fields are bounded
// by their encoding widths (3, 4, 4, and 3 bits respectively).
2966 if ((op0 != 2 && op0 != 3) || op1 > 7 || CRn > 15 || CRm > 15 || op2 > 7)
2967 return MatchOperand_NoMatch;
// Pack the fields into the encoded system-register value:
// op0[15:14] op1[13:11] CRn[10:7] CRm[6:3] op2[2:0].
2969 unsigned Val = op0 << 14 | op1 << 11 | CRn << 7 | CRm << 3 | op2;
2971 ARM64Operand::CreateSystemRegister(Val, getLoc(), getContext()));
2972 Parser.Lex(); // Consume the register name.
2974 return MatchOperand_Success;
/// tryParseCPSRField - Parse an MSR (immediate) pstate-field operand:
/// one of "spsel", "daifset", or "daifclr" (matched case-insensitively).
2977 ARM64AsmParser::OperandMatchResultTy
2978 ARM64AsmParser::tryParseCPSRField(OperandVector &Operands) {
2979 const AsmToken &Tok = Parser.getTok();
2981 if (Tok.isNot(AsmToken::Identifier))
2982 return MatchOperand_NoMatch;
// Lower the token first so matching is case-insensitive.
2984 ARM64SYS::CPSRField Field =
2985 StringSwitch<ARM64SYS::CPSRField>(Tok.getString().lower())
2986 .Case("spsel", ARM64SYS::cpsr_SPSel)
2987 .Case("daifset", ARM64SYS::cpsr_DAIFSet)
2988 .Case("daifclr", ARM64SYS::cpsr_DAIFClr)
2989 .Default(ARM64SYS::InvalidCPSRField);
// NoMatch (not ParseFail): lets other operand parsers try this token.
2990 if (Field == ARM64SYS::InvalidCPSRField)
2991 return MatchOperand_NoMatch;
2993 ARM64Operand::CreateCPSRField(Field, getLoc(), getContext()));
2994 Parser.Lex(); // Consume the register name.
2996 return MatchOperand_Success;
2999 /// tryParseVectorRegister - Parse a vector register operand.
///
/// On success appends the register (plus any explicit arrangement qualifier
/// as a token operand, and any trailing "[imm]" lane index) to \p Operands.
3000 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
3001 if (Parser.getTok().isNot(AsmToken::Identifier))
3005 // Check for a vector register specifier first.
3007 int64_t Reg = tryMatchVectorRegister(Kind);
3011 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
3012 // If there was an explicit qualifier, that goes on as a literal text
3015 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
3017 // If there is an index specifier following the register, parse that too.
3018 if (Parser.getTok().is(AsmToken::LBrac)) {
3019 SMLoc SIdx = getLoc();
3020 Parser.Lex(); // Eat left bracket token.
3022 const MCExpr *ImmVal;
3023 if (getParser().parseExpression(ImmVal))
// Lane indices must fold to compile-time constants.
3025 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3027 TokError("immediate value expected for vector index");
3032 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3033 Error(E, "']' expected");
3037 Parser.Lex(); // Eat right bracket token.
3039 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3046 /// parseRegister - Parse a non-vector register operand.
///
/// Tries a vector register first, then a scalar register; also handles the
/// literal "[1]" suffix some instructions carry in their asm string.
3047 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
3049 // Try for a vector register.
3050 if (!tryParseVectorRegister(Operands))
3053 // Try for a scalar register.
3054 int64_t Reg = tryParseRegister();
3058 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
3060 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
3061 // as a string token in the instruction itself.
3062 if (getLexer().getKind() == AsmToken::LBrac) {
3063 SMLoc LBracS = getLoc();
3065 const AsmToken &Tok = Parser.getTok();
3066 if (Tok.is(AsmToken::Integer)) {
3067 SMLoc IntS = getLoc();
3068 int64_t Val = Tok.getIntVal();
3071 if (getLexer().getKind() == AsmToken::RBrac) {
3072 SMLoc RBracS = getLoc();
// Push "[", "1", "]" as separate literal tokens so the matcher can
// match them against the instruction's asm string verbatim.
3075 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
3077 ARM64Operand::CreateToken("1", false, IntS, getContext()));
3079 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
3089 /// tryParseNoIndexMemory - Custom parser method for memory operands that
3090 /// do not allow base regisrer writeback modes,
3091 /// or those that handle writeback separately from
3092 /// the memory operand (like the AdvSIMD ldX/stX
///
/// Accepts only the "[Xn]" form: a base register with no offset, no extend,
/// and no writeback; emits a memory operand with a zero offset.
3094 ARM64AsmParser::OperandMatchResultTy
3095 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
3096 if (Parser.getTok().isNot(AsmToken::LBrac))
3097 return MatchOperand_NoMatch;
3099 Parser.Lex(); // Eat left bracket token.
3101 const AsmToken &BaseRegTok = Parser.getTok();
3102 if (BaseRegTok.isNot(AsmToken::Identifier)) {
3103 Error(BaseRegTok.getLoc(), "register expected");
3104 return MatchOperand_ParseFail;
3107 int64_t Reg = tryParseRegister();
3109 Error(BaseRegTok.getLoc(), "register expected");
3110 return MatchOperand_ParseFail;
// Anything other than an immediate ']' (offset, comma, writeback '!') is
// rejected for this operand form.
3114 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3115 Error(E, "']' expected");
3116 return MatchOperand_ParseFail;
3119 Parser.Lex(); // Eat right bracket token.
3121 Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
3122 return MatchOperand_Success;
3125 /// parseMemory - Parse a memory operand for a basic load/store instruction.
/// Accepts, inside '[' ... ']':
///   - a base register alone;
///   - base + offset register, optionally with an extend (uxtw/lsl/sxtw/sxtx,
///     upper- or lower-case) and an optional '#imm' shift amount in [0,4];
///   - base + '#' immediate (possibly a :modifier: symbolic value);
///   - base + symbol reference (restricted on Darwin to @pageoff,
///     @gotpageoff, @tlvppageoff).
/// A trailing '!' adds a "!" token operand for pre-indexed writeback, which
/// requires a constant immediate offset.
/// Returns true (with a diagnostic) on error, false on success.
/// NOTE(review): this capture omits original lines (e.g. the tryParseRegister
/// guard, SMLoc S/E/OffsetLoc declarations, several closing braces and the
/// final return) — verify exact structure against upstream.
3126 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
3127 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
3129 Parser.Lex(); // Eat left bracket token.
3131 const AsmToken &BaseRegTok = Parser.getTok();
3132 if (BaseRegTok.isNot(AsmToken::Identifier))
3133 return Error(BaseRegTok.getLoc(), "register expected");
3135 int64_t Reg = tryParseRegister();
3137 return Error(BaseRegTok.getLoc(), "register expected");
3139 // If there is an offset expression, parse it.
3140 const MCExpr *OffsetExpr = 0;
3142 if (Parser.getTok().is(AsmToken::Comma)) {
3143 Parser.Lex(); // Eat the comma.
3144 OffsetLoc = getLoc();
// Register-offset form: "[Xn, Rm{, extend {#amt}}]".
3147 const AsmToken &OffsetRegTok = Parser.getTok();
3148 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
3150 // Default shift is LSL, with an omitted shift. We use the third bit of
3151 // the extend value to indicate presence/omission of the immediate offset.
3152 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
3153 int64_t ShiftVal = 0;
3154 bool ExplicitShift = false;
3156 if (Parser.getTok().is(AsmToken::Comma)) {
3157 // Embedded extend operand.
3158 Parser.Lex(); // Eat the comma
3160 SMLoc ExtLoc = getLoc();
3161 const AsmToken &Tok = Parser.getTok();
// Both lower- and upper-case spellings are accepted; "lsl"/"LSL" maps to
// UXTX as an alias.
3162 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
3163 .Case("uxtw", ARM64_AM::UXTW)
3164 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
3165 .Case("sxtw", ARM64_AM::SXTW)
3166 .Case("sxtx", ARM64_AM::SXTX)
3167 .Case("UXTW", ARM64_AM::UXTW)
3168 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
3169 .Case("SXTW", ARM64_AM::SXTW)
3170 .Case("SXTX", ARM64_AM::SXTX)
3171 .Default(ARM64_AM::InvalidExtend);
3172 if (ExtOp == ARM64_AM::InvalidExtend)
3173 return Error(ExtLoc, "expected valid extend operation");
3175 Parser.Lex(); // Eat the extend op.
3177 if (getLexer().is(AsmToken::RBrac)) {
3178 // No immediate operand.
3179 if (ExtOp == ARM64_AM::UXTX)
3180 return Error(ExtLoc, "LSL extend requires immediate operand");
3181 } else if (getLexer().is(AsmToken::Hash)) {
3182 // Immediate operand.
3183 Parser.Lex(); // Eat the '#'
3184 const MCExpr *ImmVal;
3185 SMLoc ExprLoc = getLoc();
3186 if (getParser().parseExpression(ImmVal))
3188 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3190 return TokError("immediate value expected for extend operand");
3192 ExplicitShift = true;
3193 ShiftVal = MCE->getValue();
// Extend shift amounts are architecturally limited to 0..4.
3194 if (ShiftVal < 0 || ShiftVal > 4)
3195 return Error(ExprLoc, "immediate operand out of range");
3197 return Error(getLoc(), "expected immediate operand");
3200 if (Parser.getTok().isNot(AsmToken::RBrac))
3201 return Error(getLoc(), "']' expected");
3203 Parser.Lex(); // Eat right bracket token.
3206 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
3207 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
3210 // Immediate expressions.
3211 } else if (Parser.getTok().is(AsmToken::Hash)) {
3212 Parser.Lex(); // Eat hash token.
3214 if (parseSymbolicImmVal(OffsetExpr))
3217 // FIXME: We really should make sure that we're dealing with a LDR/STR
3218 // instruction that can legally have a symbolic expression here.
3219 // Symbol reference.
3220 if (Parser.getTok().isNot(AsmToken::Identifier) &&
3221 Parser.getTok().isNot(AsmToken::String))
3222 return Error(getLoc(), "identifier or immediate expression expected");
3223 if (getParser().parseExpression(OffsetExpr))
3225 // If this is a plain ref, Make sure a legal variant kind was specified.
3226 // Otherwise, it's a more complicated expression and we have to just
3227 // assume it's OK and let the relocation stuff puke if it's not.
3228 ARM64MCExpr::VariantKind ELFRefKind;
3229 MCSymbolRefExpr::VariantKind DarwinRefKind;
3230 const MCConstantExpr *Addend;
3231 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
3233 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
3234 "ELF symbol modifiers not supported here yet");
3236 switch (DarwinRefKind) {
3238 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
3239 case MCSymbolRefExpr::VK_GOTPAGEOFF:
3240 case MCSymbolRefExpr::VK_PAGEOFF:
3241 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
3242 // These are what we're expecting.
3250 if (Parser.getTok().isNot(AsmToken::RBrac))
3251 return Error(E, "']' expected");
3253 Parser.Lex(); // Eat right bracket token.
3255 // Create the memory operand.
3257 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
3259 // Check for a '!', indicating pre-indexed addressing with writeback.
3260 if (Parser.getTok().is(AsmToken::Exclaim)) {
3261 // There needs to have been an immediate or wback doesn't make sense.
3263 return Error(E, "missing offset for pre-indexed addressing");
3264 // Pre-indexed with writeback must have a constant expression for the
3265 // offset. FIXME: Theoretically, we'd like to allow fixups so long
3266 // as they don't require a relocation.
3267 if (!isa<MCConstantExpr>(OffsetExpr))
3268 return Error(OffsetLoc, "constant immediate expression expected");
3270 // Create the Token operand for the '!'.
3271 Operands.push_back(ARM64Operand::CreateToken(
3272 "!", false, Parser.getTok().getLoc(), getContext()));
3273 Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate that may carry a leading ELF
/// relocation specifier of the form ":spec:", e.g. ":lo12:sym". The specifier
/// is matched case-insensitively (lowered before the StringSwitch). On
/// success ImmVal holds the parsed expression, wrapped in an ARM64MCExpr with
/// the matched VariantKind when a specifier was present.
/// Returns true on error (diagnostic already emitted); the captured text
/// omits the explicit 'return true;'/'return false;' lines.
3279 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3280 bool HasELFModifier = false;
3281 ARM64MCExpr::VariantKind RefKind;
3283 if (Parser.getTok().is(AsmToken::Colon)) {
3284 Parser.Lex(); // Eat ':"
3285 HasELFModifier = true;
3287 if (Parser.getTok().isNot(AsmToken::Identifier)) {
3288 Error(Parser.getTok().getLoc(),
3289 "expect relocation specifier in operand after ':'");
// Lower-case the identifier so specifiers match case-insensitively.
3293 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3294 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
3295 .Case("lo12", ARM64MCExpr::VK_LO12)
3296 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
3297 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
3298 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
3299 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
3300 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3301 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3302 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3303 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3304 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3305 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3306 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3307 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3308 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3309 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3310 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3311 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3312 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3313 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3314 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3315 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3316 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3317 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3318 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3319 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3320 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3321 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3322 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3323 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3324 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3325 .Default(ARM64MCExpr::VK_INVALID);
3327 if (RefKind == ARM64MCExpr::VK_INVALID) {
3328 Error(Parser.getTok().getLoc(),
3329 "expect relocation specifier in operand after ':'");
3333 Parser.Lex(); // Eat identifier
// The specifier must be closed by a second ':' before the expression.
3335 if (Parser.getTok().isNot(AsmToken::Colon)) {
3336 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3339 Parser.Lex(); // Eat ':'
3342 if (getParser().parseExpression(ImmVal))
// Wrap the expression with the parsed variant kind (guarded by
// HasELFModifier in the omitted line, presumably).
3346 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3351 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Grammar: '{' Vreg (',' Vreg)* '}' ['[' index ']'].
/// All registers must share the same kind suffix (e.g. ".4s") and be
/// sequential, with encoding wraparound at 31. On success pushes a
/// VectorList operand and, if an index follows, a VectorIndex operand.
/// Returns true (with a diagnostic) on error.
/// NOTE(review): capture omits lines (Kind/Count declarations, the
/// PrevReg update inside the loop, several error-path returns) — verify
/// against upstream.
3352 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3353 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3355 Parser.Lex(); // Eat left bracket token.
3357 int64_t FirstReg = tryMatchVectorRegister(Kind);
3359 return Error(getLoc(), "vector register expected");
3360 int64_t PrevReg = FirstReg;
3362 while (Parser.getTok().isNot(AsmToken::RCurly)) {
3363 if (Parser.getTok().is(AsmToken::EndOfStatement))
3364 Error(getLoc(), "'}' expected");
3366 if (Parser.getTok().isNot(AsmToken::Comma))
3367 return Error(getLoc(), "',' expected");
3368 Parser.Lex(); // Eat the comma token.
3370 SMLoc Loc = getLoc();
3372 int64_t Reg = tryMatchVectorRegister(NextKind);
3374 return Error(Loc, "vector register expected");
3375 // Any Kind suffices must match on all regs in the list.
3376 if (Kind != NextKind)
3377 return Error(Loc, "mismatched register size suffix");
3379 // Registers must be incremental (with wraparound at 31)
3380 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3381 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3382 return Error(Loc, "registers must be sequential");
3387 Parser.Lex(); // Eat the '}' token.
3389 unsigned NumElements = 0;
3390 char ElementKind = 0;
// Decode the ".<n><k>" suffix (e.g. ".4s") into element count and kind.
3392 parseValidVectorKind(Kind, NumElements, ElementKind);
3394 Operands.push_back(ARM64Operand::CreateVectorList(
3395 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3397 // If there is an index specifier following the list, parse that too.
3398 if (Parser.getTok().is(AsmToken::LBrac)) {
3399 SMLoc SIdx = getLoc();
3400 Parser.Lex(); // Eat left bracket token.
3402 const MCExpr *ImmVal;
3403 if (getParser().parseExpression(ImmVal))
3405 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3407 TokError("immediate value expected for vector index");
3412 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3413 Error(E, "']' expected");
3417 Parser.Lex(); // Eat right bracket token.
3419 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3425 /// parseOperand - Parse a arm instruction operand. For now this parses the
3426 /// operand regardless of the mnemonic.
/// First tries the tblgen-generated custom operand parsers
/// (MatchOperandParserImpl); on NoMatch, dispatches on the leading token:
/// '[' -> memory, '{' -> vector list, identifier -> condition code /
/// register / shift / extend / label-expression, '#' -> immediate
/// (including the special "#0.0" literal for fcmp/fcmpe).
/// \param isCondCode      expect a condition-code operand at this position.
/// \param invertCondCode  passed through to parseCondCode for aliases that
///                        invert the condition.
/// Returns true on parse failure.
3427 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3428 bool invertCondCode) {
3429 // Check if the current operand has a custom associated parser, if so, try to
3430 // custom parse the operand, or fallback to the general approach.
3431 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3432 if (ResTy == MatchOperand_Success)
3434 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3435 // there was a match, but an error occurred, in which case, just return that
3436 // the operand parsing failed.
3437 if (ResTy == MatchOperand_ParseFail)
3440 // Nothing custom, so do general case parsing.
3442 switch (getLexer().getKind()) {
3446 if (parseSymbolicImmVal(Expr))
3447 return Error(S, "invalid operand");
3449 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3450 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3453 case AsmToken::LBrac:
3454 return parseMemory(Operands);
3455 case AsmToken::LCurly:
3456 return parseVectorList(Operands);
3457 case AsmToken::Identifier: {
3458 // If we're expecting a Condition Code operand, then just parse that.
3460 return parseCondCode(Operands, invertCondCode);
3462 // If it's a register name, parse it.
3463 if (!parseRegister(Operands))
3466 // This could be an optional "shift" operand.
3467 if (!parseOptionalShift(Operands))
3470 // Or maybe it could be an optional "extend" operand.
3471 if (!parseOptionalExtend(Operands))
3474 // This was not a register so parse other operands that start with an
3475 // identifier (like labels) as expressions and create them as immediates.
3476 const MCExpr *IdVal;
3478 if (getParser().parseExpression(IdVal))
3481 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3482 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3485 case AsmToken::Hash: {
3486 // #42 -> immediate.
3490 // The only Real that should come through here is a literal #0.0 for
3491 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3492 // so convert the value.
3493 const AsmToken &Tok = Parser.getTok();
3494 if (Tok.is(AsmToken::Real)) {
3495 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3496 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// Only an exact +0.0 bit pattern is allowed, and only for fcmp/fcmpe.
3497 if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
3498 return TokError("unexpected floating point literal");
3499 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as two raw tokens the matcher expects.
3502 ARM64Operand::CreateToken("#0", false, S, getContext()));
3504 ARM64Operand::CreateToken(".0", false, S, getContext()));
3508 const MCExpr *ImmVal;
3509 if (parseSymbolicImmVal(ImmVal))
3512 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3513 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3519 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
/// operands. Splits the mnemonic on '.' into a head token plus suffix
/// tokens; routes ic/dc/at/tlbi to parseSysAlias; turns "b.<cc>" into a
/// branch token plus a condition-code immediate; then parses the
/// comma-separated operand list, flagging the positions where conditional
/// compare/select mnemonics and their aliases take a condition code.
/// Returns true on error (after eating to end of statement).
3521 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3522 StringRef Name, SMLoc NameLoc,
3523 OperandVector &Operands) {
3524 // Create the leading tokens for the mnemonic, split by '.' characters.
3525 size_t Start = 0, Next = Name.find('.');
3526 StringRef Head = Name.slice(Start, Next);
3528 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3529 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3530 return parseSysAlias(Head, NameLoc, Operands);
3533 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3536 // Handle condition codes for a branch mnemonic
3537 if (Head == "b" && Next != StringRef::npos) {
3539 Next = Name.find('.', Start + 1);
3540 Head = Name.slice(Start + 1, Next);
3542 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3543 (Head.data() - Name.data()));
3544 unsigned CC = parseCondCodeString(Head);
3546 return Error(SuffixLoc, "invalid condition code");
3547 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3549 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3552 // Add the remaining tokens in the mnemonic.
3553 while (Next != StringRef::npos) {
3555 Next = Name.find('.', Start + 1);
3556 Head = Name.slice(Start, Next);
3557 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3558 (Head.data() - Name.data()) + 1);
3560 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3563 // Conditional compare instructions have a Condition Code operand, which needs
3564 // to be parsed and an immediate operand created.
3565 bool condCodeFourthOperand =
3566 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3567 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3568 Head == "csinc" || Head == "csinv" || Head == "csneg");
3570 // These instructions are aliases to some of the conditional select
3571 // instructions. However, the condition code is inverted in the aliased
3574 // FIXME: Is this the correct way to handle these? Or should the parser
3575 // generate the aliased instructions directly?
3576 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3577 bool condCodeThirdOperand =
3578 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3580 // Read the remaining operands.
3581 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3582 // Read the first operand.
3583 if (parseOperand(Operands, false, false)) {
3584 Parser.eatToEndOfStatement();
3589 while (getLexer().is(AsmToken::Comma)) {
3590 Parser.Lex(); // Eat the comma.
3592 // Parse and remember the operand.
// N is the 1-based operand position (incremented in an omitted line);
// the condition code appears at position 2/3/4 depending on the alias.
3593 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3594 (N == 3 && condCodeThirdOperand) ||
3595 (N == 2 && condCodeSecondOperand),
3596 condCodeSecondOperand || condCodeThirdOperand)) {
3597 Parser.eatToEndOfStatement();
3605 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3606 SMLoc Loc = Parser.getTok().getLoc();
3607 Parser.eatToEndOfStatement();
3608 return Error(Loc, "unexpected token in argument list");
3611 Parser.Lex(); // Consume the EndOfStatement
3615 /// isFPR32Register - Check if a register is in the FPR32 register class.
3616 /// (The parser does not have the target register info to check the register
3617 /// class directly.)
/// Returns true for S0-S31; the switch header/default and the returns are
/// in lines omitted from this capture.
3618 static bool isFPR32Register(unsigned Reg) {
3619 using namespace ARM64;
3623 case S0: case S1: case S2: case S3: case S4: case S5: case S6:
3624 case S7: case S8: case S9: case S10: case S11: case S12: case S13:
3625 case S14: case S15: case S16: case S17: case S18: case S19: case S20:
3626 case S21: case S22: case S23: case S24: case S25: case S26: case S27:
3627 case S28: case S29: case S30: case S31:
3633 /// isGPR32Register - Check if a register is in the GPR32sp register class.
3634 /// (The parser does not have the target register info to check the register
3635 /// class directly.)
/// Returns true for W0-W30 and WSP; note WZR is deliberately not listed
/// (callers such as the cmp/cmn rewrite check it separately).
3636 static bool isGPR32Register(unsigned Reg) {
3637 using namespace ARM64;
3641 case W0: case W1: case W2: case W3: case W4: case W5: case W6:
3642 case W7: case W8: case W9: case W10: case W11: case W12: case W13:
3643 case W14: case W15: case W16: case W17: case W18: case W19: case W20:
3644 case W21: case W22: case W23: case W24: case W25: case W26: case W27:
3645 case W28: case W29: case W30: case WSP:
/// isGPR64Reg - Check if a register is a 64-bit general-purpose register:
/// X0-X28, FP, LR, SP, or XZR. Like the 32-bit predicates above, this exists
/// because the parser lacks target register info for a class lookup.
3651 static bool isGPR64Reg(unsigned Reg) {
3652 using namespace ARM64;
3654 case X0: case X1: case X2: case X3: case X4: case X5: case X6:
3655 case X7: case X8: case X9: case X10: case X11: case X12: case X13:
3656 case X14: case X15: case X16: case X17: case X18: case X19: case X20:
3657 case X21: case X22: case X23: case X24: case X25: case X26: case X27:
3658 case X28: case FP: case LR: case SP: case XZR:
3666 // FIXME: This entire function is a giant hack to provide us with decent
3667 // operand range validation/diagnostics until TableGen/MC can be extended
3668 // to support autogeneration of this kind of validation.
/// validateInstruction - Post-match semantic checks on a matched MCInst.
/// Two passes over the opcode:
///   1. Unpredictable-behaviour checks: writeback base register overlapping
///      a destination/source (LDP/STP/LDR/STR pre/post), and paired loads
///      with Rt == Rt2.
///   2. Immediate-range checks: logical-shift amounts, add/sub imm12 (with
///      special-casing of @pageoff-style symbolic ADDXri immediates),
///      simm9 unscaled offsets, register-offset extend types, and simm7
///      pair offsets.
/// Loc holds per-operand source locations for diagnostics. Returns true on
/// error. NOTE(review): many case labels and break/return lines are omitted
/// from this capture; the groups below are larger upstream.
3669 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3670 SmallVectorImpl<SMLoc> &Loc) {
3671 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3672 // Check for indexed addressing modes w/ the base register being the
3673 // same as a destination/source register or pair load where
3674 // the Rt == Rt2. All of those are undefined behaviour.
3675 switch (Inst.getOpcode()) {
3676 case ARM64::LDPSWpre:
3677 case ARM64::LDPWpost:
3678 case ARM64::LDPWpre:
3679 case ARM64::LDPXpost:
3680 case ARM64::LDPXpre: {
3681 unsigned Rt = Inst.getOperand(0).getReg();
3682 unsigned Rt2 = Inst.getOperand(1).getReg();
3683 unsigned Rn = Inst.getOperand(2).getReg();
// isSubRegisterEq also catches W/X aliasing, not just exact equality.
3684 if (RI->isSubRegisterEq(Rn, Rt))
3685 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3686 "is also a destination");
3687 if (RI->isSubRegisterEq(Rn, Rt2))
3688 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3689 "is also a destination");
3692 case ARM64::LDPDpost:
3693 case ARM64::LDPDpre:
3694 case ARM64::LDPQpost:
3695 case ARM64::LDPQpre:
3696 case ARM64::LDPSpost:
3697 case ARM64::LDPSpre:
3698 case ARM64::LDPSWpost:
3704 case ARM64::LDPXi: {
3705 unsigned Rt = Inst.getOperand(0).getReg();
3706 unsigned Rt2 = Inst.getOperand(1).getReg();
3708 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3711 case ARM64::STPDpost:
3712 case ARM64::STPDpre:
3713 case ARM64::STPQpost:
3714 case ARM64::STPQpre:
3715 case ARM64::STPSpost:
3716 case ARM64::STPSpre:
3717 case ARM64::STPWpost:
3718 case ARM64::STPWpre:
3719 case ARM64::STPXpost:
3720 case ARM64::STPXpre: {
3721 unsigned Rt = Inst.getOperand(0).getReg();
3722 unsigned Rt2 = Inst.getOperand(1).getReg();
3723 unsigned Rn = Inst.getOperand(2).getReg();
3724 if (RI->isSubRegisterEq(Rn, Rt))
3725 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3726 "is also a source");
3727 if (RI->isSubRegisterEq(Rn, Rt2))
3728 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3729 "is also a source");
3732 case ARM64::LDRBBpre:
3733 case ARM64::LDRBpre:
3734 case ARM64::LDRHHpre:
3735 case ARM64::LDRHpre:
3736 case ARM64::LDRSBWpre:
3737 case ARM64::LDRSBXpre:
3738 case ARM64::LDRSHWpre:
3739 case ARM64::LDRSHXpre:
3740 case ARM64::LDRSWpre:
3741 case ARM64::LDRWpre:
3742 case ARM64::LDRXpre:
3743 case ARM64::LDRBBpost:
3744 case ARM64::LDRBpost:
3745 case ARM64::LDRHHpost:
3746 case ARM64::LDRHpost:
3747 case ARM64::LDRSBWpost:
3748 case ARM64::LDRSBXpost:
3749 case ARM64::LDRSHWpost:
3750 case ARM64::LDRSHXpost:
3751 case ARM64::LDRSWpost:
3752 case ARM64::LDRWpost:
3753 case ARM64::LDRXpost: {
3754 unsigned Rt = Inst.getOperand(0).getReg();
3755 unsigned Rn = Inst.getOperand(1).getReg();
3756 if (RI->isSubRegisterEq(Rn, Rt))
3757 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3758 "is also a source");
3761 case ARM64::STRBBpost:
3762 case ARM64::STRBpost:
3763 case ARM64::STRHHpost:
3764 case ARM64::STRHpost:
3765 case ARM64::STRWpost:
3766 case ARM64::STRXpost:
3767 case ARM64::STRBBpre:
3768 case ARM64::STRBpre:
3769 case ARM64::STRHHpre:
3770 case ARM64::STRHpre:
3771 case ARM64::STRWpre:
3772 case ARM64::STRXpre: {
3773 unsigned Rt = Inst.getOperand(0).getReg();
3774 unsigned Rn = Inst.getOperand(1).getReg();
3775 if (RI->isSubRegisterEq(Rn, Rt))
3776 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3777 "is also a source");
3782 // Now check immediate ranges. Separate from the above as there is overlap
3783 // in the instructions being checked and this keeps the nested conditionals
3785 switch (Inst.getOpcode()) {
3787 case ARM64::ANDSWrs:
3789 case ARM64::ORRWrs: {
3790 if (!Inst.getOperand(3).isImm())
3791 return Error(Loc[3], "immediate value expected");
3792 int64_t shifter = Inst.getOperand(3).getImm();
3793 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
// 32-bit logical ops: LSL amount must fit in 0..31.
3794 if (ST == ARM64_AM::LSL && shifter > 31)
3795 return Error(Loc[3], "shift value out of range");
3798 case ARM64::ADDSWri:
3799 case ARM64::ADDSXri:
3802 case ARM64::SUBSWri:
3803 case ARM64::SUBSXri:
3805 case ARM64::SUBXri: {
3806 if (!Inst.getOperand(3).isImm())
3807 return Error(Loc[3], "immediate value expected");
3808 int64_t shifter = Inst.getOperand(3).getImm();
// add/sub imm12 only allows "LSL #0" or "LSL #12".
3809 if (shifter != 0 && shifter != 12)
3810 return Error(Loc[3], "shift value out of range");
3811 // The imm12 operand can be an expression. Validate that it's legit.
3812 // FIXME: We really, really want to allow arbitrary expressions here
3813 // and resolve the value and validate the result at fixup time, but
3814 // that's hard as we have long since lost any source information we
3815 // need to generate good diagnostics by that point.
3816 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3817 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3818 ARM64MCExpr::VariantKind ELFRefKind;
3819 MCSymbolRefExpr::VariantKind DarwinRefKind;
3820 const MCConstantExpr *Addend;
3821 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3822 return Error(Loc[2], "invalid immediate expression");
3825 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3826 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3827 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3828 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3829 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3830 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3831 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3832 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3833 // Note that we don't range-check the addend. It's adjusted
3834 // modulo page size when converted, so there is no "out of range"
3835 // condition when using @pageoff. Any validity checking for the value
3836 // was done in the is*() predicate function.
3838 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3839 // @gotpageoff can only be used directly, not with an addend.
3843 // Otherwise, we're not sure, so don't allow it for now.
3844 return Error(Loc[2], "invalid immediate expression");
3847 // If it's anything but an immediate, it's not legit.
3848 if (!Inst.getOperand(2).isImm())
3849 return Error(Loc[2], "invalid immediate expression");
3850 int64_t imm = Inst.getOperand(2).getImm();
// imm12: unsigned 12-bit range.
3851 if (imm > 4095 || imm < 0)
3852 return Error(Loc[2], "immediate value out of range");
3855 case ARM64::LDRBpre:
3856 case ARM64::LDRHpre:
3857 case ARM64::LDRSBWpre:
3858 case ARM64::LDRSBXpre:
3859 case ARM64::LDRSHWpre:
3860 case ARM64::LDRSHXpre:
3861 case ARM64::LDRWpre:
3862 case ARM64::LDRXpre:
3863 case ARM64::LDRSpre:
3864 case ARM64::LDRDpre:
3865 case ARM64::LDRQpre:
3866 case ARM64::STRBpre:
3867 case ARM64::STRHpre:
3868 case ARM64::STRWpre:
3869 case ARM64::STRXpre:
3870 case ARM64::STRSpre:
3871 case ARM64::STRDpre:
3872 case ARM64::STRQpre:
3873 case ARM64::LDRBpost:
3874 case ARM64::LDRHpost:
3875 case ARM64::LDRSBWpost:
3876 case ARM64::LDRSBXpost:
3877 case ARM64::LDRSHWpost:
3878 case ARM64::LDRSHXpost:
3879 case ARM64::LDRWpost:
3880 case ARM64::LDRXpost:
3881 case ARM64::LDRSpost:
3882 case ARM64::LDRDpost:
3883 case ARM64::LDRQpost:
3884 case ARM64::STRBpost:
3885 case ARM64::STRHpost:
3886 case ARM64::STRWpost:
3887 case ARM64::STRXpost:
3888 case ARM64::STRSpost:
3889 case ARM64::STRDpost:
3890 case ARM64::STRQpost:
3895 case ARM64::LDTRSHWi:
3896 case ARM64::LDTRSHXi:
3897 case ARM64::LDTRSBWi:
3898 case ARM64::LDTRSBXi:
3899 case ARM64::LDTRSWi:
3911 case ARM64::LDURSHWi:
3912 case ARM64::LDURSHXi:
3913 case ARM64::LDURSBWi:
3914 case ARM64::LDURSBXi:
3915 case ARM64::LDURSWi:
3923 case ARM64::STURBi: {
3924 // FIXME: Should accept expressions and error in fixup evaluation
3926 if (!Inst.getOperand(2).isImm())
3927 return Error(Loc[1], "immediate value expected");
3928 int64_t offset = Inst.getOperand(2).getImm();
// simm9 unscaled-offset range.
3929 if (offset > 255 || offset < -256)
3930 return Error(Loc[1], "offset value out of range");
3935 case ARM64::LDRSWro:
3937 case ARM64::STRSro: {
3938 // FIXME: Should accept expressions and error in fixup evaluation
3940 if (!Inst.getOperand(3).isImm())
3941 return Error(Loc[1], "immediate value expected");
3942 int64_t shift = Inst.getOperand(3).getImm();
3943 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3944 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3945 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3946 return Error(Loc[1], "shift type invalid");
3955 case ARM64::STRQro: {
3956 // FIXME: Should accept expressions and error in fixup evaluation
3958 if (!Inst.getOperand(3).isImm())
3959 return Error(Loc[1], "immediate value expected");
3960 int64_t shift = Inst.getOperand(3).getImm();
3961 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3962 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3963 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3964 return Error(Loc[1], "shift type invalid");
3968 case ARM64::LDRHHro:
3969 case ARM64::LDRSHWro:
3970 case ARM64::LDRSHXro:
3972 case ARM64::STRHHro: {
3973 // FIXME: Should accept expressions and error in fixup evaluation
3975 if (!Inst.getOperand(3).isImm())
3976 return Error(Loc[1], "immediate value expected");
3977 int64_t shift = Inst.getOperand(3).getImm();
3978 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3979 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3980 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3981 return Error(Loc[1], "shift type invalid");
3985 case ARM64::LDRBBro:
3986 case ARM64::LDRSBWro:
3987 case ARM64::LDRSBXro:
3989 case ARM64::STRBBro: {
3990 // FIXME: Should accept expressions and error in fixup evaluation
3992 if (!Inst.getOperand(3).isImm())
3993 return Error(Loc[1], "immediate value expected");
3994 int64_t shift = Inst.getOperand(3).getImm();
3995 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3996 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3997 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3998 return Error(Loc[1], "shift type invalid");
4012 case ARM64::LDPWpre:
4013 case ARM64::LDPXpre:
4014 case ARM64::LDPSpre:
4015 case ARM64::LDPDpre:
4016 case ARM64::LDPQpre:
4017 case ARM64::LDPSWpre:
4018 case ARM64::STPWpre:
4019 case ARM64::STPXpre:
4020 case ARM64::STPSpre:
4021 case ARM64::STPDpre:
4022 case ARM64::STPQpre:
4023 case ARM64::LDPWpost:
4024 case ARM64::LDPXpost:
4025 case ARM64::LDPSpost:
4026 case ARM64::LDPDpost:
4027 case ARM64::LDPQpost:
4028 case ARM64::LDPSWpost:
4029 case ARM64::STPWpost:
4030 case ARM64::STPXpost:
4031 case ARM64::STPSpost:
4032 case ARM64::STPDpost:
4033 case ARM64::STPQpost:
4043 case ARM64::STNPQi: {
4044 // FIXME: Should accept expressions and error in fixup evaluation
4046 if (!Inst.getOperand(3).isImm())
4047 return Error(Loc[2], "immediate value expected");
4048 int64_t offset = Inst.getOperand(3).getImm();
// simm7 element-scaled pair offset.
4049 if (offset > 63 || offset < -64)
4050 return Error(Loc[2], "offset value out of range");
/// rewriteMOV - Rewrite a "mov Rd, #imm" pseudo in place into the given real
/// mnemonic (movz/movn), replacing the immediate operand with (imm >> shift)
/// and appending an "LSL #shift" shifter operand. Used by the MOV-alias
/// handling in MatchAndEmitInstruction.
/// NOTE(review): the capture omits the line assigning Operands[0] (between
/// original lines 4064 and 4066) and the closing brace — verify upstream.
4058 static void rewriteMOV(ARM64AsmParser::OperandVector &Operands,
4059 StringRef mnemonic, uint64_t imm, unsigned shift,
4060 MCContext &Context) {
4061 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
4062 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4064 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
4066 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
4067 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
4068 Op2->getEndLoc(), Context);
4070 Operands.push_back(ARM64Operand::CreateShifter(
4071 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
/// showMatchError - Emit the diagnostic corresponding to a
/// MatchAndEmitInstruction error code at the given location. Always returns
/// true (the Error() helper's return), so callers can "return
/// showMatchError(...)".
4076 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
4078 case Match_MissingFeature:
4080 "instruction requires a CPU feature not currently enabled");
4081 case Match_InvalidOperand:
4082 return Error(Loc, "invalid operand for instruction");
4083 case Match_InvalidSuffix:
4084 return Error(Loc, "invalid type suffix for instruction");
4085 case Match_InvalidMemoryIndexedSImm9:
4086 return Error(Loc, "index must be an integer in range [-256,255].");
4087 case Match_InvalidMemoryIndexed32SImm7:
4088 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
4089 case Match_InvalidMemoryIndexed64SImm7:
4090 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
4091 case Match_InvalidMemoryIndexed128SImm7:
4092 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
4093 case Match_InvalidMemoryIndexed8:
4094 return Error(Loc, "index must be an integer in range [0,4095].");
4095 case Match_InvalidMemoryIndexed16:
4096 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
4097 case Match_InvalidMemoryIndexed32:
4098 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
4099 case Match_InvalidMemoryIndexed64:
4100 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
4101 case Match_InvalidMemoryIndexed128:
4102 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
4103 case Match_InvalidImm1_8:
4104 return Error(Loc, "immediate must be an integer in range [1,8].");
4105 case Match_InvalidImm1_16:
4106 return Error(Loc, "immediate must be an integer in range [1,16].");
4107 case Match_InvalidImm1_32:
4108 return Error(Loc, "immediate must be an integer in range [1,32].");
4109 case Match_InvalidImm1_64:
4110 return Error(Loc, "immediate must be an integer in range [1,64].");
4111 case Match_MnemonicFail:
4112 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes are a programmer error; still emit a generic diagnostic
// in release builds.
4114 assert(0 && "unexpected error code!");
4115 return Error(Loc, "invalid instruction format");
/// MatchAndEmitInstruction - Match the parsed operand list against the
/// generated instruction tables and, on success, emit the MCInst to the
/// streamer. Before matching, this rewrites a number of assembler
/// pseudo-instructions and aliases that the tblgen matcher cannot express
/// directly (CMP/CMN -> SUBS/ADDS, MOV -> MOVZ/MOVN, implicit LSL on
/// ADD/SUB immediates, LSL/BFI/BFXIL-family bitfield aliases, W->X
/// register fixups for tbz/sxtw-style mnemonics, and FMOV oddities).
/// On failure, the match result is mapped to a user-facing diagnostic.
4119 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
4120 OperandVector &Operands,
4122 unsigned &ErrorInfo,
4123 bool MatchingInlineAsm) {
4124 assert(!Operands.empty() && "Unexpect empty operand list!");
// Operands[0] is always the mnemonic token produced by instruction parsing.
4125 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
4126 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
4128 StringRef Tok = Op->getToken();
4129 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
4130 // This needs to be done before the special handling of ADD/SUB immediates.
4131 if (Tok == "cmp" || Tok == "cmn") {
4132 // Replace the opcode with either ADDS or SUBS.
4133 const char *Repl = StringSwitch<const char *>(Tok)
4134 .Case("cmp", "subs")
4135 .Case("cmn", "adds")
4137 assert(Repl && "Unknown compare instruction");
4139 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
4141 // Insert WZR or XZR as destination operand.
// Pick the zero register whose width matches the first source register.
4142 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4144 if (RegOp->isReg() &&
4145 (isGPR32Register(RegOp->getReg()) || RegOp->getReg() == ARM64::WZR))
4146 ZeroReg = ARM64::WZR;
4148 ZeroReg = ARM64::XZR;
4150 Operands.begin() + 1,
4151 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
4152 // Update since we modified it above.
4153 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
4154 Tok = Op->getToken();
4157 unsigned NumOperands = Operands.size();
4159 if (Tok == "mov" && NumOperands == 3) {
4160 // The MOV mnemonic is aliased to movn/movz, depending on the value of
4161 // the immediate being instantiated.
4162 // FIXME: Catching this here is a total hack, and we should use tblgen
4163 // support to implement this instead as soon as it is available.
4165 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4167 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
4168 uint64_t Val = CE->getValue();
4169 uint64_t NVal = ~Val;
4171 // If this is a 32-bit register and the value has none of the upper
4172 // set, clear the complemented upper 32-bits so the logic below works
4173 // for 32-bit registers too.
4174 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4175 if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
4176 (Val & 0xFFFFFFFFULL) == Val)
4177 NVal &= 0x00000000FFFFFFFFULL;
// Choose MOVZ if the value fits in one 16-bit halfword (at any of the
// four halfword positions), otherwise MOVN if the complement does.
4179 // MOVK Rd, imm << 0
4180 if ((Val & 0xFFFF) == Val)
4181 rewriteMOV(Operands, "movz", Val, 0, getContext());
4183 // MOVK Rd, imm << 16
4184 else if ((Val & 0xFFFF0000ULL) == Val)
4185 rewriteMOV(Operands, "movz", Val, 16, getContext());
4187 // MOVK Rd, imm << 32
4188 else if ((Val & 0xFFFF00000000ULL) == Val)
4189 rewriteMOV(Operands, "movz", Val, 32, getContext());
4191 // MOVK Rd, imm << 48
4192 else if ((Val & 0xFFFF000000000000ULL) == Val)
4193 rewriteMOV(Operands, "movz", Val, 48, getContext());
4195 // MOVN Rd, (~imm << 0)
4196 else if ((NVal & 0xFFFFULL) == NVal)
4197 rewriteMOV(Operands, "movn", NVal, 0, getContext());
4199 // MOVN Rd, ~(imm << 16)
4200 else if ((NVal & 0xFFFF0000ULL) == NVal)
4201 rewriteMOV(Operands, "movn", NVal, 16, getContext());
4203 // MOVN Rd, ~(imm << 32)
4204 else if ((NVal & 0xFFFF00000000ULL) == NVal)
4205 rewriteMOV(Operands, "movn", NVal, 32, getContext());
4207 // MOVN Rd, ~(imm << 48)
4208 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
4209 rewriteMOV(Operands, "movn", NVal, 48, getContext());
4212 } else if (NumOperands == 4) {
4213 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
4214 // Handle the uimm24 immediate form, where the shift is not specified.
4215 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4217 if (const MCConstantExpr *CE =
4218 dyn_cast<MCConstantExpr>(Op3->getImm())) {
4219 uint64_t Val = CE->getValue();
// Values of 24 bits or more cannot be encoded even with the LSL #12 form.
4220 if (Val >= (1 << 24)) {
4221 Error(IDLoc, "immediate value is too large");
4224 if (Val < (1 << 12)) {
4225 Operands.push_back(ARM64Operand::CreateShifter(
4226 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4227 } else if ((Val & 0xfff) == 0) {
// Low 12 bits clear: fold the value into the "imm, LSL #12" encoding.
4229 CE = MCConstantExpr::Create(Val >> 12, getContext());
4231 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
4232 Operands.push_back(ARM64Operand::CreateShifter(
4233 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
4235 Error(IDLoc, "immediate value is too large");
// Non-constant immediate (e.g. a relocatable expression): default LSL #0.
4239 Operands.push_back(ARM64Operand::CreateShifter(
4240 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4244 // FIXME: Horrible hack to handle the LSL -> UBFM alias.
4245 } else if (NumOperands == 4 && Tok == "lsl") {
4246 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4247 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4248 if (Op2->isReg() && Op3->isImm()) {
4249 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4251 uint64_t Op3Val = Op3CE->getValue();
4252 uint64_t NewOp3Val = 0;
4253 uint64_t NewOp4Val = 0;
// lsl Rd, Rn, #sh == ubfm Rd, Rn, #((size - sh) mod size), #(size - 1 - sh),
// with size = 32 for W registers and 64 for X registers.
4254 if (isGPR32Register(Op2->getReg()) || Op2->getReg() == ARM64::WZR) {
4255 NewOp3Val = (32 - Op3Val) & 0x1f;
4256 NewOp4Val = 31 - Op3Val;
4258 NewOp3Val = (64 - Op3Val) & 0x3f;
4259 NewOp4Val = 63 - Op3Val;
4262 const MCExpr *NewOp3 =
4263 MCConstantExpr::Create(NewOp3Val, getContext());
4264 const MCExpr *NewOp4 =
4265 MCConstantExpr::Create(NewOp4Val, getContext());
4267 Operands[0] = ARM64Operand::CreateToken(
4268 "ubfm", false, Op->getStartLoc(), getContext());
4269 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4270 Op3->getEndLoc(), getContext());
4271 Operands.push_back(ARM64Operand::CreateImm(
4272 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4278 // FIXME: Horrible hack to handle the optional LSL shift for vector
4280 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4281 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4282 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4283 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
// Either "bic.4h v0, #imm" (suffix token first) or "bic v0.4h, #imm".
4284 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4285 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4286 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4287 IDLoc, getContext()));
4288 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4289 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4290 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4291 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4292 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4293 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4294 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4295 // Canonicalize on lower-case for ease of comparison.
4296 std::string CanonicalSuffix = Suffix.lower();
// movi with a .1d/.2d/.8b/.16b suffix takes no shift operand; every other
// form gets the implicit "LSL #0" appended for the matcher.
4297 if (Tok != "movi" ||
4298 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4299 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4300 Operands.push_back(ARM64Operand::CreateShifter(
4301 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4304 } else if (NumOperands == 5) {
4305 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4306 // UBFIZ -> UBFM aliases.
4307 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4308 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4309 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4310 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4312 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4313 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4314 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4316 if (Op3CE && Op4CE) {
4317 uint64_t Op3Val = Op3CE->getValue();
4318 uint64_t Op4Val = Op4CE->getValue();
// Convert the (lsb, width) alias form to the BFM-style (immr, imms) pair:
// immr = (size - lsb) mod size, imms = width - 1.
4320 uint64_t NewOp3Val = 0;
4321 if (isGPR32Register(Op1->getReg()))
4322 NewOp3Val = (32 - Op3Val) & 0x1f;
4324 NewOp3Val = (64 - Op3Val) & 0x3f;
4326 uint64_t NewOp4Val = Op4Val - 1;
4328 const MCExpr *NewOp3 =
4329 MCConstantExpr::Create(NewOp3Val, getContext());
4330 const MCExpr *NewOp4 =
4331 MCConstantExpr::Create(NewOp4Val, getContext());
4332 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4333 Op3->getEndLoc(), getContext());
4334 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4335 Op4->getEndLoc(), getContext());
4337 Operands[0] = ARM64Operand::CreateToken(
4338 "bfm", false, Op->getStartLoc(), getContext());
4339 else if (Tok == "sbfiz")
4340 Operands[0] = ARM64Operand::CreateToken(
4341 "sbfm", false, Op->getStartLoc(), getContext());
4342 else if (Tok == "ubfiz")
4343 Operands[0] = ARM64Operand::CreateToken(
4344 "ubfm", false, Op->getStartLoc(), getContext());
4346 llvm_unreachable("No valid mnemonic for alias?");
4354 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4355 // UBFX -> UBFM aliases.
4356 } else if (NumOperands == 5 &&
4357 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4358 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4359 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4360 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4362 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4363 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4364 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4366 if (Op3CE && Op4CE) {
4367 uint64_t Op3Val = Op3CE->getValue();
4368 uint64_t Op4Val = Op4CE->getValue();
// The extract form's (lsb, width) becomes (lsb, msb): msb = lsb + width - 1.
4369 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
// Only rewrite when the computed msb did not wrap below the lsb.
4371 if (NewOp4Val >= Op3Val) {
4372 const MCExpr *NewOp4 =
4373 MCConstantExpr::Create(NewOp4Val, getContext());
4374 Operands[4] = ARM64Operand::CreateImm(
4375 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4377 Operands[0] = ARM64Operand::CreateToken(
4378 "bfm", false, Op->getStartLoc(), getContext());
4379 else if (Tok == "sbfx")
4380 Operands[0] = ARM64Operand::CreateToken(
4381 "sbfm", false, Op->getStartLoc(), getContext());
4382 else if (Tok == "ubfx")
4383 Operands[0] = ARM64Operand::CreateToken(
4384 "ubfm", false, Op->getStartLoc(), getContext());
4386 llvm_unreachable("No valid mnemonic for alias?");
4395 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4396 // InstAlias can't quite handle this since the reg classes aren't
4398 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4399 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
// Bit numbers below 32 are legal on a W register; promote it to the X form.
4401 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4402 if (OpCE->getValue() < 32) {
4403 // The source register can be Wn here, but the matcher expects a
4404 // GPR64. Twiddle it here if necessary.
4405 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4407 unsigned Reg = getXRegFromWReg(Op->getReg());
4408 Operands[1] = ARM64Operand::CreateReg(
4409 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4416 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4417 // InstAlias can't quite handle this since the reg classes aren't
4419 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4420 // The source register can be Wn here, but the matcher expects a
4421 // GPR64. Twiddle it here if necessary.
4422 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4424 unsigned Reg = getXRegFromWReg(Op->getReg());
4425 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4426 Op->getEndLoc(), getContext());
4430 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4431 else if (NumOperands == 3 &&
4432 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4433 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4434 if (Op->isReg() && isGPR64Reg(Op->getReg())) {
4435 // The source register can be Wn here, but the matcher expects a
4436 // GPR64. Twiddle it here if necessary.
4437 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4439 unsigned Reg = getXRegFromWReg(Op->getReg());
4440 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4441 Op->getEndLoc(), getContext());
4447 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4448 if (NumOperands == 3 && Tok == "fmov") {
4449 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4450 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
// An FP immediate of (unsigned)-1 is the sentinel used for #0.0 here;
// substitute the zero register of the matching width.
4451 if (RegOp->isReg() && ImmOp->isFPImm() &&
4452 ImmOp->getFPImm() == (unsigned)-1) {
4454 isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
4455 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4456 Op->getEndLoc(), getContext());
4461 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4462 // FMOV instructions. The index isn't an actual instruction operand
4463 // but rather syntactic sugar. It really should be part of the mnemonic,
4464 // not the operand, but whatever.
4465 if ((NumOperands == 5) && Tok == "fmov") {
4466 // If the last operand is a vectorindex of '1', then replace it with
4467 // a '[' '1' ']' token sequence, which is what the matcher
4468 // (annoyingly) expects for a literal vector index operand.
4469 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4470 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4471 SMLoc Loc = Op->getStartLoc();
4472 Operands.pop_back();
4474 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4476 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4478 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4479 } else if (Op->isReg()) {
4480 // Similarly, check the destination operand for the GPR->High-lane
4482 unsigned OpNo = NumOperands - 2;
4483 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4484 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4485 SMLoc Loc = Op->getStartLoc();
4487 ARM64Operand::CreateToken("[", false, Loc, getContext());
4489 Operands.begin() + OpNo + 1,
4490 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4492 Operands.begin() + OpNo + 2,
4493 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4499 // First try to match against the secondary set of tables containing the
4500 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
// The trailing 1/0 argument selects which generated match table variant
// is consulted (short-form vs. long-form NEON syntax).
4501 unsigned MatchResult =
4502 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4504 // If that fails, try against the alternate table containing long-form NEON:
4505 // "fadd v0.2s, v1.2s, v2.2s"
4506 if (MatchResult != Match_Success)
4508 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4510 switch (MatchResult) {
4511 case Match_Success: {
4512 // Perform range checking and other semantic validations
4513 SmallVector<SMLoc, 8> OperandLocs;
4514 NumOperands = Operands.size();
4515 for (unsigned i = 1; i < NumOperands; ++i)
4516 OperandLocs.push_back(Operands[i]->getStartLoc());
4517 if (validateInstruction(Inst, OperandLocs))
4521 Out.EmitInstruction(Inst, STI);
4524 case Match_MissingFeature:
4525 case Match_MnemonicFail:
4526 return showMatchError(IDLoc, MatchResult);
4527 case Match_InvalidOperand: {
4528 SMLoc ErrorLoc = IDLoc;
// ErrorInfo == ~0U means the matcher could not identify a failing operand.
4529 if (ErrorInfo != ~0U) {
4530 if (ErrorInfo >= Operands.size())
4531 return Error(IDLoc, "too few operands for instruction");
4533 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4534 if (ErrorLoc == SMLoc())
4537 // If the match failed on a suffix token operand, tweak the diagnostic
4539 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4540 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4541 MatchResult = Match_InvalidSuffix;
4543 return showMatchError(ErrorLoc, MatchResult);
4545 case Match_InvalidMemoryIndexedSImm9: {
4546 // If there is not a '!' after the memory operand that failed, we really
4547 // want the diagnostic for the non-pre-indexed instruction variant instead.
4548 // Be careful to check for the post-indexed variant as well, which also
4549 // uses this match diagnostic. Also exclude the explicitly unscaled
4550 // mnemonics, as they want the unscaled diagnostic as well.
4551 if (Operands.size() == ErrorInfo + 1 &&
4552 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4553 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4554 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4555 // the register class of the previous operand. Default to 64 in case
4556 // we see something unexpected.
4557 MatchResult = Match_InvalidMemoryIndexed64;
4559 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4560 if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
4561 .contains(PrevOp->getReg()))
4562 MatchResult = Match_InvalidMemoryIndexed32;
4565 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4566 if (ErrorLoc == SMLoc())
4568 return showMatchError(ErrorLoc, MatchResult);
4570 case Match_InvalidMemoryIndexed32:
4571 case Match_InvalidMemoryIndexed64:
4572 case Match_InvalidMemoryIndexed128:
4573 // If there is a '!' after the memory operand that failed, we really
4574 // want the diagnostic for the pre-indexed instruction variant instead.
4575 if (Operands.size() > ErrorInfo + 1 &&
4576 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4577 MatchResult = Match_InvalidMemoryIndexedSImm9;
4579 case Match_InvalidMemoryIndexed8:
4580 case Match_InvalidMemoryIndexed16:
4581 case Match_InvalidMemoryIndexed32SImm7:
4582 case Match_InvalidMemoryIndexed64SImm7:
4583 case Match_InvalidMemoryIndexed128SImm7:
4584 case Match_InvalidImm1_8:
4585 case Match_InvalidImm1_16:
4586 case Match_InvalidImm1_32:
4587 case Match_InvalidImm1_64: {
4588 // Any time we get here, there's nothing fancy to do. Just get the
4589 // operand SMLoc and display the diagnostic.
4590 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4591 // If it's a memory operand, the error is with the offset immediate,
4592 // so get that location instead.
4593 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4594 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4595 if (ErrorLoc == SMLoc())
4597 return showMatchError(ErrorLoc, MatchResult);
4601 llvm_unreachable("Implement any new match types added!");
4605 /// ParseDirective parses the arm specific directives
/// (.hword/.word/.xword data directives, .tlsdesccall, and the Mach-O
/// linker-optimization-hint directives handled by parseDirectiveLOH).
/// Returns the result of the matching directive handler; any directive
/// not recognized here falls through to the LOH parser.
4606 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4607 StringRef IDVal = DirectiveID.getIdentifier();
4608 SMLoc Loc = DirectiveID.getLoc();
// .hword/.word/.xword emit 2-, 4-, and 8-byte data values respectively.
4609 if (IDVal == ".hword")
4610 return parseDirectiveWord(2, Loc);
4611 if (IDVal == ".word")
4612 return parseDirectiveWord(4, Loc);
4613 if (IDVal == ".xword")
4614 return parseDirectiveWord(8, Loc);
4615 if (IDVal == ".tlsdesccall")
4616 return parseDirectiveTLSDescCall(Loc);
4618 return parseDirectiveLOH(IDVal, Loc);
4621 /// parseDirectiveWord
4622 /// ::= .word [ expression (, expression)* ]
/// Emits each comma-separated expression as a Size-byte value via the
/// streamer. Returns true (and reports an error) on malformed input.
4623 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
// An empty operand list (immediate end-of-statement) is legal and emits
// nothing.
4624 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4626 const MCExpr *Value;
4627 if (getParser().parseExpression(Value))
4630 getParser().getStreamer().EmitValue(Value, Size);
// Stop at end of statement; otherwise a comma must separate expressions.
4632 if (getLexer().is(AsmToken::EndOfStatement))
4635 // FIXME: Improve diagnostic.
4636 if (getLexer().isNot(AsmToken::Comma))
4637 return Error(L, "unexpected token in directive")
4646 // parseDirectiveTLSDescCall:
4647 // ::= .tlsdesccall symbol
// Parses the symbol operand, wraps it in a VK_TLSDESC-annotated
// ARM64MCExpr, and emits a TLSDESCCALL marker instruction carrying that
// expression. Returns true (with an error) if no symbol follows.
4648 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4650 if (getParser().parseIdentifier(Name))
4651 return Error(L, "expected symbol after directive");
4653 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4654 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4655 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4658 Inst.setOpcode(ARM64::TLSDESCCALL);
4659 Inst.addOperand(MCOperand::CreateExpr(Expr));
4661 getParser().getStreamer().EmitInstruction(Inst, STI);
/// parseDirectiveLOH - parse a Mach-O linker optimization hint directive.
4665 /// ::= .loh <lohName | lohId> label1, ..., labelN
4666 /// The number of arguments depends on the loh identifier.
4667 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4668 if (IDVal != MCLOHDirectiveName())
// The LOH kind may be spelled either as a known identifier or as its
// numeric id; validate whichever form appears.
4671 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4672 if (getParser().getTok().isNot(AsmToken::Integer))
4673 return TokError("expected an identifier or a number in directive");
4674 // We successfully get a numeric value for the identifier.
4675 // Check if it is valid.
4676 int64_t Id = getParser().getTok().getIntVal();
4677 Kind = (MCLOHType)Id;
4678 // Check that Id does not overflow MCLOHType.
4679 if (!isValidMCLOHType(Kind) || Id != Kind)
4680 return TokError("invalid numeric identifier in directive")
4682 StringRef Name = getTok().getIdentifier();
4683 // We successfully parse an identifier.
4684 // Check if it is a recognized one.
4685 int Id = MCLOHNameToId(Name);
4688 return TokError("invalid identifier in directive");
4689 Kind = (MCLOHType)Id;
4691 // Consume the identifier.
4693 // Get the number of arguments of this LOH.
4694 int NbArgs = MCLOHIdToNbArgs(Kind);
4696 assert(NbArgs != -1 && "Invalid number of arguments");
// Collect exactly NbArgs comma-separated label symbols.
4698 SmallVector<MCSymbol *, 3> Args;
4699 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4701 if (getParser().parseIdentifier(Name))
4702 return TokError("expected identifier in directive");
4703 Args.push_back(getContext().GetOrCreateSymbol(Name));
// The final argument is not followed by a comma.
4705 if (Idx + 1 == NbArgs)
4707 if (getLexer().isNot(AsmToken::Comma))
4708 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4711 if (getLexer().isNot(AsmToken::EndOfStatement))
4712 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4714 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
/// classifySymbolRef - Decompose Expr into an optional ARM64 (ELF)
/// modifier, an optional Darwin symbol-ref variant kind, and an optional
/// constant addend. Returns true if the expression is a usable symbol
/// reference, i.e. it does not mix Darwin and ELF relocation syntax.
4719 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4720 ARM64MCExpr::VariantKind &ELFRefKind,
4721 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4722 const MCConstantExpr *&Addend) {
4723 ELFRefKind = ARM64MCExpr::VK_INVALID;
4724 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off a target-specific wrapper (e.g. :lo12:) and record its kind.
4726 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4727 ELFRefKind = AE->getKind();
4728 Expr = AE->getSubExpr();
4731 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4733 // It's a simple symbol reference with no addend.
4734 DarwinRefKind = SE->getKind();
// Otherwise expect "symbol + constant" as a binary add expression.
4739 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4743 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4746 DarwinRefKind = SE->getKind();
4748 if (BE->getOpcode() != MCBinaryExpr::Add)
4751 // See if the addend is a constant, otherwise there's more going
4752 // on here than we can deal with.
4753 Addend = dyn_cast<MCConstantExpr>(BE->getRHS());
4757 // It's some symbol reference + a constant addend, but really
4758 // shouldn't use both Darwin and ELF syntax.
4759 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4760 DarwinRefKind == MCSymbolRefExpr::VK_None;
4763 /// Force static initialization.
/// Registers the ARM64 assembly parser with the target registry so the
/// MC layer can locate it for TheARM64Target.
4764 extern "C" void LLVMInitializeARM64AsmParser() {
4765 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
4768 #define GET_REGISTER_MATCHER
4769 #define GET_MATCHER_IMPLEMENTATION
4770 #include "ARM64GenAsmMatcher.inc"
4772 // Define this matcher function after the auto-generated include so we
4773 // have the match class enum definitions.
// Returns Match_Success when AsmOp satisfies the requested target-specific
// match class, Match_InvalidOperand otherwise.
4774 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4776 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4777 // If the kind is a token for a literal immediate, check if our asm
4778 // operand matches. This is for InstAliases which have a fixed-value
4779 // immediate in the syntax.
4780 int64_t ExpectedVal;
4783 return Match_InvalidOperand;
// NOTE(review): ExpectedVal is presumably assigned per match-class kind in
// the switch preceding this point — confirm against the full switch body.
4825 return Match_InvalidOperand;
// Only a constant immediate can match a fixed-value literal class.
4826 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4828 return Match_InvalidOperand;
4829 if (CE->getValue() == ExpectedVal)
4830 return Match_Success;
4831 return Match_InvalidOperand;