1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 unsigned parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind, bool expected);
59 bool parseOptionalShift(OperandVector &Operands);
60 bool parseOptionalExtend(OperandVector &Operands);
61 bool parseRegister(OperandVector &Operands);
62 bool parseMemory(OperandVector &Operands);
63 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64 bool parseVectorList(OperandVector &Operands);
65 bool parseOperand(OperandVector &Operands, bool isCondCode,
68 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70 bool showMatchError(SMLoc Loc, unsigned ErrCode);
72 bool parseDirectiveWord(unsigned Size, SMLoc L);
73 bool parseDirectiveTLSDescCall(SMLoc L);
75 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
80 unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
89 OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
92 OperandMatchResultTy tryParseMSRSystemRegister(OperandVector &Operands);
93 OperandMatchResultTy tryParseCPSRField(OperandVector &Operands);
94 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
95 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
98 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
99 bool tryParseVectorRegister(OperandVector &Operands);
102 enum ARM64MatchResultTy {
103 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
104 #define GET_OPERAND_DIAGNOSTIC_TYPES
105 #include "ARM64GenAsmMatcher.inc"
107 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
108 const MCInstrInfo &MII)
109 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
110 MCAsmParserExtension::Initialize(_Parser);
113 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
114 SMLoc NameLoc, OperandVector &Operands);
115 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
116 virtual bool ParseDirective(AsmToken DirectiveID);
117 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
119 static bool classifySymbolRef(const MCExpr *Expr,
120 ARM64MCExpr::VariantKind &ELFRefKind,
121 MCSymbolRefExpr::VariantKind &DarwinRefKind,
122 const MCConstantExpr *&Addend);
124 } // end anonymous namespace
128 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
130 class ARM64Operand : public MCParsedAsmOperand {
133 ImmediateOffset, // pre-indexed, no writeback
134 RegisterOffset // register offset, with optional extend
155 SMLoc StartLoc, EndLoc, OffsetLoc;
160 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
168 struct VectorListOp {
171 unsigned NumElements;
172 unsigned ElementKind;
175 struct VectorIndexOp {
184 unsigned Val; // Encoded 8-bit representation.
188 unsigned Val; // Not the enum since not all values have names.
191 struct SystemRegisterOp {
192 // 16-bit immediate, usually from the ARM64SYS::SystemRegister enum,
193 // but not limited to those values.
198 ARM64SYS::CPSRField Field;
217 // This is for all forms of ARM64 address expressions
219 unsigned BaseRegNum, OffsetRegNum;
220 ARM64_AM::ExtendType ExtType;
223 const MCExpr *OffsetImm;
230 struct VectorListOp VectorList;
231 struct VectorIndexOp VectorIndex;
233 struct FPImmOp FPImm;
234 struct BarrierOp Barrier;
235 struct SystemRegisterOp SystemRegister;
236 struct CPSRFieldOp CPSRField;
237 struct SysCRImmOp SysCRImm;
238 struct PrefetchOp Prefetch;
239 struct ShifterOp Shifter;
240 struct ExtendOp Extend;
244 // Keep the MCContext around as the MCExprs may need manipulated during
245 // the add<>Operands() calls.
248 ARM64Operand(KindTy K, MCContext &_Ctx)
249 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
252 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
254 StartLoc = o.StartLoc;
269 case k_SystemRegister:
270 SystemRegister = o.SystemRegister;
273 CPSRField = o.CPSRField;
279 VectorList = o.VectorList;
282 VectorIndex = o.VectorIndex;
285 SysCRImm = o.SysCRImm;
288 Prefetch = o.Prefetch;
302 /// getStartLoc - Get the location of the first token of this operand.
303 SMLoc getStartLoc() const { return StartLoc; }
304 /// getEndLoc - Get the location of the last token of this operand.
305 SMLoc getEndLoc() const { return EndLoc; }
306 /// getOffsetLoc - Get the location of the offset of this memory operand.
307 SMLoc getOffsetLoc() const { return OffsetLoc; }
309 StringRef getToken() const {
310 assert(Kind == k_Token && "Invalid access!");
311 return StringRef(Tok.Data, Tok.Length);
314 bool isTokenSuffix() const {
315 assert(Kind == k_Token && "Invalid access!");
319 const MCExpr *getImm() const {
320 assert(Kind == k_Immediate && "Invalid access!");
324 unsigned getFPImm() const {
325 assert(Kind == k_FPImm && "Invalid access!");
329 unsigned getBarrier() const {
330 assert(Kind == k_Barrier && "Invalid access!");
334 uint16_t getSystemRegister() const {
335 assert(Kind == k_SystemRegister && "Invalid access!");
336 return SystemRegister.Val;
339 ARM64SYS::CPSRField getCPSRField() const {
340 assert(Kind == k_CPSRField && "Invalid access!");
341 return CPSRField.Field;
344 unsigned getReg() const {
345 assert(Kind == k_Register && "Invalid access!");
349 unsigned getVectorListStart() const {
350 assert(Kind == k_VectorList && "Invalid access!");
351 return VectorList.RegNum;
354 unsigned getVectorListCount() const {
355 assert(Kind == k_VectorList && "Invalid access!");
356 return VectorList.Count;
359 unsigned getVectorIndex() const {
360 assert(Kind == k_VectorIndex && "Invalid access!");
361 return VectorIndex.Val;
364 unsigned getSysCR() const {
365 assert(Kind == k_SysCR && "Invalid access!");
369 unsigned getPrefetch() const {
370 assert(Kind == k_Prefetch && "Invalid access!");
374 unsigned getShifter() const {
375 assert(Kind == k_Shifter && "Invalid access!");
379 unsigned getExtend() const {
380 assert(Kind == k_Extend && "Invalid access!");
384 bool isImm() const { return Kind == k_Immediate; }
385 bool isSImm9() const {
388 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
391 int64_t Val = MCE->getValue();
392 return (Val >= -256 && Val < 256);
394 bool isSImm7s4() const {
397 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
400 int64_t Val = MCE->getValue();
401 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
403 bool isSImm7s8() const {
406 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
409 int64_t Val = MCE->getValue();
410 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
412 bool isSImm7s16() const {
415 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
418 int64_t Val = MCE->getValue();
419 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
421 bool isImm0_7() const {
424 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
427 int64_t Val = MCE->getValue();
428 return (Val >= 0 && Val < 8);
430 bool isImm1_8() const {
433 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
436 int64_t Val = MCE->getValue();
437 return (Val > 0 && Val < 9);
439 bool isImm0_15() const {
442 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
445 int64_t Val = MCE->getValue();
446 return (Val >= 0 && Val < 16);
448 bool isImm1_16() const {
451 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
454 int64_t Val = MCE->getValue();
455 return (Val > 0 && Val < 17);
457 bool isImm0_31() const {
460 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
463 int64_t Val = MCE->getValue();
464 return (Val >= 0 && Val < 32);
466 bool isImm1_31() const {
469 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
472 int64_t Val = MCE->getValue();
473 return (Val >= 1 && Val < 32);
475 bool isImm1_32() const {
478 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
481 int64_t Val = MCE->getValue();
482 return (Val >= 1 && Val < 33);
484 bool isImm0_63() const {
487 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
490 int64_t Val = MCE->getValue();
491 return (Val >= 0 && Val < 64);
493 bool isImm1_63() const {
496 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
499 int64_t Val = MCE->getValue();
500 return (Val >= 1 && Val < 64);
502 bool isImm1_64() const {
505 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
508 int64_t Val = MCE->getValue();
509 return (Val >= 1 && Val < 65);
511 bool isImm0_127() const {
514 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
517 int64_t Val = MCE->getValue();
518 return (Val >= 0 && Val < 128);
520 bool isImm0_255() const {
523 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
526 int64_t Val = MCE->getValue();
527 return (Val >= 0 && Val < 256);
529 bool isImm0_65535() const {
532 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
535 int64_t Val = MCE->getValue();
536 return (Val >= 0 && Val < 65536);
538 bool isLogicalImm32() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
546 bool isLogicalImm64() const {
549 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
552 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
554 bool isSIMDImmType10() const {
557 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
560 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
562 bool isBranchTarget26() const {
565 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
568 int64_t Val = MCE->getValue();
571 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
573 bool isBranchTarget19() const {
576 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
579 int64_t Val = MCE->getValue();
582 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
584 bool isBranchTarget14() const {
587 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
590 int64_t Val = MCE->getValue();
593 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
596 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
600 ARM64MCExpr::VariantKind ELFRefKind;
601 MCSymbolRefExpr::VariantKind DarwinRefKind;
602 const MCConstantExpr *Addend;
603 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
607 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
610 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
611 if (ELFRefKind == AllowedModifiers[i])
618 bool isMovZSymbolG3() const {
619 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
620 return isMovWSymbol(Variants);
623 bool isMovZSymbolG2() const {
624 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
625 ARM64MCExpr::VK_TPREL_G2,
626 ARM64MCExpr::VK_DTPREL_G2 };
627 return isMovWSymbol(Variants);
630 bool isMovZSymbolG1() const {
631 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
632 ARM64MCExpr::VK_GOTTPREL_G1,
633 ARM64MCExpr::VK_TPREL_G1,
634 ARM64MCExpr::VK_DTPREL_G1, };
635 return isMovWSymbol(Variants);
638 bool isMovZSymbolG0() const {
639 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
640 ARM64MCExpr::VK_TPREL_G0,
641 ARM64MCExpr::VK_DTPREL_G0 };
642 return isMovWSymbol(Variants);
645 bool isMovKSymbolG2() const {
646 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
647 return isMovWSymbol(Variants);
650 bool isMovKSymbolG1() const {
651 static ARM64MCExpr::VariantKind Variants[] = {
652 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
653 ARM64MCExpr::VK_DTPREL_G1_NC
655 return isMovWSymbol(Variants);
658 bool isMovKSymbolG0() const {
659 static ARM64MCExpr::VariantKind Variants[] = {
660 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
661 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
663 return isMovWSymbol(Variants);
666 bool isFPImm() const { return Kind == k_FPImm; }
667 bool isBarrier() const { return Kind == k_Barrier; }
668 bool isSystemRegister() const {
669 if (Kind == k_SystemRegister)
671 // SPSel is legal for both the system register and the CPSR-field
672 // variants of MSR, so special case that. Fugly.
673 return (Kind == k_CPSRField && getCPSRField() == ARM64SYS::cpsr_SPSel);
675 bool isSystemCPSRField() const { return Kind == k_CPSRField; }
676 bool isReg() const { return Kind == k_Register && !Reg.isVector; }
677 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
679 /// Is this a vector list with the type implicit (presumably attached to the
680 /// instruction itself)?
681 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
682 return Kind == k_VectorList && VectorList.Count == NumRegs &&
683 !VectorList.ElementKind;
686 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
687 bool isTypedVectorList() const {
688 if (Kind != k_VectorList)
690 if (VectorList.Count != NumRegs)
692 if (VectorList.ElementKind != ElementKind)
694 return VectorList.NumElements == NumElements;
697 bool isVectorIndexB() const {
698 return Kind == k_VectorIndex && VectorIndex.Val < 16;
700 bool isVectorIndexH() const {
701 return Kind == k_VectorIndex && VectorIndex.Val < 8;
703 bool isVectorIndexS() const {
704 return Kind == k_VectorIndex && VectorIndex.Val < 4;
706 bool isVectorIndexD() const {
707 return Kind == k_VectorIndex && VectorIndex.Val < 2;
709 bool isToken() const { return Kind == k_Token; }
710 bool isTokenEqual(StringRef Str) const {
711 return Kind == k_Token && getToken() == Str;
713 bool isMem() const { return Kind == k_Memory; }
714 bool isSysCR() const { return Kind == k_SysCR; }
715 bool isPrefetch() const { return Kind == k_Prefetch; }
716 bool isShifter() const { return Kind == k_Shifter; }
717 bool isExtend() const {
718 // lsl is an alias for UXTW but will be parsed as a k_Shifter operand.
720 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
721 return ST == ARM64_AM::LSL;
723 return Kind == k_Extend;
725 bool isExtend64() const {
726 if (Kind != k_Extend)
728 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
729 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
730 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
732 bool isExtendLSL64() const {
733 // lsl is an alias for UXTX but will be parsed as a k_Shifter operand.
735 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
736 return ST == ARM64_AM::LSL;
738 if (Kind != k_Extend)
740 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
741 return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
744 bool isArithmeticShifter() const {
748 // An arithmetic shifter is LSL, LSR, or ASR.
749 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
750 return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
753 bool isMovImm32Shifter() const {
757 // A 32-bit MOVi shifter is LSL of 0 or 16.
758 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
759 if (ST != ARM64_AM::LSL)
761 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
762 return (Val == 0 || Val == 16);
765 bool isMovImm64Shifter() const {
769 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
770 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
771 if (ST != ARM64_AM::LSL)
773 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
774 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
777 bool isAddSubShifter() const {
781 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
782 unsigned Val = Shifter.Val;
783 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
784 (ARM64_AM::getShiftValue(Val) == 0 ||
785 ARM64_AM::getShiftValue(Val) == 12);
788 bool isLogicalVecShifter() const {
792 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
793 unsigned Val = Shifter.Val;
794 unsigned Shift = ARM64_AM::getShiftValue(Val);
795 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
796 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
799 bool isLogicalVecHalfWordShifter() const {
800 if (!isLogicalVecShifter())
803 // A logical vector half-word shifter is a left shift by 0 or 8.
804 unsigned Val = Shifter.Val;
805 unsigned Shift = ARM64_AM::getShiftValue(Val);
806 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
807 (Shift == 0 || Shift == 8);
810 bool isMoveVecShifter() const {
814 // A move vector shifter is an MSL shift of 8 or 16.
815 unsigned Val = Shifter.Val;
816 unsigned Shift = ARM64_AM::getShiftValue(Val);
817 return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
818 (Shift == 8 || Shift == 16);
821 bool isMemoryRegisterOffset8() const {
822 return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
825 bool isMemoryRegisterOffset16() const {
826 return isMem() && Mem.Mode == RegisterOffset &&
827 (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
830 bool isMemoryRegisterOffset32() const {
831 return isMem() && Mem.Mode == RegisterOffset &&
832 (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
835 bool isMemoryRegisterOffset64() const {
836 return isMem() && Mem.Mode == RegisterOffset &&
837 (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
840 bool isMemoryRegisterOffset128() const {
841 return isMem() && Mem.Mode == RegisterOffset &&
842 (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
845 bool isMemoryUnscaled() const {
848 if (Mem.Mode != ImmediateOffset)
852 // Make sure the immediate value is valid.
853 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
856 // The offset must fit in a signed 9-bit unscaled immediate.
857 int64_t Value = CE->getValue();
858 return (Value >= -256 && Value < 256);
860 // Fallback unscaled operands are for aliases of LDR/STR that fall back
861 // to LDUR/STUR when the offset is not legal for the former but is for
862 // the latter. As such, in addition to checking for being a legal unscaled
863 // address, also check that it is not a legal scaled address. This avoids
864 // ambiguity in the matcher.
865 bool isMemoryUnscaledFB8() const {
866 return isMemoryUnscaled() && !isMemoryIndexed8();
868 bool isMemoryUnscaledFB16() const {
869 return isMemoryUnscaled() && !isMemoryIndexed16();
871 bool isMemoryUnscaledFB32() const {
872 return isMemoryUnscaled() && !isMemoryIndexed32();
874 bool isMemoryUnscaledFB64() const {
875 return isMemoryUnscaled() && !isMemoryIndexed64();
877 bool isMemoryUnscaledFB128() const {
878 return isMemoryUnscaled() && !isMemoryIndexed128();
880 bool isMemoryIndexed(unsigned Scale) const {
883 if (Mem.Mode != ImmediateOffset)
887 // Make sure the immediate value is valid.
888 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
891 // The offset must be a positive multiple of the scale and in range of
892 // encoding with a 12-bit immediate.
893 int64_t Value = CE->getValue();
894 return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
897 // If it's not a constant, check for some expressions we know.
898 const MCExpr *Expr = Mem.OffsetImm;
899 ARM64MCExpr::VariantKind ELFRefKind;
900 MCSymbolRefExpr::VariantKind DarwinRefKind;
901 const MCConstantExpr *Addend;
902 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
904 // If we don't understand the expression, assume the best and
905 // let the fixup and relocation code deal with it.
909 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
910 ELFRefKind == ARM64MCExpr::VK_LO12 ||
911 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
912 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
913 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
914 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
915 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
916 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
917 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
918 // Note that we don't range-check the addend. It's adjusted modulo page
919 // size when converted, so there is no "out of range" condition when using
921 int64_t Value = Addend ? Addend->getValue() : 0;
922 return Value >= 0 && (Value % Scale) == 0;
923 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
924 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
925 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
931 bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
932 bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
933 bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
934 bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
935 bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
936 bool isMemoryNoIndex() const {
939 if (Mem.Mode != ImmediateOffset)
944 // Make sure the immediate value is valid. Only zero is allowed.
945 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
946 if (!CE || CE->getValue() != 0)
950 bool isMemorySIMDNoIndex() const {
953 if (Mem.Mode != ImmediateOffset)
955 return Mem.OffsetImm == 0;
957 bool isMemoryIndexedSImm9() const {
958 if (!isMem() || Mem.Mode != ImmediateOffset)
962 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
963 assert(CE && "Non-constant pre-indexed offset!");
964 int64_t Value = CE->getValue();
965 return Value >= -256 && Value <= 255;
967 bool isMemoryIndexed32SImm7() const {
968 if (!isMem() || Mem.Mode != ImmediateOffset)
972 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
973 assert(CE && "Non-constant pre-indexed offset!");
974 int64_t Value = CE->getValue();
975 return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
977 bool isMemoryIndexed64SImm7() const {
978 if (!isMem() || Mem.Mode != ImmediateOffset)
982 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
983 assert(CE && "Non-constant pre-indexed offset!");
984 int64_t Value = CE->getValue();
985 return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
987 bool isMemoryIndexed128SImm7() const {
988 if (!isMem() || Mem.Mode != ImmediateOffset)
992 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
993 assert(CE && "Non-constant pre-indexed offset!");
994 int64_t Value = CE->getValue();
995 return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
998 bool isAdrpLabel() const {
999 // Validation was handled during parsing, so we just sanity check that
1000 // something didn't go haywire.
1004 bool isAdrLabel() const {
1005 // Validation was handled during parsing, so we just sanity check that
1006 // something didn't go haywire.
1010 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1011 // Add as immediates when possible. Null MCExpr = 0.
1013 Inst.addOperand(MCOperand::CreateImm(0));
1014 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1015 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1017 Inst.addOperand(MCOperand::CreateExpr(Expr));
1020 void addRegOperands(MCInst &Inst, unsigned N) const {
1021 assert(N == 1 && "Invalid number of operands!");
1022 Inst.addOperand(MCOperand::CreateReg(getReg()));
1025 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1026 assert(N == 1 && "Invalid number of operands!");
1027 Inst.addOperand(MCOperand::CreateReg(getReg()));
1030 template <unsigned NumRegs>
1031 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1032 assert(N == 1 && "Invalid number of operands!");
1033 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1034 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1035 unsigned FirstReg = FirstRegs[NumRegs - 1];
1038 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1041 template <unsigned NumRegs>
1042 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1043 assert(N == 1 && "Invalid number of operands!");
1044 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1045 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1046 unsigned FirstReg = FirstRegs[NumRegs - 1];
1049 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1052 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1053 assert(N == 1 && "Invalid number of operands!");
1054 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1057 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1058 assert(N == 1 && "Invalid number of operands!");
1059 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1062 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1063 assert(N == 1 && "Invalid number of operands!");
1064 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1067 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1068 assert(N == 1 && "Invalid number of operands!");
1069 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1072 void addImmOperands(MCInst &Inst, unsigned N) const {
1073 assert(N == 1 && "Invalid number of operands!");
1074 // If this is a pageoff symrefexpr with an addend, adjust the addend
1075 // to be only the page-offset portion. Otherwise, just add the expr
1077 addExpr(Inst, getImm());
1080 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1081 addImmOperands(Inst, N);
1084 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1085 addImmOperands(Inst, N);
1088 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1089 assert(N == 1 && "Invalid number of operands!");
1090 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1091 assert(MCE && "Invalid constant immediate operand!");
1092 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1095 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1096 assert(N == 1 && "Invalid number of operands!");
1097 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1098 assert(MCE && "Invalid constant immediate operand!");
1099 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1102 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1103 assert(N == 1 && "Invalid number of operands!");
1104 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1105 assert(MCE && "Invalid constant immediate operand!");
1106 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1109 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1110 assert(N == 1 && "Invalid number of operands!");
1111 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1112 assert(MCE && "Invalid constant immediate operand!");
1113 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1116 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1117 assert(N == 1 && "Invalid number of operands!");
1118 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1119 assert(MCE && "Invalid constant immediate operand!");
1120 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1123 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1124 assert(N == 1 && "Invalid number of operands!");
1125 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1126 assert(MCE && "Invalid constant immediate operand!");
1127 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1130 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1131 assert(N == 1 && "Invalid number of operands!");
1132 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1133 assert(MCE && "Invalid constant immediate operand!");
1134 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1137 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1138 assert(N == 1 && "Invalid number of operands!");
1139 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1140 assert(MCE && "Invalid constant immediate operand!");
1141 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1144 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1147 assert(MCE && "Invalid constant immediate operand!");
1148 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1151 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1152 assert(N == 1 && "Invalid number of operands!");
1153 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1154 assert(MCE && "Invalid constant immediate operand!");
1155 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1158 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1159 assert(N == 1 && "Invalid number of operands!");
1160 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1161 assert(MCE && "Invalid constant immediate operand!");
1162 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1165 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1166 assert(N == 1 && "Invalid number of operands!");
1167 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1168 assert(MCE && "Invalid constant immediate operand!");
1169 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1172 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1173 assert(N == 1 && "Invalid number of operands!");
1174 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1175 assert(MCE && "Invalid constant immediate operand!");
1176 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1179 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1180 assert(N == 1 && "Invalid number of operands!");
1181 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1182 assert(MCE && "Invalid constant immediate operand!");
1183 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1186 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1187 assert(N == 1 && "Invalid number of operands!");
1188 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1189 assert(MCE && "Invalid constant immediate operand!");
1190 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1193 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1194 assert(N == 1 && "Invalid number of operands!");
1195 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1196 assert(MCE && "Invalid constant immediate operand!");
1197 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1200 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1201 assert(N == 1 && "Invalid number of operands!");
1202 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1203 assert(MCE && "Invalid constant immediate operand!");
1204 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1207 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1208 assert(N == 1 && "Invalid number of operands!");
1209 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1210 assert(MCE && "Invalid logical immediate operand!");
1211 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1212 Inst.addOperand(MCOperand::CreateImm(encoding));
1215 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1216 assert(N == 1 && "Invalid number of operands!");
1217 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1218 assert(MCE && "Invalid logical immediate operand!");
1219 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1220 Inst.addOperand(MCOperand::CreateImm(encoding));
1223 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1224 assert(N == 1 && "Invalid number of operands!");
1225 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1226 assert(MCE && "Invalid immediate operand!");
1227 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1228 Inst.addOperand(MCOperand::CreateImm(encoding));
1231 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1232 // Branch operands don't encode the low bits, so shift them off
1233 // here. If it's a label, however, just put it on directly as there's
1234 // not enough information now to do anything.
1235 assert(N == 1 && "Invalid number of operands!");
1236 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1238 addExpr(Inst, getImm());
1241 assert(MCE && "Invalid constant immediate operand!");
1242 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1245 void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
1246 // Branch operands don't encode the low bits, so shift them off
1247 // here. If it's a label, however, just put it on directly as there's
1248 // not enough information now to do anything.
1249 assert(N == 1 && "Invalid number of operands!");
1250 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1252 addExpr(Inst, getImm());
1255 assert(MCE && "Invalid constant immediate operand!");
1256 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1259 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1260 // Branch operands don't encode the low bits, so shift them off
1261 // here. If it's a label, however, just put it on directly as there's
1262 // not enough information now to do anything.
1263 assert(N == 1 && "Invalid number of operands!");
1264 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1266 addExpr(Inst, getImm());
1269 assert(MCE && "Invalid constant immediate operand!");
1270 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1273 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1274 assert(N == 1 && "Invalid number of operands!");
1275 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1278 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1279 assert(N == 1 && "Invalid number of operands!");
1280 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1283 void addSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1284 assert(N == 1 && "Invalid number of operands!");
1285 if (Kind == k_SystemRegister)
1286 Inst.addOperand(MCOperand::CreateImm(getSystemRegister()));
1288 assert(Kind == k_CPSRField && getCPSRField() == ARM64SYS::cpsr_SPSel);
1289 Inst.addOperand(MCOperand::CreateImm(ARM64SYS::SPSel));
1293 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1294 assert(N == 1 && "Invalid number of operands!");
1295 Inst.addOperand(MCOperand::CreateImm(getCPSRField()));
1298 void addSysCROperands(MCInst &Inst, unsigned N) const {
1299 assert(N == 1 && "Invalid number of operands!");
1300 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1303 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1304 assert(N == 1 && "Invalid number of operands!");
1305 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
1308 void addShifterOperands(MCInst &Inst, unsigned N) const {
1309 assert(N == 1 && "Invalid number of operands!");
1310 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1313 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1314 assert(N == 1 && "Invalid number of operands!");
1315 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1318 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1319 assert(N == 1 && "Invalid number of operands!");
1320 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1323 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1324 assert(N == 1 && "Invalid number of operands!");
1325 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1328 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1329 assert(N == 1 && "Invalid number of operands!");
1330 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1333 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1334 assert(N == 1 && "Invalid number of operands!");
1335 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1338 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1340 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1343 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1344 assert(N == 1 && "Invalid number of operands!");
1345 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1348 void addExtendOperands(MCInst &Inst, unsigned N) const {
1349 assert(N == 1 && "Invalid number of operands!");
1350 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
1352 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1353 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1354 ARM64_AM::getShiftValue(getShifter()));
1355 Inst.addOperand(MCOperand::CreateImm(imm));
1357 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1360 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1361 assert(N == 1 && "Invalid number of operands!");
1362 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1365 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1366 assert(N == 1 && "Invalid number of operands!");
1367 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1369 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1370 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1371 ARM64_AM::getShiftValue(getShifter()));
1372 Inst.addOperand(MCOperand::CreateImm(imm));
1374 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1377 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1378 assert(N == 3 && "Invalid number of operands!");
1380 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1381 Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
1382 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1383 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
1386 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1387 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1390 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1391 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1394 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1395 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1398 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1399 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1402 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1403 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
1406 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1407 unsigned Scale) const {
1408 // Add the base register operand.
1409 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1411 if (!Mem.OffsetImm) {
1412 // There isn't an offset.
1413 Inst.addOperand(MCOperand::CreateImm(0));
1417 // Add the offset operand.
1418 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1419 assert(CE->getValue() % Scale == 0 &&
1420 "Offset operand must be multiple of the scale!");
1422 // The MCInst offset operand doesn't include the low bits (like the
1423 // instruction encoding).
1424 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1427 // If this is a pageoff symrefexpr with an addend, the linker will
1428 // do the scaling of the addend.
1430 // Otherwise we don't know what this is, so just add the scaling divide to
1431 // the expression and let the MC fixup evaluation code deal with it.
1432 const MCExpr *Expr = Mem.OffsetImm;
1433 ARM64MCExpr::VariantKind ELFRefKind;
1434 MCSymbolRefExpr::VariantKind DarwinRefKind;
1435 const MCConstantExpr *Addend;
1437 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1439 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1440 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1444 Inst.addOperand(MCOperand::CreateExpr(Expr));
1447 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1448 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1449 // Add the base register operand.
1450 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1452 // Add the offset operand.
1454 Inst.addOperand(MCOperand::CreateImm(0));
1456 // Only constant offsets supported.
1457 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1458 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1462 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1463 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1464 addMemoryIndexedOperands(Inst, N, 16);
1467 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1468 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1469 addMemoryIndexedOperands(Inst, N, 8);
1472 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1473 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1474 addMemoryIndexedOperands(Inst, N, 4);
1477 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1478 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1479 addMemoryIndexedOperands(Inst, N, 2);
1482 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1483 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1484 addMemoryIndexedOperands(Inst, N, 1);
1487 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1488 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1489 // Add the base register operand (the offset is always zero, so ignore it).
1490 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1493 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1494 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1495 // Add the base register operand (the offset is always zero, so ignore it).
1496 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1499 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1500 unsigned Scale) const {
1501 assert(N == 2 && "Invalid number of operands!");
1503 // Add the base register operand.
1504 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1506 // Add the offset operand.
1508 if (Mem.OffsetImm) {
1509 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1510 assert(CE && "Non-constant indexed offset operand!");
1511 Offset = CE->getValue();
1515 assert(Offset % Scale == 0 &&
1516 "Offset operand must be a multiple of the scale!");
1520 Inst.addOperand(MCOperand::CreateImm(Offset));
1523 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1524 addMemoryWritebackIndexedOperands(Inst, N, 1);
1527 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1528 addMemoryWritebackIndexedOperands(Inst, N, 4);
1531 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1532 addMemoryWritebackIndexedOperands(Inst, N, 8);
1535 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1536 addMemoryWritebackIndexedOperands(Inst, N, 16);
1539 virtual void print(raw_ostream &OS) const;
1541 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1543 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1544 Op->Tok.Data = Str.data();
1545 Op->Tok.Length = Str.size();
1546 Op->Tok.IsSuffix = IsSuffix;
1552 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1553 SMLoc E, MCContext &Ctx) {
1554 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1555 Op->Reg.RegNum = RegNum;
1556 Op->Reg.isVector = isVector;
1562 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1563 unsigned NumElements, char ElementKind,
1564 SMLoc S, SMLoc E, MCContext &Ctx) {
1565 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1566 Op->VectorList.RegNum = RegNum;
1567 Op->VectorList.Count = Count;
1568 Op->VectorList.NumElements = NumElements;
1569 Op->VectorList.ElementKind = ElementKind;
1575 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1577 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1578 Op->VectorIndex.Val = Idx;
1584 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1586 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1593 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1594 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1595 Op->FPImm.Val = Val;
1601 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1602 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1603 Op->Barrier.Val = Val;
1609 static ARM64Operand *CreateSystemRegister(uint16_t Val, SMLoc S,
1611 ARM64Operand *Op = new ARM64Operand(k_SystemRegister, Ctx);
1612 Op->SystemRegister.Val = Val;
1618 static ARM64Operand *CreateCPSRField(ARM64SYS::CPSRField Field, SMLoc S,
1620 ARM64Operand *Op = new ARM64Operand(k_CPSRField, Ctx);
1621 Op->CPSRField.Field = Field;
1627 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1628 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1630 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1631 Op->Mem.BaseRegNum = BaseRegNum;
1632 Op->Mem.OffsetRegNum = 0;
1633 Op->Mem.OffsetImm = Off;
1634 Op->Mem.ExtType = ARM64_AM::UXTX;
1635 Op->Mem.ShiftVal = 0;
1636 Op->Mem.ExplicitShift = false;
1637 Op->Mem.Mode = ImmediateOffset;
1638 Op->OffsetLoc = OffsetLoc;
1644 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1645 ARM64_AM::ExtendType ExtType,
1646 unsigned ShiftVal, bool ExplicitShift,
1647 SMLoc S, SMLoc E, MCContext &Ctx) {
1648 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1649 Op->Mem.BaseRegNum = BaseReg;
1650 Op->Mem.OffsetRegNum = OffsetReg;
1651 Op->Mem.OffsetImm = 0;
1652 Op->Mem.ExtType = ExtType;
1653 Op->Mem.ShiftVal = ShiftVal;
1654 Op->Mem.ExplicitShift = ExplicitShift;
1655 Op->Mem.Mode = RegisterOffset;
1661 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1663 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1664 Op->SysCRImm.Val = Val;
1670 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1671 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1672 Op->Prefetch.Val = Val;
1678 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1679 SMLoc S, SMLoc E, MCContext &Ctx) {
1680 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1681 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1687 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1688 SMLoc S, SMLoc E, MCContext &Ctx) {
1689 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1690 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1697 } // end anonymous namespace.
// Debug dump of an ARM64Operand. NOTE(review): this extracted listing is
// incomplete -- the switch on Kind, its case labels, break statements and
// the closing braces were lost; only fragments of the per-kind printing
// bodies survive below. Restore the full function from upstream before
// compiling.
1699 void ARM64Operand::print(raw_ostream &OS) const {
1702 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
// Barrier operands print via their symbolic option name.
1707 ARM64SYS::getBarrierOptName((ARM64SYS::BarrierOption)getBarrier());
1716 case k_SystemRegister: {
// Named system registers print as text; the raw-immediate form below is
// presumably the fallback when no name exists -- confirm against upstream.
1717 const char *Name = ARM64SYS::getSystemRegisterName(
1718 (ARM64SYS::SystemRegister)getSystemRegister());
1719 OS << "<systemreg ";
1723 OS << "#" << getSystemRegister();
1728 const char *Name = ARM64SYS::getCPSRFieldName(getCPSRField());
1729 OS << "<cpsrfield " << Name << ">";
1733 getImm()->print(OS);
1739 OS << "<register " << getReg() << ">";
1741 case k_VectorList: {
1742 OS << "<vectorlist ";
// Vector-list registers are printed as consecutive numbers from the start
// register.
1743 unsigned Reg = getVectorListStart();
1744 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1745 OS << Reg + i << " ";
1750 OS << "<vectorindex " << getVectorIndex() << ">";
1753 OS << "'" << getToken() << "'";
1756 OS << "c" << getSysCR();
// Prefetch ops with a symbolic name print it; others print the raw 5-bit
// immediate.
1760 if (ARM64_AM::isNamedPrefetchOp(getPrefetch()))
1761 OS << ARM64_AM::getPrefetchOpName((ARM64_AM::PrefetchOp)getPrefetch());
1763 OS << "#" << getPrefetch();
// Shifter and extend operands decode their packed value back into
// type + amount for display.
1767 unsigned Val = getShifter();
1768 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1769 << ARM64_AM::getShiftValue(Val) << ">";
1773 unsigned Val = getExtend();
1774 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1775 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1781 /// @name Auto-generated Match Functions
1784 static unsigned MatchRegisterName(StringRef Name);
1788 static unsigned matchVectorRegName(StringRef Name) {
1789 return StringSwitch<unsigned>(Name)
1790 .Case("v0", ARM64::Q0)
1791 .Case("v1", ARM64::Q1)
1792 .Case("v2", ARM64::Q2)
1793 .Case("v3", ARM64::Q3)
1794 .Case("v4", ARM64::Q4)
1795 .Case("v5", ARM64::Q5)
1796 .Case("v6", ARM64::Q6)
1797 .Case("v7", ARM64::Q7)
1798 .Case("v8", ARM64::Q8)
1799 .Case("v9", ARM64::Q9)
1800 .Case("v10", ARM64::Q10)
1801 .Case("v11", ARM64::Q11)
1802 .Case("v12", ARM64::Q12)
1803 .Case("v13", ARM64::Q13)
1804 .Case("v14", ARM64::Q14)
1805 .Case("v15", ARM64::Q15)
1806 .Case("v16", ARM64::Q16)
1807 .Case("v17", ARM64::Q17)
1808 .Case("v18", ARM64::Q18)
1809 .Case("v19", ARM64::Q19)
1810 .Case("v20", ARM64::Q20)
1811 .Case("v21", ARM64::Q21)
1812 .Case("v22", ARM64::Q22)
1813 .Case("v23", ARM64::Q23)
1814 .Case("v24", ARM64::Q24)
1815 .Case("v25", ARM64::Q25)
1816 .Case("v26", ARM64::Q26)
1817 .Case("v27", ARM64::Q27)
1818 .Case("v28", ARM64::Q28)
1819 .Case("v29", ARM64::Q29)
1820 .Case("v30", ARM64::Q30)
1821 .Case("v31", ARM64::Q31)
// Returns true if Name is a legal vector arrangement suffix (e.g. ".8b",
// ".4s"), compared case-insensitively. NOTE(review): the extracted listing
// lost the StringSwitch .Case lines and the .Default; only the header and
// a trailing comment survive. Restore the case list from upstream.
1825 static bool isValidVectorKind(StringRef Name) {
1826 return StringSwitch<bool>(Name.lower())
1836 // Accept the width neutral ones, too, for verbose syntax. If those
1837 // aren't used in the right places, the token operand won't match so
1838 // all will work out.
1846 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1847 char &ElementKind) {
1848 assert(isValidVectorKind(Name));
1850 ElementKind = Name.lower()[Name.size() - 1];
1853 if (Name.size() == 2)
1856 // Parse the lane count
1857 Name = Name.drop_front();
1858 while (isdigit(Name.front())) {
1859 NumElements = 10 * NumElements + (Name.front() - '0');
1860 Name = Name.drop_front();
1864 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1866 StartLoc = getLoc();
1867 RegNo = tryParseRegister();
1868 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1869 return (RegNo == (unsigned)-1);
1872 /// tryParseRegister - Try to parse a register name. The token must be an
1873 /// Identifier when called, and if it is a register name the token is eaten and
1874 /// the register is added to the operand list.
1875 int ARM64AsmParser::tryParseRegister() {
1876 const AsmToken &Tok = Parser.getTok();
1877 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1879 std::string lowerCase = Tok.getString().lower();
1880 unsigned RegNum = MatchRegisterName(lowerCase);
1881 // Also handle a few aliases of registers.
1883 RegNum = StringSwitch<unsigned>(lowerCase)
1884 .Case("x29", ARM64::FP)
1885 .Case("x30", ARM64::LR)
1886 .Case("x31", ARM64::XZR)
1887 .Case("w31", ARM64::WZR)
1893 Parser.Lex(); // Eat identifier token.
1897 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1898 /// kind specifier. If it is a register specifier, eat the token and return it.
1899 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1900 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1901 TokError("vector register expected");
1905 StringRef Name = Parser.getTok().getString();
1906 // If there is a kind specifier, it's separated from the register name by
1908 size_t Start = 0, Next = Name.find('.');
1909 StringRef Head = Name.slice(Start, Next);
1910 unsigned RegNum = matchVectorRegName(Head);
1912 if (Next != StringRef::npos) {
1913 Kind = Name.slice(Next, StringRef::npos);
1914 if (!isValidVectorKind(Kind)) {
1915 TokError("invalid vector kind qualifier");
1919 Parser.Lex(); // Eat the register token.
1924 TokError("vector register expected");
// Maps a system-instruction control-register name ("c0".."c15",
// case-insensitive) to its number. NOTE(review): the extracted listing
// lost nearly all of the switch bodies; only the size-dispatched prefix
// checks and the unreachable terminator survive. Presumably unmatched
// names return -1 (callers compare against -1) -- restore from upstream.
1928 static int MatchSysCRName(StringRef Name) {
1929 // Use the same layout as the tablegen'erated register name matcher. Ugly,
1931 switch (Name.size()) {
// Two-character names: "cN" -- first char must be 'c'/'C'.
1935 if (Name[0] != 'c' && Name[0] != 'C')
// Three-character names: "c1N" -- must start with 'c'/'C' then '1'.
1963 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
1984 llvm_unreachable("Unhandled SysCR operand string!");
1988 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
1989 ARM64AsmParser::OperandMatchResultTy
1990 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1992 const AsmToken &Tok = Parser.getTok();
1993 if (Tok.isNot(AsmToken::Identifier))
1994 return MatchOperand_NoMatch;
1996 int Num = MatchSysCRName(Tok.getString());
1998 return MatchOperand_NoMatch;
2000 Parser.Lex(); // Eat identifier token.
2001 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2002 return MatchOperand_Success;
2005 /// tryParsePrefetch - Try to parse a prefetch operand.
2006 ARM64AsmParser::OperandMatchResultTy
2007 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2009 const AsmToken &Tok = Parser.getTok();
2010 // Either an identifier for named values or a 5-bit immediate.
2011 if (Tok.is(AsmToken::Hash)) {
2012 Parser.Lex(); // Eat hash token.
2013 const MCExpr *ImmVal;
2014 if (getParser().parseExpression(ImmVal))
2015 return MatchOperand_ParseFail;
2017 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2019 TokError("immediate value expected for prefetch operand");
2020 return MatchOperand_ParseFail;
2022 unsigned prfop = MCE->getValue();
2024 TokError("prefetch operand out of range, [0,31] expected");
2025 return MatchOperand_ParseFail;
2028 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2029 return MatchOperand_Success;
2032 if (Tok.isNot(AsmToken::Identifier)) {
2033 TokError("pre-fetch hint expected");
2034 return MatchOperand_ParseFail;
2037 unsigned prfop = StringSwitch<unsigned>(Tok.getString())
2038 .Case("pldl1keep", ARM64_AM::PLDL1KEEP)
2039 .Case("pldl1strm", ARM64_AM::PLDL1STRM)
2040 .Case("pldl2keep", ARM64_AM::PLDL2KEEP)
2041 .Case("pldl2strm", ARM64_AM::PLDL2STRM)
2042 .Case("pldl3keep", ARM64_AM::PLDL3KEEP)
2043 .Case("pldl3strm", ARM64_AM::PLDL3STRM)
2044 .Case("pstl1keep", ARM64_AM::PSTL1KEEP)
2045 .Case("pstl1strm", ARM64_AM::PSTL1STRM)
2046 .Case("pstl2keep", ARM64_AM::PSTL2KEEP)
2047 .Case("pstl2strm", ARM64_AM::PSTL2STRM)
2048 .Case("pstl3keep", ARM64_AM::PSTL3KEEP)
2049 .Case("pstl3strm", ARM64_AM::PSTL3STRM)
2051 if (prfop == 0xff) {
2052 TokError("pre-fetch hint expected");
2053 return MatchOperand_ParseFail;
2056 Parser.Lex(); // Eat identifier token.
2057 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2058 return MatchOperand_Success;
2061 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2063 ARM64AsmParser::OperandMatchResultTy
2064 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2067 if (parseSymbolicImmVal(Expr))
2068 return MatchOperand_ParseFail;
2070 ARM64MCExpr::VariantKind ELFRefKind;
2071 MCSymbolRefExpr::VariantKind DarwinRefKind;
2072 const MCConstantExpr *Addend;
2073 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2074 Error(S, "modified label reference + constant expected");
2075 return MatchOperand_ParseFail;
2078 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2079 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2080 // No modifier was specified at all; this is the syntax for an ELF basic
2081 // ADRP relocation (unfortunately).
2082 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2083 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2084 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2086 Error(S, "gotpage label reference not allowed an addend");
2087 return MatchOperand_ParseFail;
2088 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2089 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2090 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2091 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2092 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2093 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2094 // The operand must be an @page or @gotpage qualified symbolref.
2095 Error(S, "page or gotpage label reference expected");
2096 return MatchOperand_ParseFail;
2099 // We have a label reference possibly with addend. The addend is a raw value
2100 // here. The linker will adjust it to only reference the page.
2101 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2102 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2104 return MatchOperand_Success;
2107 /// tryParseAdrLabel - Parse and validate a source label for the ADR
2109 ARM64AsmParser::OperandMatchResultTy
2110 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2113 if (getParser().parseExpression(Expr))
2114 return MatchOperand_ParseFail;
2116 // The operand must be an un-qualified assembler local symbolref.
2117 // FIXME: wrong for ELF.
2118 if (const MCSymbolRefExpr *SRE = dyn_cast<const MCSymbolRefExpr>(Expr)) {
2119 // FIXME: Should reference the MachineAsmInfo to get the private prefix.
2120 bool isTemporary = SRE->getSymbol().getName().startswith("L");
2121 if (!isTemporary || SRE->getKind() != MCSymbolRefExpr::VK_None) {
2122 Error(S, "unqualified, assembler-local label name expected");
2123 return MatchOperand_ParseFail;
2127 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2128 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2130 return MatchOperand_Success;
2133 /// tryParseFPImm - A floating point immediate expression operand.
2134 ARM64AsmParser::OperandMatchResultTy
2135 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2138 if (Parser.getTok().isNot(AsmToken::Hash))
2139 return MatchOperand_NoMatch;
2140 Parser.Lex(); // Eat the '#'.
2142 // Handle negation, as that still comes through as a separate token.
2143 bool isNegative = false;
2144 if (Parser.getTok().is(AsmToken::Minus)) {
2148 const AsmToken &Tok = Parser.getTok();
2149 if (Tok.is(AsmToken::Real)) {
2150 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2151 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2152 // If we had a '-' in front, toggle the sign bit.
2153 IntVal ^= (uint64_t)isNegative << 63;
2154 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2155 Parser.Lex(); // Eat the token.
2156 // Check for out of range values. As an exception, we let Zero through,
2157 // as we handle that special case in post-processing before matching in
2158 // order to use the zero register for it.
2159 if (Val == -1 && !RealVal.isZero()) {
2160 TokError("floating point value out of range");
2161 return MatchOperand_ParseFail;
2163 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2164 return MatchOperand_Success;
2166 if (Tok.is(AsmToken::Integer)) {
2168 if (!isNegative && Tok.getString().startswith("0x")) {
2169 Val = Tok.getIntVal();
2170 if (Val > 255 || Val < 0) {
2171 TokError("encoded floating point value out of range");
2172 return MatchOperand_ParseFail;
2175 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2176 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2177 // If we had a '-' in front, toggle the sign bit.
2178 IntVal ^= (uint64_t)isNegative << 63;
2179 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2181 Parser.Lex(); // Eat the token.
2182 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2183 return MatchOperand_Success;
2186 TokError("invalid floating point immediate");
2187 return MatchOperand_ParseFail;
2190 /// parseCondCodeString - Parse a Condition Code string.
2191 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2192 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2193 .Case("eq", ARM64CC::EQ)
2194 .Case("ne", ARM64CC::NE)
2195 .Case("cs", ARM64CC::CS)
2196 .Case("hs", ARM64CC::CS)
2197 .Case("cc", ARM64CC::CC)
2198 .Case("lo", ARM64CC::CC)
2199 .Case("mi", ARM64CC::MI)
2200 .Case("pl", ARM64CC::PL)
2201 .Case("vs", ARM64CC::VS)
2202 .Case("vc", ARM64CC::VC)
2203 .Case("hi", ARM64CC::HI)
2204 .Case("ls", ARM64CC::LS)
2205 .Case("ge", ARM64CC::GE)
2206 .Case("lt", ARM64CC::LT)
2207 .Case("gt", ARM64CC::GT)
2208 .Case("le", ARM64CC::LE)
2209 .Case("al", ARM64CC::AL)
2210 .Case("nv", ARM64CC::NV)
2211 .Default(ARM64CC::Invalid);
2215 /// parseCondCode - Parse a Condition Code operand.
2216 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2217 bool invertCondCode) {
2219 const AsmToken &Tok = Parser.getTok();
2220 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2222 StringRef Cond = Tok.getString();
2223 unsigned CC = parseCondCodeString(Cond);
2224 if (CC == ARM64CC::Invalid)
2225 return TokError("invalid condition code");
2226 Parser.Lex(); // Eat identifier token.
2229 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2231 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2233 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2237 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2238 /// them if present.
2239 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2240 const AsmToken &Tok = Parser.getTok();
2241 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2242 .Case("lsl", ARM64_AM::LSL)
2243 .Case("lsr", ARM64_AM::LSR)
2244 .Case("asr", ARM64_AM::ASR)
2245 .Case("ror", ARM64_AM::ROR)
2246 .Case("msl", ARM64_AM::MSL)
2247 .Case("LSL", ARM64_AM::LSL)
2248 .Case("LSR", ARM64_AM::LSR)
2249 .Case("ASR", ARM64_AM::ASR)
2250 .Case("ROR", ARM64_AM::ROR)
2251 .Case("MSL", ARM64_AM::MSL)
2252 .Default(ARM64_AM::InvalidShift);
2253 if (ShOp == ARM64_AM::InvalidShift)
2256 SMLoc S = Tok.getLoc();
2259 // We expect a number here.
2260 if (getLexer().isNot(AsmToken::Hash))
2261 return TokError("immediate value expected for shifter operand");
2262 Parser.Lex(); // Eat the '#'.
2264 SMLoc ExprLoc = getLoc();
2265 const MCExpr *ImmVal;
2266 if (getParser().parseExpression(ImmVal))
2269 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2271 return TokError("immediate value expected for shifter operand");
2273 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2274 return Error(ExprLoc, "immediate value too large for shifter operand");
2276 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2278 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2282 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2283 /// them if present.
// NOTE(review): listing embeds original line numbers and elides some lines
// (gaps in the numbering); code kept verbatim.
// Returns true on error, false on success.
2284 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2285 const AsmToken &Tok = Parser.getTok();
// Accept the extend mnemonic in either all-lower or all-upper case.
// "lsl"/"LSL" is an alias for UXTX in extend position.
2286 ARM64_AM::ExtendType ExtOp =
2287 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2288 .Case("uxtb", ARM64_AM::UXTB)
2289 .Case("uxth", ARM64_AM::UXTH)
2290 .Case("uxtw", ARM64_AM::UXTW)
2291 .Case("uxtx", ARM64_AM::UXTX)
2292 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2293 .Case("sxtb", ARM64_AM::SXTB)
2294 .Case("sxth", ARM64_AM::SXTH)
2295 .Case("sxtw", ARM64_AM::SXTW)
2296 .Case("sxtx", ARM64_AM::SXTX)
2297 .Case("UXTB", ARM64_AM::UXTB)
2298 .Case("UXTH", ARM64_AM::UXTH)
2299 .Case("UXTW", ARM64_AM::UXTW)
2300 .Case("UXTX", ARM64_AM::UXTX)
2301 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2302 .Case("SXTB", ARM64_AM::SXTB)
2303 .Case("SXTH", ARM64_AM::SXTH)
2304 .Case("SXTW", ARM64_AM::SXTW)
2305 .Case("SXTX", ARM64_AM::SXTX)
2306 .Default(ARM64_AM::InvalidExtend);
2307 if (ExtOp == ARM64_AM::InvalidExtend)
2310 SMLoc S = Tok.getLoc();
// An extend with no shift amount: end of statement or a following comma
// means the optional immediate was omitted; encode a 0 amount.
2313 if (getLexer().is(AsmToken::EndOfStatement) ||
2314 getLexer().is(AsmToken::Comma)) {
2315 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2317 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
// No '#': same as above, extend with an implicit 0 shift amount.
2321 if (getLexer().isNot(AsmToken::Hash)) {
2322 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2324 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2328 Parser.Lex(); // Eat the '#'.
2330 const MCExpr *ImmVal;
2331 if (getParser().parseExpression(ImmVal))
// The shift amount must be a bare constant.
2334 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2336 return TokError("immediate value expected for extend operand");
2338 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2340 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2344 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2345 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
// NOTE(review): listing embeds original line numbers and elides some lines
// (gaps in the numbering, e.g. the final `else` heads before each
// "invalid operand" TokError); code kept verbatim.
// Returns true on error, false on success.
2346 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2347 OperandVector &Operands) {
// These mnemonics never take a '.'-suffix the way ordinary instructions do.
2348 if (Name.find('.') != StringRef::npos)
2349 return TokError("invalid operand");
// Emit the instruction as a plain "sys" token; the alias operands follow.
2353 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2355 const AsmToken &Tok = Parser.getTok();
2356 StringRef Op = Tok.getString();
2357 SMLoc S = Tok.getLoc();
2359 const MCExpr *Expr = 0;
// Push the four SYS operands -- #op1, Cn, Cm, #op2 -- for one alias.
2361 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2363 Expr = MCConstantExpr::Create(op1, getContext()); \
2364 Operands.push_back( \
2365 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2366 Operands.push_back( \
2367 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2368 Operands.push_back( \
2369 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2370 Expr = MCConstantExpr::Create(op2, getContext()); \
2371 Operands.push_back( \
2372 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
// Operand names are matched case-insensitively (compare_lower == 0 on match).
2375 if (Mnemonic == "ic") {
2376 if (!Op.compare_lower("ialluis")) {
2377 // SYS #0, C7, C1, #0
2378 SYS_ALIAS(0, 7, 1, 0);
2379 } else if (!Op.compare_lower("iallu")) {
2380 // SYS #0, C7, C5, #0
2381 SYS_ALIAS(0, 7, 5, 0);
2382 } else if (!Op.compare_lower("ivau")) {
2383 // SYS #3, C7, C5, #1
2384 SYS_ALIAS(3, 7, 5, 1);
2386 return TokError("invalid operand for IC instruction");
2388 } else if (Mnemonic == "dc") {
2389 if (!Op.compare_lower("zva")) {
2390 // SYS #3, C7, C4, #1
2391 SYS_ALIAS(3, 7, 4, 1);
2392 } else if (!Op.compare_lower("ivac")) {
2393 // SYS #0, C7, C6, #1
2394 SYS_ALIAS(0, 7, 6, 1);
2395 } else if (!Op.compare_lower("isw")) {
2396 // SYS #0, C7, C6, #2
2397 SYS_ALIAS(0, 7, 6, 2);
2398 } else if (!Op.compare_lower("cvac")) {
2399 // SYS #3, C7, C10, #1
2400 SYS_ALIAS(3, 7, 10, 1);
2401 } else if (!Op.compare_lower("csw")) {
2402 // SYS #0, C7, C10, #2
2403 SYS_ALIAS(0, 7, 10, 2);
2404 } else if (!Op.compare_lower("cvau")) {
2405 // SYS #3, C7, C11, #1
2406 SYS_ALIAS(3, 7, 11, 1);
2407 } else if (!Op.compare_lower("civac")) {
2408 // SYS #3, C7, C14, #1
2409 SYS_ALIAS(3, 7, 14, 1);
2410 } else if (!Op.compare_lower("cisw")) {
2411 // SYS #0, C7, C14, #2
2412 SYS_ALIAS(0, 7, 14, 2);
2414 return TokError("invalid operand for DC instruction");
2416 } else if (Mnemonic == "at") {
2417 if (!Op.compare_lower("s1e1r")) {
2418 // SYS #0, C7, C8, #0
2419 SYS_ALIAS(0, 7, 8, 0);
2420 } else if (!Op.compare_lower("s1e2r")) {
2421 // SYS #4, C7, C8, #0
2422 SYS_ALIAS(4, 7, 8, 0);
2423 } else if (!Op.compare_lower("s1e3r")) {
2424 // SYS #6, C7, C8, #0
2425 SYS_ALIAS(6, 7, 8, 0);
2426 } else if (!Op.compare_lower("s1e1w")) {
2427 // SYS #0, C7, C8, #1
2428 SYS_ALIAS(0, 7, 8, 1);
2429 } else if (!Op.compare_lower("s1e2w")) {
2430 // SYS #4, C7, C8, #1
2431 SYS_ALIAS(4, 7, 8, 1);
2432 } else if (!Op.compare_lower("s1e3w")) {
2433 // SYS #6, C7, C8, #1
2434 SYS_ALIAS(6, 7, 8, 1);
2435 } else if (!Op.compare_lower("s1e0r")) {
2436 // SYS #0, C7, C8, #2
2437 SYS_ALIAS(0, 7, 8, 2);
2438 } else if (!Op.compare_lower("s1e0w")) {
2439 // SYS #0, C7, C8, #3
2440 SYS_ALIAS(0, 7, 8, 3);
2441 } else if (!Op.compare_lower("s12e1r")) {
2442 // SYS #4, C7, C8, #4
2443 SYS_ALIAS(4, 7, 8, 4);
2444 } else if (!Op.compare_lower("s12e1w")) {
2445 // SYS #4, C7, C8, #5
2446 SYS_ALIAS(4, 7, 8, 5);
2447 } else if (!Op.compare_lower("s12e0r")) {
2448 // SYS #4, C7, C8, #6
2449 SYS_ALIAS(4, 7, 8, 6);
2450 } else if (!Op.compare_lower("s12e0w")) {
2451 // SYS #4, C7, C8, #7
2452 SYS_ALIAS(4, 7, 8, 7);
2454 return TokError("invalid operand for AT instruction");
2456 } else if (Mnemonic == "tlbi") {
2457 if (!Op.compare_lower("vmalle1is")) {
2458 // SYS #0, C8, C3, #0
2459 SYS_ALIAS(0, 8, 3, 0);
2460 } else if (!Op.compare_lower("alle2is")) {
2461 // SYS #4, C8, C3, #0
2462 SYS_ALIAS(4, 8, 3, 0);
2463 } else if (!Op.compare_lower("alle3is")) {
2464 // SYS #6, C8, C3, #0
2465 SYS_ALIAS(6, 8, 3, 0);
2466 } else if (!Op.compare_lower("vae1is")) {
2467 // SYS #0, C8, C3, #1
2468 SYS_ALIAS(0, 8, 3, 1);
2469 } else if (!Op.compare_lower("vae2is")) {
2470 // SYS #4, C8, C3, #1
2471 SYS_ALIAS(4, 8, 3, 1);
2472 } else if (!Op.compare_lower("vae3is")) {
2473 // SYS #6, C8, C3, #1
2474 SYS_ALIAS(6, 8, 3, 1);
2475 } else if (!Op.compare_lower("aside1is")) {
2476 // SYS #0, C8, C3, #2
2477 SYS_ALIAS(0, 8, 3, 2);
2478 } else if (!Op.compare_lower("vaae1is")) {
2479 // SYS #0, C8, C3, #3
2480 SYS_ALIAS(0, 8, 3, 3);
2481 } else if (!Op.compare_lower("alle1is")) {
2482 // SYS #4, C8, C3, #4
2483 SYS_ALIAS(4, 8, 3, 4);
2484 } else if (!Op.compare_lower("vale1is")) {
2485 // SYS #0, C8, C3, #5
2486 SYS_ALIAS(0, 8, 3, 5);
2487 } else if (!Op.compare_lower("vaale1is")) {
2488 // SYS #0, C8, C3, #7
2489 SYS_ALIAS(0, 8, 3, 7);
2490 } else if (!Op.compare_lower("vmalle1")) {
2491 // SYS #0, C8, C7, #0
2492 SYS_ALIAS(0, 8, 7, 0);
2493 } else if (!Op.compare_lower("alle2")) {
2494 // SYS #4, C8, C7, #0
2495 SYS_ALIAS(4, 8, 7, 0);
2496 } else if (!Op.compare_lower("vale2is")) {
2497 // SYS #4, C8, C3, #5
2498 SYS_ALIAS(4, 8, 3, 5);
2499 } else if (!Op.compare_lower("vale3is")) {
2500 // SYS #6, C8, C3, #5
2501 SYS_ALIAS(6, 8, 3, 5);
2502 } else if (!Op.compare_lower("alle3")) {
2503 // SYS #6, C8, C7, #0
2504 SYS_ALIAS(6, 8, 7, 0);
2505 } else if (!Op.compare_lower("vae1")) {
2506 // SYS #0, C8, C7, #1
2507 SYS_ALIAS(0, 8, 7, 1);
2508 } else if (!Op.compare_lower("vae2")) {
2509 // SYS #4, C8, C7, #1
2510 SYS_ALIAS(4, 8, 7, 1);
2511 } else if (!Op.compare_lower("vae3")) {
2512 // SYS #6, C8, C7, #1
2513 SYS_ALIAS(6, 8, 7, 1);
2514 } else if (!Op.compare_lower("aside1")) {
2515 // SYS #0, C8, C7, #2
2516 SYS_ALIAS(0, 8, 7, 2);
2517 } else if (!Op.compare_lower("vaae1")) {
2518 // SYS #0, C8, C7, #3
2519 SYS_ALIAS(0, 8, 7, 3);
2520 } else if (!Op.compare_lower("alle1")) {
2521 // SYS #4, C8, C7, #4
2522 SYS_ALIAS(4, 8, 7, 4);
2523 } else if (!Op.compare_lower("vale1")) {
2524 // SYS #0, C8, C7, #5
2525 SYS_ALIAS(0, 8, 7, 5);
2526 } else if (!Op.compare_lower("vale2")) {
2527 // SYS #4, C8, C7, #5
2528 SYS_ALIAS(4, 8, 7, 5);
2529 } else if (!Op.compare_lower("vale3")) {
2530 // SYS #6, C8, C7, #5
2531 SYS_ALIAS(6, 8, 7, 5);
2532 } else if (!Op.compare_lower("vaale1")) {
2533 // SYS #0, C8, C7, #7
2534 SYS_ALIAS(0, 8, 7, 7);
2535 } else if (!Op.compare_lower("ipas2e1")) {
2536 // SYS #4, C8, C4, #1
2537 SYS_ALIAS(4, 8, 4, 1);
2538 } else if (!Op.compare_lower("ipas2le1")) {
2539 // SYS #4, C8, C4, #5
2540 SYS_ALIAS(4, 8, 4, 5);
2541 } else if (!Op.compare_lower("vmalls12e1")) {
2542 // SYS #4, C8, C7, #6
2543 SYS_ALIAS(4, 8, 7, 6);
2544 } else if (!Op.compare_lower("vmalls12e1is")) {
2545 // SYS #4, C8, C3, #6
2546 SYS_ALIAS(4, 8, 3, 6);
2548 return TokError("invalid operand for TLBI instruction");
2554 Parser.Lex(); // Eat operand.
2556 // Check for the optional register operand.
2557 if (getLexer().is(AsmToken::Comma)) {
2558 Parser.Lex(); // Eat comma.
2560 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2561 return TokError("expected register operand");
2564 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2565 Parser.eatToEndOfStatement();
2566 return TokError("unexpected token in argument list");
2569 Parser.Lex(); // Consume the EndOfStatement
// tryParseBarrierOperand - Parse a DSB/DMB/ISB barrier operand, which is
// either a '#'-prefixed immediate in [0, 15] or a named barrier option.
// NOTE(review): listing embeds original line numbers and elides some lines
// (closing braces, an Operands.push_back head); code kept verbatim.
2573 ARM64AsmParser::OperandMatchResultTy
2574 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2575 const AsmToken &Tok = Parser.getTok();
2577 // Can be either a #imm style literal or an option name
2578 if (Tok.is(AsmToken::Hash)) {
2579 // Immediate operand.
2580 Parser.Lex(); // Eat the '#'
2581 const MCExpr *ImmVal;
2582 SMLoc ExprLoc = getLoc();
2583 if (getParser().parseExpression(ImmVal))
2584 return MatchOperand_ParseFail;
// The barrier immediate must be a constant, and fits in 4 bits.
2585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2587 Error(ExprLoc, "immediate value expected for barrier operand");
2588 return MatchOperand_ParseFail;
2590 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2591 Error(ExprLoc, "barrier operand out of range");
2592 return MatchOperand_ParseFail;
2595 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2596 return MatchOperand_Success;
2599 if (Tok.isNot(AsmToken::Identifier)) {
2600 TokError("invalid operand for instruction");
2601 return MatchOperand_ParseFail;
// Named barrier options (lowercase only here, unlike the shift/extend
// parsers which also accept uppercase).
2604 unsigned Opt = StringSwitch<unsigned>(Tok.getString())
2605 .Case("oshld", ARM64SYS::OSHLD)
2606 .Case("oshst", ARM64SYS::OSHST)
2607 .Case("osh", ARM64SYS::OSH)
2608 .Case("nshld", ARM64SYS::NSHLD)
2609 .Case("nshst", ARM64SYS::NSHST)
2610 .Case("nsh", ARM64SYS::NSH)
2611 .Case("ishld", ARM64SYS::ISHLD)
2612 .Case("ishst", ARM64SYS::ISHST)
2613 .Case("ish", ARM64SYS::ISH)
2614 .Case("ld", ARM64SYS::LD)
2615 .Case("st", ARM64SYS::ST)
2616 .Case("sy", ARM64SYS::SY)
2617 .Default(ARM64SYS::InvalidBarrier);
2618 if (Opt == ARM64SYS::InvalidBarrier) {
2619 TokError("invalid barrier option name");
2620 return MatchOperand_ParseFail;
2623 // The only valid named option for ISB is 'sy'
2624 if (Mnemonic == "isb" && Opt != ARM64SYS::SY) {
2625 TokError("'sy' or #imm operand expected");
2626 return MatchOperand_ParseFail;
2629 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2630 Parser.Lex(); // Consume the option
2632 return MatchOperand_Success;
// tryParseMRSSystemRegister - Parse a named system register readable via MRS.
// On a valid name, pushes a SystemRegister operand and consumes the token;
// otherwise reports NoMatch so generic parsing can proceed.
2635 ARM64AsmParser::OperandMatchResultTy
2636 ARM64AsmParser::tryParseMRSSystemRegister(OperandVector &Operands) {
2637 const AsmToken &Tok = Parser.getTok();
2639 if (Tok.isNot(AsmToken::Identifier))
2640 return MatchOperand_NoMatch;
// Map the name to its encoding; Valid is declared on an elided line
// (gap in the embedded numbering) and set by fromString.
2643 auto Mapper = ARM64SysReg::MRSMapper();
2644 uint32_t Reg = Mapper.fromString(Tok.getString(), Valid);
2648 ARM64Operand::CreateSystemRegister((uint16_t)Reg, getLoc(),
2650 Parser.Lex(); // Consume the register name.
2651 return MatchOperand_Success;
2654 return MatchOperand_NoMatch;
// tryParseMSRSystemRegister - Parse a named system register writable via MSR.
// Mirrors tryParseMRSSystemRegister but uses the MSR name table.
2657 ARM64AsmParser::OperandMatchResultTy
2658 ARM64AsmParser::tryParseMSRSystemRegister(OperandVector &Operands) {
2659 const AsmToken &Tok = Parser.getTok();
2661 if (Tok.isNot(AsmToken::Identifier))
2662 return MatchOperand_NoMatch;
// Valid is declared on an elided line (gap in the embedded numbering).
2665 auto Mapper = ARM64SysReg::MSRMapper();
2666 uint32_t Reg = Mapper.fromString(Tok.getString(), Valid);
2670 ARM64Operand::CreateSystemRegister((uint16_t)Reg, getLoc(),
2672 Parser.Lex(); // Consume the register name.
2673 return MatchOperand_Success;
2676 return MatchOperand_NoMatch;
// tryParseCPSRField - Parse a PSTATE/CPSR field name (spsel, daifset,
// daifclr) for MSR-immediate forms; matching is case-insensitive via
// .lower().
2679 ARM64AsmParser::OperandMatchResultTy
2680 ARM64AsmParser::tryParseCPSRField(OperandVector &Operands) {
2681 const AsmToken &Tok = Parser.getTok();
2683 if (Tok.isNot(AsmToken::Identifier))
2684 return MatchOperand_NoMatch;
2686 ARM64SYS::CPSRField Field =
2687 StringSwitch<ARM64SYS::CPSRField>(Tok.getString().lower())
2688 .Case("spsel", ARM64SYS::cpsr_SPSel)
2689 .Case("daifset", ARM64SYS::cpsr_DAIFSet)
2690 .Case("daifclr", ARM64SYS::cpsr_DAIFClr)
2691 .Default(ARM64SYS::InvalidCPSRField);
// Unknown names are not an error here; let other parsers try.
2692 if (Field == ARM64SYS::InvalidCPSRField)
2693 return MatchOperand_NoMatch;
2695 ARM64Operand::CreateCPSRField(Field, getLoc(), getContext()));
2696 Parser.Lex(); // Consume the register name.
2698 return MatchOperand_Success;
2701 /// tryParseVectorRegister - Parse a vector register operand.
// Returns true if this was not a vector register (or on error), false on
// success. Also handles an optional ".<kind>" qualifier and a trailing
// "[index]" vector-element specifier.
// NOTE(review): listing embeds original line numbers and elides some lines
// (e.g. the early return and Kind declaration); code kept verbatim.
2702 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2703 if (Parser.getTok().isNot(AsmToken::Identifier))
2707 // Check for a vector register specifier first.
2709 int64_t Reg = tryMatchVectorRegister(Kind, false);
2713 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2714 // If there was an explicit qualifier, that goes on as a literal text
// token (e.g. ".8b"), matched by the instruction tables.
2717 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2719 // If there is an index specifier following the register, parse that too.
2720 if (Parser.getTok().is(AsmToken::LBrac)) {
2721 SMLoc SIdx = getLoc();
2722 Parser.Lex(); // Eat left bracket token.
2724 const MCExpr *ImmVal;
2725 if (getParser().parseExpression(ImmVal))
// Vector lane indices must be constants.
2727 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2729 TokError("immediate value expected for vector index");
2734 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2735 Error(E, "']' expected");
2739 Parser.Lex(); // Eat right bracket token.
2741 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2748 /// parseRegister - Parse a non-vector register operand.
// Returns true on failure to recognize a register. Tries vector registers
// first, then scalar ones.
2749 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2751 // Try for a vector register.
2752 if (!tryParseVectorRegister(Operands))
2755 // Try for a scalar register.
2756 int64_t Reg = tryParseRegister();
2760 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2762 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2763 // as a string token in the instruction itself.
2764 if (getLexer().getKind() == AsmToken::LBrac) {
2765 SMLoc LBracS = getLoc();
2767 const AsmToken &Tok = Parser.getTok();
2768 if (Tok.is(AsmToken::Integer)) {
2769 SMLoc IntS = getLoc();
2770 int64_t Val = Tok.getIntVal();
// NOTE(review): the check that Val == 1 appears on an elided line (gap in
// the embedded numbering) -- confirm against the full source; only "1" is
// emitted as a token below.
2773 if (getLexer().getKind() == AsmToken::RBrac) {
2774 SMLoc RBracS = getLoc();
// Emit "[", "1", "]" as literal tokens so the matcher sees them verbatim.
2777 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2779 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2781 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2791 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2792 /// do not allow base register writeback modes,
2793 /// or those that handle writeback separately from
2794 /// the memory operand (like the AdvSIMD ldX/stX
// instructions). Expects exactly "[<reg>]" with no offset.
2796 ARM64AsmParser::OperandMatchResultTy
2797 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
2798 if (Parser.getTok().isNot(AsmToken::LBrac))
2799 return MatchOperand_NoMatch;
2801 Parser.Lex(); // Eat left bracket token.
2803 const AsmToken &BaseRegTok = Parser.getTok();
2804 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2805 Error(BaseRegTok.getLoc(), "register expected");
2806 return MatchOperand_ParseFail;
// tryParseRegister returns -1 when the identifier is not a register.
2809 int64_t Reg = tryParseRegister();
2811 Error(BaseRegTok.getLoc(), "register expected");
2812 return MatchOperand_ParseFail;
2816 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2817 Error(E, "']' expected");
2818 return MatchOperand_ParseFail;
2821 Parser.Lex(); // Eat right bracket token.
// Base register with zero offset.
2823 Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
2824 return MatchOperand_Success;
2827 /// parseMemory - Parse a memory operand for a basic load/store instruction.
// Handles "[base]", "[base, Xm{, extend {#amt}}]", "[base, #imm]" and
// "[base, symbol@modifier]" forms, plus a trailing '!' for pre-indexed
// writeback. Returns true on error.
// NOTE(review): listing embeds original line numbers and elides some lines
// (closing braces, several return statements); code kept verbatim.
2828 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2829 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2831 Parser.Lex(); // Eat left bracket token.
2833 const AsmToken &BaseRegTok = Parser.getTok();
2834 if (BaseRegTok.isNot(AsmToken::Identifier))
2835 return Error(BaseRegTok.getLoc(), "register expected");
2837 int64_t Reg = tryParseRegister();
2839 return Error(BaseRegTok.getLoc(), "register expected");
2841 // If there is an offset expression, parse it.
2842 const MCExpr *OffsetExpr = 0;
2844 if (Parser.getTok().is(AsmToken::Comma)) {
2845 Parser.Lex(); // Eat the comma.
2846 OffsetLoc = getLoc();
2849 const AsmToken &OffsetRegTok = Parser.getTok();
// Register-offset form: "[base, Xm ...]". Reg2 is -1 when the offset is
// not a register.
2850 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2852 // Default shift is LSL, with an omitted shift. We use the third bit of
2853 // the extend value to indicate presence/omission of the immediate offset.
2854 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
2855 int64_t ShiftVal = 0;
2856 bool ExplicitShift = false;
2858 if (Parser.getTok().is(AsmToken::Comma)) {
2859 // Embedded extend operand.
2860 Parser.Lex(); // Eat the comma
2862 SMLoc ExtLoc = getLoc();
2863 const AsmToken &Tok = Parser.getTok();
// Only the word-size/doubleword extends are legal inside a memory
// operand; byte/halfword extends are not offered here.
2864 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2865 .Case("uxtw", ARM64_AM::UXTW)
2866 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2867 .Case("sxtw", ARM64_AM::SXTW)
2868 .Case("sxtx", ARM64_AM::SXTX)
2869 .Case("UXTW", ARM64_AM::UXTW)
2870 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2871 .Case("SXTW", ARM64_AM::SXTW)
2872 .Case("SXTX", ARM64_AM::SXTX)
2873 .Default(ARM64_AM::InvalidExtend)
2874 if (ExtOp == ARM64_AM::InvalidExtend)
2875 return Error(ExtLoc, "expected valid extend operation")
2877 Parser.Lex(); // Eat the extend op.
2879 if (getLexer().is(AsmToken::RBrac)) {
2880 // No immediate operand.
2881 if (ExtOp == ARM64_AM::UXTX)
2882 return Error(ExtLoc, "LSL extend requires immediate operand");
2883 } else if (getLexer().is(AsmToken::Hash)) {
2884 // Immediate operand.
2885 Parser.Lex(); // Eat the '#'
2886 const MCExpr *ImmVal;
2887 SMLoc ExprLoc = getLoc();
2888 if (getParser().parseExpression(ImmVal))
2890 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2892 return TokError("immediate value expected for extend operand");
// Legal extend amounts for a register offset are 0..4.
2894 ExplicitShift = true;
2895 ShiftVal = MCE->getValue();
2896 if (ShiftVal < 0 || ShiftVal > 4)
2897 return Error(ExprLoc, "immediate operand out of range");
2899 return Error(getLoc(), "expected immediate operand");
2902 if (Parser.getTok().isNot(AsmToken::RBrac))
2903 return Error(getLoc(), "']' expected");
2905 Parser.Lex(); // Eat right bracket token.
2908 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
2909 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
2912 // Immediate expressions.
2913 } else if (Parser.getTok().is(AsmToken::Hash)) {
2914 Parser.Lex(); // Eat hash token.
2916 if (parseSymbolicImmVal(OffsetExpr))
2919 // FIXME: We really should make sure that we're dealing with a LDR/STR
2920 // instruction that can legally have a symbolic expression here.
2921 // Symbol reference.
2922 if (Parser.getTok().isNot(AsmToken::Identifier) &&
2923 Parser.getTok().isNot(AsmToken::String))
2924 return Error(getLoc(), "identifier or immediate expression expected");
2925 if (getParser().parseExpression(OffsetExpr))
2927 // If this is a plain ref, Make sure a legal variant kind was specified.
2928 // Otherwise, it's a more complicated expression and we have to just
2929 // assume it's OK and let the relocation stuff puke if it's not.
2930 ARM64MCExpr::VariantKind ELFRefKind;
2931 MCSymbolRefExpr::VariantKind DarwinRefKind;
2932 const MCConstantExpr *Addend;
2933 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
2935 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
2936 "ELF symbol modifiers not supported here yet");
2938 switch (DarwinRefKind) {
2940 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
2941 case MCSymbolRefExpr::VK_GOTPAGEOFF:
2942 case MCSymbolRefExpr::VK_PAGEOFF:
2943 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
2944 // These are what we're expecting.
2952 if (Parser.getTok().isNot(AsmToken::RBrac))
2953 return Error(E, "']' expected");
2955 Parser.Lex(); // Eat right bracket token.
2957 // Create the memory operand.
2959 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
2961 // Check for a '!', indicating pre-indexed addressing with writeback.
2962 if (Parser.getTok().is(AsmToken::Exclaim)) {
2963 // There needs to have been an immediate or wback doesn't make sense.
2965 return Error(E, "missing offset for pre-indexed addressing");
2966 // Pre-indexed with writeback must have a constant expression for the
2967 // offset. FIXME: Theoretically, we'd like to allow fixups so long
2968 // as they don't require a relocation.
2969 if (!isa<MCConstantExpr>(OffsetExpr))
2970 return Error(OffsetLoc, "constant immediate expression expected");
2972 // Create the Token operand for the '!'.
2973 Operands.push_back(ARM64Operand::CreateToken(
2974 "!", false, Parser.getTok().getLoc(), getContext()));
2975 Parser.Lex(); // Eat the '!' token.
// parseSymbolicImmVal - Parse an immediate that may carry an ELF-style
// ":specifier:" relocation modifier (e.g. ":lo12:sym"). On success the
// expression, wrapped in an ARM64MCExpr when a modifier was present, is
// returned through ImmVal. Returns true on error.
2981 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2982 bool HasELFModifier = false;
2983 ARM64MCExpr::VariantKind RefKind;
2985 if (Parser.getTok().is(AsmToken::Colon)) {
2986 Parser.Lex(); // Eat ':'
2987 HasELFModifier = true;
2989 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2990 Error(Parser.getTok().getLoc(),
2991 "expect relocation specifier in operand after ':'")
// Specifier names are matched case-insensitively via .lower().
2995 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2996 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
2997 .Case("lo12", ARM64MCExpr::VK_LO12)
2998 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
2999 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
3000 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
3001 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
3002 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3003 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3004 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3005 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3006 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3007 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3008 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3009 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3010 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3011 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3012 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3013 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3014 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3015 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3016 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3017 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3018 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3019 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3020 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3021 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3022 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3023 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3024 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3025 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3026 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3027 .Default(ARM64MCExpr::VK_INVALID);
3029 if (RefKind == ARM64MCExpr::VK_INVALID) {
3030 Error(Parser.getTok().getLoc(),
3031 "expect relocation specifier in operand after ':'");
3035 Parser.Lex(); // Eat identifier
// The specifier must be closed by a second ':' before the expression.
3037 if (Parser.getTok().isNot(AsmToken::Colon)) {
3038 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3041 Parser.Lex(); // Eat ':'
3044 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the modifier survives into relocation emission.
3048 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3053 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Accepts "{v0.8b - v3.8b}" range syntax and "{v0.8b, v1.8b, ...}" comma
// syntax (registers wrap mod 32), with an optional "[index]" suffix.
// Returns true on error.
// NOTE(review): the assert message below says "Left Bracket" although the
// token checked is LCurly -- string literal left untouched here; worth
// fixing in a code change.
3054 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3055 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3057 Parser.Lex(); // Eat left bracket token.
3059 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3062 int64_t PrevReg = FirstReg;
// Range form: "{vA.k - vB.k}".
3065 if (Parser.getTok().is(AsmToken::Minus)) {
3066 Parser.Lex(); // Eat the minus.
3068 SMLoc Loc = getLoc();
3070 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3073 // Any Kind suffixes must match on all regs in the list.
3074 if (Kind != NextKind)
3075 return Error(Loc, "mismatched register size suffix");
// Distance with wraparound at 32; a range covers 2-4 registers total.
3077 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3079 if (Space == 0 || Space > 3) {
3080 return Error(Loc, "invalid number of vectors");
// Comma form: each register must be the sequential successor (mod 32).
3086 while (Parser.getTok().is(AsmToken::Comma)) {
3087 Parser.Lex(); // Eat the comma token.
3089 SMLoc Loc = getLoc();
3091 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3094 // Any Kind suffixes must match on all regs in the list.
3095 if (Kind != NextKind)
3096 return Error(Loc, "mismatched register size suffix");
3098 // Registers must be incremental (with wraparound at 31)
3099 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3100 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3101 return Error(Loc, "registers must be sequential");
3108 if (Parser.getTok().is(AsmToken::EndOfStatement))
3109 Error(getLoc(), "'}' expected");
3110 Parser.Lex(); // Eat the '}' token.
3112 unsigned NumElements = 0;
3113 char ElementKind = 0;
// Decode the ".8b"-style suffix into element count and kind.
3115 parseValidVectorKind(Kind, NumElements, ElementKind);
3117 Operands.push_back(ARM64Operand::CreateVectorList(
3118 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3120 // If there is an index specifier following the list, parse that too.
3121 if (Parser.getTok().is(AsmToken::LBrac)) {
3122 SMLoc SIdx = getLoc();
3123 Parser.Lex(); // Eat left bracket token.
3125 const MCExpr *ImmVal;
3126 if (getParser().parseExpression(ImmVal))
3128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3130 TokError("immediate value expected for vector index");
3135 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3136 Error(E, "']' expected");
3140 Parser.Lex(); // Eat right bracket token.
3142 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3148 /// parseOperand - Parse a arm instruction operand. For now this parses the
3149 /// operand regardless of the mnemonic.
// Dispatches on the first token: custom tablegen-matched operands first,
// then memory '[', vector list '{', identifiers (cond code, register,
// shift/extend, or label expression), and '#' immediates. Returns true on
// error.
3150 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3151 bool invertCondCode) {
3152 // Check if the current operand has a custom associated parser, if so, try to
3153 // custom parse the operand, or fallback to the general approach.
3154 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3155 if (ResTy == MatchOperand_Success)
3157 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3158 // there was a match, but an error occurred, in which case, just return that
3159 // the operand parsing failed.
3160 if (ResTy == MatchOperand_ParseFail)
3163 // Nothing custom, so do general case parsing.
3165 switch (getLexer().getKind()) {
// NOTE(review): the case labels before this point are elided in this
// listing (gap in the embedded numbering); Expr handling below presumably
// belongs to a default/expression case -- confirm against the full source.
3169 if (parseSymbolicImmVal(Expr))
3170 return Error(S, "invalid operand");
3172 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3173 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3176 case AsmToken::LBrac:
3177 return parseMemory(Operands);
3178 case AsmToken::LCurly:
3179 return parseVectorList(Operands);
3180 case AsmToken::Identifier: {
3181 // If we're expecting a Condition Code operand, then just parse that.
3183 return parseCondCode(Operands, invertCondCode);
3185 // If it's a register name, parse it.
3186 if (!parseRegister(Operands))
3189 // This could be an optional "shift" operand.
3190 if (!parseOptionalShift(Operands))
3193 // Or maybe it could be an optional "extend" operand.
3194 if (!parseOptionalExtend(Operands))
3197 // This was not a register so parse other operands that start with an
3198 // identifier (like labels) as expressions and create them as immediates.
3199 const MCExpr *IdVal;
3201 if (getParser().parseExpression(IdVal))
3204 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3205 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3208 case AsmToken::Hash: {
3209 // #42 -> immediate.
3213 // The only Real that should come through here is a literal #0.0 for
3214 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3215 // so convert the value.
3216 const AsmToken &Tok = Parser.getTok();
3217 if (Tok.is(AsmToken::Real)) {
3218 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3219 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3220 if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
3221 return TokError("unexpected floating point literal");
3222 Parser.Lex(); // Eat the token.
// Emit "#0" ".0" as two literal tokens the matcher expects.
3225 ARM64Operand::CreateToken("#0", false, S, getContext()));
3227 ARM64Operand::CreateToken(".0", false, S, getContext()));
3231 const MCExpr *ImmVal;
3232 if (parseSymbolicImmVal(ImmVal))
3235 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3236 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3242 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
// operands.
// Splits the mnemonic on '.' into a head token plus suffix tokens, routes
// IC/DC/AT/TLBI to parseSysAlias, folds "b.<cc>" condition codes into an
// immediate operand, then parses the comma-separated operand list.
// Returns true on error.
// NOTE(review): listing embeds original line numbers and elides some lines
// (e.g. the operand counter N's declaration/increment); code kept verbatim.
3244 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3245 StringRef Name, SMLoc NameLoc,
3246 OperandVector &Operands) {
3247 // Create the leading tokens for the mnemonic, split by '.' characters.
3248 size_t Start = 0, Next = Name.find('.');
3249 StringRef Head = Name.slice(Start, Next);
3251 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3252 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3253 return parseSysAlias(Head, NameLoc, Operands);
3256 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3259 // Handle condition codes for a branch mnemonic
3260 if (Head == "b" && Next != StringRef::npos) {
3262 Next = Name.find('.', Start + 1);
3263 Head = Name.slice(Start + 1, Next);
3265 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3266 (Head.data() - Name.data()));
3267 unsigned CC = parseCondCodeString(Head);
3268 if (CC == ARM64CC::Invalid)
3269 return Error(SuffixLoc, "invalid condition code");
3270 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3272 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3275 // Add the remaining tokens in the mnemonic.
3276 while (Next != StringRef::npos) {
3278 Next = Name.find('.', Start + 1);
3279 Head = Name.slice(Start, Next);
3280 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3281 (Head.data() - Name.data()) + 1);
3283 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3286 // Conditional compare instructions have a Condition Code operand, which needs
3287 // to be parsed and an immediate operand created.
3288 bool condCodeFourthOperand =
3289 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3290 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3291 Head == "csinc" || Head == "csinv" || Head == "csneg");
3293 // These instructions are aliases to some of the conditional select
3294 // instructions. However, the condition code is inverted in the aliased
// instruction.
3297 // FIXME: Is this the correct way to handle these? Or should the parser
3298 // generate the aliased instructions directly?
3299 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3300 bool condCodeThirdOperand =
3301 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3303 // Read the remaining operands.
3304 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3305 // Read the first operand.
3306 if (parseOperand(Operands, false, false)) {
3307 Parser.eatToEndOfStatement();
3312 while (getLexer().is(AsmToken::Comma)) {
3313 Parser.Lex(); // Eat the comma.
3315 // Parse and remember the operand.
// N (operand position, declared on an elided line) selects when the
// operand must be parsed as a condition code, and whether to invert it.
3316 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3317 (N == 3 && condCodeThirdOperand) ||
3318 (N == 2 && condCodeSecondOperand),
3319 condCodeSecondOperand || condCodeThirdOperand)) {
3320 Parser.eatToEndOfStatement();
3328 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3329 SMLoc Loc = Parser.getTok().getLoc();
3330 Parser.eatToEndOfStatement();
3331 return Error(Loc, "unexpected token in argument list");
3334 Parser.Lex(); // Consume the EndOfStatement
3338 /// isFPR32Register - Check if a register is in the FPR32 register class.
3339 /// (The parser does not have the target register info to check the register
3340 /// class directly.)
/// Returns true only for the single-precision FP registers S0..S31 listed
/// below; any other register number falls through to the default result.
3341 static bool isFPR32Register(unsigned Reg) {
3342 using namespace ARM64;
// Enumerate all 32 S-registers explicitly; the parser has no MCRegisterInfo
// here, so membership is hard-coded rather than queried from a register class.
3346 case S0: case S1: case S2: case S3: case S4: case S5: case S6:
3347 case S7: case S8: case S9: case S10: case S11: case S12: case S13:
3348 case S14: case S15: case S16: case S17: case S18: case S19: case S20:
3349 case S21: case S22: case S23: case S24: case S25: case S26: case S27:
3350 case S28: case S29: case S30: case S31:
3356 /// isGPR32Register - Check if a register is in the GPR32sp register class.
3357 /// (The parser does not have the target register info to check the register
3358 /// class directly.)
/// Matches the 32-bit general-purpose registers W0..W30 plus WSP (the "sp"
/// variant of the class — note WSP is included, WZR is not listed here).
3359 static bool isGPR32Register(unsigned Reg) {
3360 using namespace ARM64;
// Hard-coded membership list, mirroring isFPR32Register above.
3364 case W0: case W1: case W2: case W3: case W4: case W5: case W6:
3365 case W7: case W8: case W9: case W10: case W11: case W12: case W13:
3366 case W14: case W15: case W16: case W17: case W18: case W19: case W20:
3367 case W21: case W22: case W23: case W24: case W25: case W26: case W27:
3368 case W28: case W29: case W30: case WSP:
/// isGPR64Reg - Check if a register is a 64-bit general-purpose register.
/// Matches X0..X28 plus the aliased names FP (x29), LR (x30), SP, and XZR.
/// (Like the helpers above, membership is hard-coded because the parser has
/// no target register info available.)
3374 static bool isGPR64Reg(unsigned Reg) {
3375 using namespace ARM64;
3377 case X0: case X1: case X2: case X3: case X4: case X5: case X6:
3378 case X7: case X8: case X9: case X10: case X11: case X12: case X13:
3379 case X14: case X15: case X16: case X17: case X18: case X19: case X20:
3380 case X21: case X22: case X23: case X24: case X25: case X26: case X27:
3381 case X28: case FP: case LR: case SP: case XZR:
3389 // FIXME: This entire function is a giant hack to provide us with decent
3390 // operand range validation/diagnostics until TableGen/MC can be extended
3391 // to support autogeneration of this kind of validation.
/// validateInstruction - Post-match semantic checks the matcher tables cannot
/// express: writeback-base register hazards on loads/stores, and immediate /
/// shift range checks. Loc holds the start locations of the parsed operands
/// (Loc[i] is operand i+1, as pushed by the caller), so diagnostics can point
/// at the offending operand. On failure, reports via Error() and returns its
/// result (true signals failure in the MC asm-parser convention).
3392 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3393 SmallVectorImpl<SMLoc> &Loc) {
3394 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3395 // Check for indexed addressing modes w/ the base register being the
3396 // same as a destination/source register or pair load where
3397 // the Rt == Rt2. All of those are undefined behaviour.
3398 switch (Inst.getOpcode()) {
// Pre/post-indexed load-pair: the writeback base Rn must not overlap either
// destination (isSubRegisterEq also catches the W-reg alias of an X-reg).
3399 case ARM64::LDPSWpre:
3400 case ARM64::LDPWpost:
3401 case ARM64::LDPWpre:
3402 case ARM64::LDPXpost:
3403 case ARM64::LDPXpre: {
3404 unsigned Rt = Inst.getOperand(0).getReg();
3405 unsigned Rt2 = Inst.getOperand(1).getReg();
3406 unsigned Rn = Inst.getOperand(2).getReg();
3407 if (RI->isSubRegisterEq(Rn, Rt))
3408 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3409 "is also a destination");
3410 if (RI->isSubRegisterEq(Rn, Rt2))
3411 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3412 "is also a destination");
// Load-pair forms where only the Rt2==Rt hazard applies (diagnosed below).
3415 case ARM64::LDPDpost:
3416 case ARM64::LDPDpre:
3417 case ARM64::LDPQpost:
3418 case ARM64::LDPQpre:
3419 case ARM64::LDPSpost:
3420 case ARM64::LDPSpre:
3421 case ARM64::LDPSWpost:
3427 case ARM64::LDPXi: {
3428 unsigned Rt = Inst.getOperand(0).getReg();
3429 unsigned Rt2 = Inst.getOperand(1).getReg();
3431 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Pre/post-indexed store-pair: writeback base must not overlap either source.
3434 case ARM64::STPDpost:
3435 case ARM64::STPDpre:
3436 case ARM64::STPQpost:
3437 case ARM64::STPQpre:
3438 case ARM64::STPSpost:
3439 case ARM64::STPSpre:
3440 case ARM64::STPWpost:
3441 case ARM64::STPWpre:
3442 case ARM64::STPXpost:
3443 case ARM64::STPXpre: {
3444 unsigned Rt = Inst.getOperand(0).getReg();
3445 unsigned Rt2 = Inst.getOperand(1).getReg();
3446 unsigned Rn = Inst.getOperand(2).getReg();
3447 if (RI->isSubRegisterEq(Rn, Rt))
3448 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3449 "is also a source");
3450 if (RI->isSubRegisterEq(Rn, Rt2))
3451 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3452 "is also a source");
// Pre/post-indexed single-register loads: writeback base vs. Rt hazard.
3455 case ARM64::LDRBBpre:
3456 case ARM64::LDRBpre:
3457 case ARM64::LDRHHpre:
3458 case ARM64::LDRHpre:
3459 case ARM64::LDRSBWpre:
3460 case ARM64::LDRSBXpre:
3461 case ARM64::LDRSHWpre:
3462 case ARM64::LDRSHXpre:
3463 case ARM64::LDRSWpre:
3464 case ARM64::LDRWpre:
3465 case ARM64::LDRXpre:
3466 case ARM64::LDRBBpost:
3467 case ARM64::LDRBpost:
3468 case ARM64::LDRHHpost:
3469 case ARM64::LDRHpost:
3470 case ARM64::LDRSBWpost:
3471 case ARM64::LDRSBXpost:
3472 case ARM64::LDRSHWpost:
3473 case ARM64::LDRSHXpost:
3474 case ARM64::LDRSWpost:
3475 case ARM64::LDRWpost:
3476 case ARM64::LDRXpost: {
3477 unsigned Rt = Inst.getOperand(0).getReg();
3478 unsigned Rn = Inst.getOperand(1).getReg();
3479 if (RI->isSubRegisterEq(Rn, Rt))
3480 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3481 "is also a source");
// Pre/post-indexed single-register stores: same writeback hazard.
3484 case ARM64::STRBBpost:
3485 case ARM64::STRBpost:
3486 case ARM64::STRHHpost:
3487 case ARM64::STRHpost:
3488 case ARM64::STRWpost:
3489 case ARM64::STRXpost:
3490 case ARM64::STRBBpre:
3491 case ARM64::STRBpre:
3492 case ARM64::STRHHpre:
3493 case ARM64::STRHpre:
3494 case ARM64::STRWpre:
3495 case ARM64::STRXpre: {
3496 unsigned Rt = Inst.getOperand(0).getReg();
3497 unsigned Rn = Inst.getOperand(1).getReg();
3498 if (RI->isSubRegisterEq(Rn, Rt))
3499 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3500 "is also a source");
3505 // Now check immediate ranges. Separate from the above as there is overlap
3506 // in the instructions being checked and this keeps the nested conditionals
3508 switch (Inst.getOpcode()) {
// Shifted-register logical ops: for the 32-bit (W) forms an LSL amount
// greater than 31 is rejected; operand 3 encodes shift type + amount.
3510 case ARM64::ANDSWrs:
3512 case ARM64::ORRWrs: {
3513 if (!Inst.getOperand(3).isImm())
3514 return Error(Loc[3], "immediate value expected");
3515 int64_t shifter = Inst.getOperand(3).getImm();
3516 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3517 if (ST == ARM64_AM::LSL && shifter > 31)
3518 return Error(Loc[3], "shift value out of range");
// Add/sub immediate: the optional shift on the imm12 must be LSL #0 or #12,
// and a plain constant immediate must fit in 12 bits.
3521 case ARM64::ADDSWri:
3522 case ARM64::ADDSXri:
3525 case ARM64::SUBSWri:
3526 case ARM64::SUBSXri:
3528 case ARM64::SUBXri: {
3529 if (!Inst.getOperand(3).isImm())
3530 return Error(Loc[3], "immediate value expected");
3531 int64_t shifter = Inst.getOperand(3).getImm();
3532 if (shifter != 0 && shifter != 12)
3533 return Error(Loc[3], "shift value out of range");
3534 // The imm12 operand can be an expression. Validate that it's legit.
3535 // FIXME: We really, really want to allow arbitrary expressions here
3536 // and resolve the value and validate the result at fixup time, but
3537 // that's hard as we have long since lost any source information we
3538 // need to generate good diagnostics by that point.
3539 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3540 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3541 ARM64MCExpr::VariantKind ELFRefKind;
3542 MCSymbolRefExpr::VariantKind DarwinRefKind;
3543 const MCConstantExpr *Addend;
3544 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3545 return Error(Loc[2], "invalid immediate expression");
// Only low-12-bit relocation flavors (pageoff / *_LO12 variants) are legal
// as an add immediate expression.
3548 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3549 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3550 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3551 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3552 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3553 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3554 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3555 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3556 // Note that we don't range-check the addend. It's adjusted
3557 // modulo page size when converted, so there is no "out of range"
3558 // condition when using @pageoff. Any validity checking for the value
3559 // was done in the is*() predicate function.
3561 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3562 // @gotpageoff can only be used directly, not with an addend.
3566 // Otherwise, we're not sure, so don't allow it for now.
3567 return Error(Loc[2], "invalid immediate expression");
3570 // If it's anything but an immediate, it's not legit.
3571 if (!Inst.getOperand(2).isImm())
3572 return Error(Loc[2], "invalid immediate expression");
3573 int64_t imm = Inst.getOperand(2).getImm();
3574 if (imm > 4095 || imm < 0)
3575 return Error(Loc[2], "immediate value out of range");
// Pre/post-indexed single loads/stores and the unscaled (LDTR*/LDUR*/STUR*)
// forms all take a 9-bit signed offset: [-256, 255].
3578 case ARM64::LDRBpre:
3579 case ARM64::LDRHpre:
3580 case ARM64::LDRSBWpre:
3581 case ARM64::LDRSBXpre:
3582 case ARM64::LDRSHWpre:
3583 case ARM64::LDRSHXpre:
3584 case ARM64::LDRWpre:
3585 case ARM64::LDRXpre:
3586 case ARM64::LDRSpre:
3587 case ARM64::LDRDpre:
3588 case ARM64::LDRQpre:
3589 case ARM64::STRBpre:
3590 case ARM64::STRHpre:
3591 case ARM64::STRWpre:
3592 case ARM64::STRXpre:
3593 case ARM64::STRSpre:
3594 case ARM64::STRDpre:
3595 case ARM64::STRQpre:
3596 case ARM64::LDRBpost:
3597 case ARM64::LDRHpost:
3598 case ARM64::LDRSBWpost:
3599 case ARM64::LDRSBXpost:
3600 case ARM64::LDRSHWpost:
3601 case ARM64::LDRSHXpost:
3602 case ARM64::LDRWpost:
3603 case ARM64::LDRXpost:
3604 case ARM64::LDRSpost:
3605 case ARM64::LDRDpost:
3606 case ARM64::LDRQpost:
3607 case ARM64::STRBpost:
3608 case ARM64::STRHpost:
3609 case ARM64::STRWpost:
3610 case ARM64::STRXpost:
3611 case ARM64::STRSpost:
3612 case ARM64::STRDpost:
3613 case ARM64::STRQpost:
3618 case ARM64::LDTRSHWi:
3619 case ARM64::LDTRSHXi:
3620 case ARM64::LDTRSBWi:
3621 case ARM64::LDTRSBXi:
3622 case ARM64::LDTRSWi:
3634 case ARM64::LDURSHWi:
3635 case ARM64::LDURSHXi:
3636 case ARM64::LDURSBWi:
3637 case ARM64::LDURSBXi:
3638 case ARM64::LDURSWi:
3646 case ARM64::STURBi: {
3647 // FIXME: Should accept expressions and error in fixup evaluation
3649 if (!Inst.getOperand(2).isImm())
3650 return Error(Loc[1], "immediate value expected");
3651 int64_t offset = Inst.getOperand(2).getImm();
3652 if (offset > 255 || offset < -256)
3653 return Error(Loc[1], "offset value out of range");
// Register-offset addressing: the extend type encoded in operand 3 must be
// one of UXTW/UXTX/SXTW/SXTX (word forms).
3658 case ARM64::LDRSWro:
3660 case ARM64::STRSro: {
3661 // FIXME: Should accept expressions and error in fixup evaluation
3663 if (!Inst.getOperand(3).isImm())
3664 return Error(Loc[1], "immediate value expected");
3665 int64_t shift = Inst.getOperand(3).getImm();
3666 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3667 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3668 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3669 return Error(Loc[1], "shift type invalid");
// Same extend-type validation for the X/quad register-offset forms.
3678 case ARM64::STRQro: {
3679 // FIXME: Should accept expressions and error in fixup evaluation
3681 if (!Inst.getOperand(3).isImm())
3682 return Error(Loc[1], "immediate value expected");
3683 int64_t shift = Inst.getOperand(3).getImm();
3684 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3685 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3686 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3687 return Error(Loc[1], "shift type invalid");
// Same extend-type validation for the halfword register-offset forms.
3691 case ARM64::LDRHHro:
3692 case ARM64::LDRSHWro:
3693 case ARM64::LDRSHXro:
3695 case ARM64::STRHHro: {
3696 // FIXME: Should accept expressions and error in fixup evaluation
3698 if (!Inst.getOperand(3).isImm())
3699 return Error(Loc[1], "immediate value expected");
3700 int64_t shift = Inst.getOperand(3).getImm();
3701 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3702 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3703 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3704 return Error(Loc[1], "shift type invalid");
// Same extend-type validation for the byte register-offset forms.
3708 case ARM64::LDRBBro:
3709 case ARM64::LDRSBWro:
3710 case ARM64::LDRSBXro:
3712 case ARM64::STRBBro: {
3713 // FIXME: Should accept expressions and error in fixup evaluation
3715 if (!Inst.getOperand(3).isImm())
3716 return Error(Loc[1], "immediate value expected");
3717 int64_t shift = Inst.getOperand(3).getImm();
3718 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3719 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3720 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3721 return Error(Loc[1], "shift type invalid");
// Load/store pair (and non-temporal pair) forms: the scaled imm7 offset
// operand must be in [-64, 63].
3735 case ARM64::LDPWpre:
3736 case ARM64::LDPXpre:
3737 case ARM64::LDPSpre:
3738 case ARM64::LDPDpre:
3739 case ARM64::LDPQpre:
3740 case ARM64::LDPSWpre:
3741 case ARM64::STPWpre:
3742 case ARM64::STPXpre:
3743 case ARM64::STPSpre:
3744 case ARM64::STPDpre:
3745 case ARM64::STPQpre:
3746 case ARM64::LDPWpost:
3747 case ARM64::LDPXpost:
3748 case ARM64::LDPSpost:
3749 case ARM64::LDPDpost:
3750 case ARM64::LDPQpost:
3751 case ARM64::LDPSWpost:
3752 case ARM64::STPWpost:
3753 case ARM64::STPXpost:
3754 case ARM64::STPSpost:
3755 case ARM64::STPDpost:
3756 case ARM64::STPQpost:
3766 case ARM64::STNPQi: {
3767 // FIXME: Should accept expressions and error in fixup evaluation
3769 if (!Inst.getOperand(3).isImm())
3770 return Error(Loc[2], "immediate value expected");
3771 int64_t offset = Inst.getOperand(3).getImm();
3772 if (offset > 63 || offset < -64)
3773 return Error(Loc[2], "offset value out of range");
/// rewriteMOV - Rewrite a "mov Rd, #imm" pseudo in place as a real
/// move-wide instruction: the mnemonic token becomes 'mnemonic' (callers
/// pass "movz"/"movn"), Operands[2] is replaced by the immediate shifted
/// right by 'shift' bits, and an "LSL #shift" shifter operand is appended.
/// The original operand source locations are preserved for diagnostics.
3781 static void rewriteMOV(ARM64AsmParser::OperandVector &Operands,
3782 StringRef mnemonic, uint64_t imm, unsigned shift,
3783 MCContext &Context) {
3784 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3785 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
// New mnemonic token keeps the original token's start location.
3787 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
// The 16-bit payload is imm >> shift; the shift amount moves into the
// explicit LSL shifter operand added below.
3789 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3790 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3791 Op2->getEndLoc(), Context);
3793 Operands.push_back(ARM64Operand::CreateShifter(
3794 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
/// showMatchError - Translate a matcher error code into a human-readable
/// diagnostic at Loc. Every case forwards to Error(), so the return value is
/// whatever Error() reports (failure, per MC asm-parser convention).
3799 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3801 case Match_MissingFeature:
3803 "instruction requires a CPU feature not currently enabled");
3804 case Match_InvalidOperand:
3805 return Error(Loc, "invalid operand for instruction");
3806 case Match_InvalidSuffix:
3807 return Error(Loc, "invalid type suffix for instruction");
// Memory-index diagnostics: ranges match the scaled/unscaled immediate
// encodings checked by the matcher (SImm9, imm7, uimm12 variants).
3808 case Match_InvalidMemoryIndexedSImm9:
3809 return Error(Loc, "index must be an integer in range [-256,255].");
3810 case Match_InvalidMemoryIndexed32SImm7:
3811 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
3812 case Match_InvalidMemoryIndexed64SImm7:
3813 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
3814 case Match_InvalidMemoryIndexed128SImm7:
3815 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
3816 case Match_InvalidMemoryIndexed8:
3817 return Error(Loc, "index must be an integer in range [0,4095].");
3818 case Match_InvalidMemoryIndexed16:
3819 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
3820 case Match_InvalidMemoryIndexed32:
3821 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
3822 case Match_InvalidMemoryIndexed64:
3823 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
3824 case Match_InvalidMemoryIndexed128:
3825 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
// Immediate-range diagnostics (e.g. shift/bit-index style operands).
3826 case Match_InvalidImm1_8:
3827 return Error(Loc, "immediate must be an integer in range [1,8].");
3828 case Match_InvalidImm1_16:
3829 return Error(Loc, "immediate must be an integer in range [1,16].");
3830 case Match_InvalidImm1_32:
3831 return Error(Loc, "immediate must be an integer in range [1,32].");
3832 case Match_InvalidImm1_64:
3833 return Error(Loc, "immediate must be an integer in range [1,64].");
3834 case Match_MnemonicFail:
3835 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown error codes are a programmer error; still emit a generic
// diagnostic so release builds (assert compiled out) fail gracefully.
3837 assert(0 && "unexpected error code!");
3838 return Error(Loc, "invalid instruction format");
3842 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3843 OperandVector &Operands,
3845 unsigned &ErrorInfo,
3846 bool MatchingInlineAsm) {
3847 assert(!Operands.empty() && "Unexpect empty operand list!");
3848 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3849 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3851 StringRef Tok = Op->getToken();
3852 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3853 // This needs to be done before the special handling of ADD/SUB immediates.
3854 if (Tok == "cmp" || Tok == "cmn") {
3855 // Replace the opcode with either ADDS or SUBS.
3856 const char *Repl = StringSwitch<const char *>(Tok)
3857 .Case("cmp", "subs")
3858 .Case("cmn", "adds")
3860 assert(Repl && "Unknown compare instruction");
3862 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3864 // Insert WZR or XZR as destination operand.
3865 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
3867 if (RegOp->isReg() &&
3868 (isGPR32Register(RegOp->getReg()) || RegOp->getReg() == ARM64::WZR))
3869 ZeroReg = ARM64::WZR;
3871 ZeroReg = ARM64::XZR;
3873 Operands.begin() + 1,
3874 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3875 // Update since we modified it above.
3876 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3877 Tok = Op->getToken();
3880 unsigned NumOperands = Operands.size();
3882 if (Tok == "mov" && NumOperands == 3) {
3883 // The MOV mnemomic is aliased to movn/movz, depending on the value of
3884 // the immediate being instantiated.
3885 // FIXME: Catching this here is a total hack, and we should use tblgen
3886 // support to implement this instead as soon as it is available.
3888 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3890 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3891 uint64_t Val = CE->getValue();
3892 uint64_t NVal = ~Val;
3894 // If this is a 32-bit register and the value has none of the upper
3895 // set, clear the complemented upper 32-bits so the logic below works
3896 // for 32-bit registers too.
3897 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3898 if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
3899 (Val & 0xFFFFFFFFULL) == Val)
3900 NVal &= 0x00000000FFFFFFFFULL;
3902 // MOVK Rd, imm << 0
3903 if ((Val & 0xFFFF) == Val)
3904 rewriteMOV(Operands, "movz", Val, 0, getContext());
3906 // MOVK Rd, imm << 16
3907 else if ((Val & 0xFFFF0000ULL) == Val)
3908 rewriteMOV(Operands, "movz", Val, 16, getContext());
3910 // MOVK Rd, imm << 32
3911 else if ((Val & 0xFFFF00000000ULL) == Val)
3912 rewriteMOV(Operands, "movz", Val, 32, getContext());
3914 // MOVK Rd, imm << 48
3915 else if ((Val & 0xFFFF000000000000ULL) == Val)
3916 rewriteMOV(Operands, "movz", Val, 48, getContext());
3918 // MOVN Rd, (~imm << 0)
3919 else if ((NVal & 0xFFFFULL) == NVal)
3920 rewriteMOV(Operands, "movn", NVal, 0, getContext());
3922 // MOVN Rd, ~(imm << 16)
3923 else if ((NVal & 0xFFFF0000ULL) == NVal)
3924 rewriteMOV(Operands, "movn", NVal, 16, getContext());
3926 // MOVN Rd, ~(imm << 32)
3927 else if ((NVal & 0xFFFF00000000ULL) == NVal)
3928 rewriteMOV(Operands, "movn", NVal, 32, getContext());
3930 // MOVN Rd, ~(imm << 48)
3931 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
3932 rewriteMOV(Operands, "movn", NVal, 48, getContext());
3935 } else if (NumOperands == 4) {
3936 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
3937 // Handle the uimm24 immediate form, where the shift is not specified.
3938 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3940 if (const MCConstantExpr *CE =
3941 dyn_cast<MCConstantExpr>(Op3->getImm())) {
3942 uint64_t Val = CE->getValue();
3943 if (Val >= (1 << 24)) {
3944 Error(IDLoc, "immediate value is too large");
3947 if (Val < (1 << 12)) {
3948 Operands.push_back(ARM64Operand::CreateShifter(
3949 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3950 } else if ((Val & 0xfff) == 0) {
3952 CE = MCConstantExpr::Create(Val >> 12, getContext());
3954 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
3955 Operands.push_back(ARM64Operand::CreateShifter(
3956 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
3958 Error(IDLoc, "immediate value is too large");
3962 Operands.push_back(ARM64Operand::CreateShifter(
3963 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3967 // FIXME: Horible hack to handle the LSL -> UBFM alias.
3968 } else if (NumOperands == 4 && Tok == "lsl") {
3969 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3970 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3971 if (Op2->isReg() && Op3->isImm()) {
3972 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3974 uint64_t Op3Val = Op3CE->getValue();
3975 uint64_t NewOp3Val = 0;
3976 uint64_t NewOp4Val = 0;
3977 if (isGPR32Register(Op2->getReg()) || Op2->getReg() == ARM64::WZR) {
3978 NewOp3Val = (32 - Op3Val) & 0x1f;
3979 NewOp4Val = 31 - Op3Val;
3981 NewOp3Val = (64 - Op3Val) & 0x3f;
3982 NewOp4Val = 63 - Op3Val;
3985 const MCExpr *NewOp3 =
3986 MCConstantExpr::Create(NewOp3Val, getContext());
3987 const MCExpr *NewOp4 =
3988 MCConstantExpr::Create(NewOp4Val, getContext());
3990 Operands[0] = ARM64Operand::CreateToken(
3991 "ubfm", false, Op->getStartLoc(), getContext());
3992 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3993 Op3->getEndLoc(), getContext());
3994 Operands.push_back(ARM64Operand::CreateImm(
3995 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4001 // FIXME: Horrible hack to handle the optional LSL shift for vector
4003 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4004 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4005 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4006 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4007 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4008 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4009 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4010 IDLoc, getContext()));
4011 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4012 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4013 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4014 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4015 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4016 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4017 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4018 // Canonicalize on lower-case for ease of comparison.
4019 std::string CanonicalSuffix = Suffix.lower();
4020 if (Tok != "movi" ||
4021 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4022 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4023 Operands.push_back(ARM64Operand::CreateShifter(
4024 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4027 } else if (NumOperands == 5) {
4028 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4029 // UBFIZ -> UBFM aliases.
4030 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4031 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4032 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4033 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4035 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4036 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4037 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4039 if (Op3CE && Op4CE) {
4040 uint64_t Op3Val = Op3CE->getValue();
4041 uint64_t Op4Val = Op4CE->getValue();
4043 uint64_t NewOp3Val = 0;
4044 if (isGPR32Register(Op1->getReg()))
4045 NewOp3Val = (32 - Op3Val) & 0x1f;
4047 NewOp3Val = (64 - Op3Val) & 0x3f;
4049 uint64_t NewOp4Val = Op4Val - 1;
4051 const MCExpr *NewOp3 =
4052 MCConstantExpr::Create(NewOp3Val, getContext());
4053 const MCExpr *NewOp4 =
4054 MCConstantExpr::Create(NewOp4Val, getContext());
4055 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4056 Op3->getEndLoc(), getContext());
4057 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4058 Op4->getEndLoc(), getContext());
4060 Operands[0] = ARM64Operand::CreateToken(
4061 "bfm", false, Op->getStartLoc(), getContext());
4062 else if (Tok == "sbfiz")
4063 Operands[0] = ARM64Operand::CreateToken(
4064 "sbfm", false, Op->getStartLoc(), getContext());
4065 else if (Tok == "ubfiz")
4066 Operands[0] = ARM64Operand::CreateToken(
4067 "ubfm", false, Op->getStartLoc(), getContext());
4069 llvm_unreachable("No valid mnemonic for alias?");
4077 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4078 // UBFX -> UBFM aliases.
4079 } else if (NumOperands == 5 &&
4080 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4081 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4082 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4083 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4085 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4086 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4087 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4089 if (Op3CE && Op4CE) {
4090 uint64_t Op3Val = Op3CE->getValue();
4091 uint64_t Op4Val = Op4CE->getValue();
4092 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4094 if (NewOp4Val >= Op3Val) {
4095 const MCExpr *NewOp4 =
4096 MCConstantExpr::Create(NewOp4Val, getContext());
4097 Operands[4] = ARM64Operand::CreateImm(
4098 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4100 Operands[0] = ARM64Operand::CreateToken(
4101 "bfm", false, Op->getStartLoc(), getContext());
4102 else if (Tok == "sbfx")
4103 Operands[0] = ARM64Operand::CreateToken(
4104 "sbfm", false, Op->getStartLoc(), getContext());
4105 else if (Tok == "ubfx")
4106 Operands[0] = ARM64Operand::CreateToken(
4107 "ubfm", false, Op->getStartLoc(), getContext());
4109 llvm_unreachable("No valid mnemonic for alias?");
4118 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4119 // InstAlias can't quite handle this since the reg classes aren't
4121 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4122 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4124 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4125 if (OpCE->getValue() < 32) {
4126 // The source register can be Wn here, but the matcher expects a
4127 // GPR64. Twiddle it here if necessary.
4128 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4130 unsigned Reg = getXRegFromWReg(Op->getReg());
4131 Operands[1] = ARM64Operand::CreateReg(
4132 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4139 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4140 // InstAlias can't quite handle this since the reg classes aren't
4142 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4143 // The source register can be Wn here, but the matcher expects a
4144 // GPR64. Twiddle it here if necessary.
4145 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4147 unsigned Reg = getXRegFromWReg(Op->getReg());
4148 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4149 Op->getEndLoc(), getContext());
4153 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4154 else if (NumOperands == 3 &&
4155 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4156 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4157 if (Op->isReg() && isGPR64Reg(Op->getReg())) {
4158 // The source register can be Wn here, but the matcher expects a
4159 // GPR64. Twiddle it here if necessary.
4160 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4162 unsigned Reg = getXRegFromWReg(Op->getReg());
4163 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4164 Op->getEndLoc(), getContext());
4170 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4171 if (NumOperands == 3 && Tok == "fmov") {
4172 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4173 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
4174 if (RegOp->isReg() && ImmOp->isFPImm() &&
4175 ImmOp->getFPImm() == (unsigned)-1) {
4177 isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
4178 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4179 Op->getEndLoc(), getContext());
4184 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4185 // FMOV instructions. The index isn't an actual instruction operand
4186 // but rather syntactic sugar. It really should be part of the mnemonic,
4187 // not the operand, but whatever.
4188 if ((NumOperands == 5) && Tok == "fmov") {
4189 // If the last operand is a vectorindex of '1', then replace it with
4190 // a '[' '1' ']' token sequence, which is what the matcher
4191 // (annoyingly) expects for a literal vector index operand.
4192 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4193 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4194 SMLoc Loc = Op->getStartLoc();
4195 Operands.pop_back();
4197 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4199 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4201 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4202 } else if (Op->isReg()) {
4203 // Similarly, check the destination operand for the GPR->High-lane
4205 unsigned OpNo = NumOperands - 2;
4206 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4207 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4208 SMLoc Loc = Op->getStartLoc();
4210 ARM64Operand::CreateToken("[", false, Loc, getContext());
4212 Operands.begin() + OpNo + 1,
4213 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4215 Operands.begin() + OpNo + 2,
4216 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4222 // First try to match against the secondary set of tables containing the
4223 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4224 unsigned MatchResult =
4225 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4227 // If that fails, try against the alternate table containing long-form NEON:
4228 // "fadd v0.2s, v1.2s, v2.2s"
4229 if (MatchResult != Match_Success)
4231 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4233 switch (MatchResult) {
4234 case Match_Success: {
4235 // Perform range checking and other semantic validations
4236 SmallVector<SMLoc, 8> OperandLocs;
4237 NumOperands = Operands.size();
4238 for (unsigned i = 1; i < NumOperands; ++i)
4239 OperandLocs.push_back(Operands[i]->getStartLoc());
4240 if (validateInstruction(Inst, OperandLocs))
4244 Out.EmitInstruction(Inst, STI);
4247 case Match_MissingFeature:
4248 case Match_MnemonicFail:
4249 return showMatchError(IDLoc, MatchResult);
4250 case Match_InvalidOperand: {
4251 SMLoc ErrorLoc = IDLoc;
4252 if (ErrorInfo != ~0U) {
4253 if (ErrorInfo >= Operands.size())
4254 return Error(IDLoc, "too few operands for instruction");
4256 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4257 if (ErrorLoc == SMLoc())
4260 // If the match failed on a suffix token operand, tweak the diagnostic
4262 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4263 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4264 MatchResult = Match_InvalidSuffix;
4266 return showMatchError(ErrorLoc, MatchResult);
4268 case Match_InvalidMemoryIndexedSImm9: {
4269 // If there is not a '!' after the memory operand that failed, we really
4270 // want the diagnostic for the non-pre-indexed instruction variant instead.
4271 // Be careful to check for the post-indexed variant as well, which also
4272 // uses this match diagnostic. Also exclude the explicitly unscaled
4273 // mnemonics, as they want the unscaled diagnostic as well.
4274 if (Operands.size() == ErrorInfo + 1 &&
4275 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4276 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4277 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4278 // the register class of the previous operand. Default to 64 in case
4279 // we see something unexpected.
4280 MatchResult = Match_InvalidMemoryIndexed64;
4282 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4283 if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
4284 .contains(PrevOp->getReg()))
4285 MatchResult = Match_InvalidMemoryIndexed32;
4288 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4289 if (ErrorLoc == SMLoc())
4291 return showMatchError(ErrorLoc, MatchResult);
4293 case Match_InvalidMemoryIndexed32:
4294 case Match_InvalidMemoryIndexed64:
4295 case Match_InvalidMemoryIndexed128:
4296 // If there is a '!' after the memory operand that failed, we really
4297 // want the diagnostic for the pre-indexed instruction variant instead.
4298 if (Operands.size() > ErrorInfo + 1 &&
4299 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4300 MatchResult = Match_InvalidMemoryIndexedSImm9;
4302 case Match_InvalidMemoryIndexed8:
4303 case Match_InvalidMemoryIndexed16:
4304 case Match_InvalidMemoryIndexed32SImm7:
4305 case Match_InvalidMemoryIndexed64SImm7:
4306 case Match_InvalidMemoryIndexed128SImm7:
4307 case Match_InvalidImm1_8:
4308 case Match_InvalidImm1_16:
4309 case Match_InvalidImm1_32:
4310 case Match_InvalidImm1_64: {
4311 // Any time we get here, there's nothing fancy to do. Just get the
4312 // operand SMLoc and display the diagnostic.
4313 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4314 // If it's a memory operand, the error is with the offset immediate,
4315 // so get that location instead.
4316 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4317 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4318 if (ErrorLoc == SMLoc())
4320 return showMatchError(ErrorLoc, MatchResult);
4324 llvm_unreachable("Implement any new match types added!");
4328 /// ParseDirective parses the arm specific directives
4329 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4330 StringRef IDVal = DirectiveID.getIdentifier();
4331 SMLoc Loc = DirectiveID.getLoc();
4332 if (IDVal == ".hword")
4333 return parseDirectiveWord(2, Loc);
4334 if (IDVal == ".word")
4335 return parseDirectiveWord(4, Loc);
4336 if (IDVal == ".xword")
4337 return parseDirectiveWord(8, Loc);
4338 if (IDVal == ".tlsdesccall")
4339 return parseDirectiveTLSDescCall(Loc);
4341 return parseDirectiveLOH(IDVal, Loc);
4344 /// parseDirectiveWord
4345 /// ::= .word [ expression (, expression)* ]
4346 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4347 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4349 const MCExpr *Value;
4350 if (getParser().parseExpression(Value))
4353 getParser().getStreamer().EmitValue(Value, Size);
4355 if (getLexer().is(AsmToken::EndOfStatement))
4358 // FIXME: Improve diagnostic.
4359 if (getLexer().isNot(AsmToken::Comma))
4360 return Error(L, "unexpected token in directive");
4369 // parseDirectiveTLSDescCall:
4370 // ::= .tlsdesccall symbol
4371 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4373 if (getParser().parseIdentifier(Name))
4374 return Error(L, "expected symbol after directive");
4376 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4377 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4378 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4381 Inst.setOpcode(ARM64::TLSDESCCALL);
4382 Inst.addOperand(MCOperand::CreateExpr(Expr));
4384 getParser().getStreamer().EmitInstruction(Inst, STI);
4388 /// ::= .loh <lohName | lohId> label1, ..., labelN
4389 /// The number of arguments depends on the loh identifier.
4390 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4391 if (IDVal != MCLOHDirectiveName())
4394 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4395 if (getParser().getTok().isNot(AsmToken::Integer))
4396 return TokError("expected an identifier or a number in directive");
4397 // We successfully get a numeric value for the identifier.
4398 // Check if it is valid.
4399 int64_t Id = getParser().getTok().getIntVal();
4400 Kind = (MCLOHType)Id;
4401 // Check that Id does not overflow MCLOHType.
4402 if (!isValidMCLOHType(Kind) || Id != Kind)
4403 return TokError("invalid numeric identifier in directive");
4405 StringRef Name = getTok().getIdentifier();
4406 // We successfully parse an identifier.
4407 // Check if it is a recognized one.
4408 int Id = MCLOHNameToId(Name);
4411 return TokError("invalid identifier in directive");
4412 Kind = (MCLOHType)Id;
4414 // Consume the identifier.
4416 // Get the number of arguments of this LOH.
4417 int NbArgs = MCLOHIdToNbArgs(Kind);
4419 assert(NbArgs != -1 && "Invalid number of arguments");
4421 SmallVector<MCSymbol *, 3> Args;
4422 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4424 if (getParser().parseIdentifier(Name))
4425 return TokError("expected identifier in directive");
4426 Args.push_back(getContext().GetOrCreateSymbol(Name));
4428 if (Idx + 1 == NbArgs)
4430 if (getLexer().isNot(AsmToken::Comma))
4431 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4434 if (getLexer().isNot(AsmToken::EndOfStatement))
4435 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4437 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4442 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4443 ARM64MCExpr::VariantKind &ELFRefKind,
4444 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4445 const MCConstantExpr *&Addend) {
4446 ELFRefKind = ARM64MCExpr::VK_INVALID;
4447 DarwinRefKind = MCSymbolRefExpr::VK_None;
4449 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4450 ELFRefKind = AE->getKind();
4451 Expr = AE->getSubExpr();
4454 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4456 // It's a simple symbol reference with no addend.
4457 DarwinRefKind = SE->getKind();
4462 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4466 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4469 DarwinRefKind = SE->getKind();
4471 if (BE->getOpcode() != MCBinaryExpr::Add)
4474 // See if the addend is is a constant, otherwise there's more going
4475 // on here than we can deal with.
4476 Addend = dyn_cast<MCConstantExpr>(BE->getRHS());
4480 // It's some symbol reference + a constant addend, but really
4481 // shouldn't use both Darwin and ELF syntax.
4482 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4483 DarwinRefKind == MCSymbolRefExpr::VK_None;
4486 /// Force static initialization.
4487 extern "C" void LLVMInitializeARM64AsmParser() {
4488 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
4491 #define GET_REGISTER_MATCHER
4492 #define GET_MATCHER_IMPLEMENTATION
4493 #include "ARM64GenAsmMatcher.inc"
4495 // Define this matcher function after the auto-generated include so we
4496 // have the match class enum definitions.
4497 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4499 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4500 // If the kind is a token for a literal immediate, check if our asm
4501 // operand matches. This is for InstAliases which have a fixed-value
4502 // immediate in the syntax.
4503 int64_t ExpectedVal;
4506 return Match_InvalidOperand;
4548 return Match_InvalidOperand;
4549 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4551 return Match_InvalidOperand;
4552 if (CE->getValue() == ExpectedVal)
4553 return Match_Success;
4554 return Match_InvalidOperand;