1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
// NOTE(review): this listing is line-sampled; some members, access
// specifiers, and braces from the original file are not shown here.
42   typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45   StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessors for the underlying parser/lexer and the current
// token's source location.
49   MCAsmParser &getParser() const { return Parser; }
50   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52   SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Hand-written parsers for operand forms the auto-generated matcher cannot
// handle on its own (SYS aliases, condition codes, registers, shifted/
// extended operands, memory operands, symbolic immediates, vector lists).
54   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55   unsigned parseCondCodeString(StringRef Cond);
56   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57   int tryParseRegister();
58   int tryMatchVectorRegister(StringRef &Kind, bool expected);
59   bool parseOptionalShift(OperandVector &Operands);
60   bool parseOptionalExtend(OperandVector &Operands);
61   bool parseRegister(OperandVector &Operands);
62   bool parseMemory(OperandVector &Operands);
63   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64   bool parseVectorList(OperandVector &Operands);
65   bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic helpers; Error() returns true so callers can `return Error(...)`.
68   void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69   bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70   bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target-specific assembler directives (.word, .tlsdesccall, Mach-O LOH).
72   bool parseDirectiveWord(unsigned Size, SMLoc L);
73   bool parseDirectiveTLSDescCall(SMLoc L);
75   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79                                OperandVector &Operands, MCStreamer &Out,
80                                unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
// Pulls in declarations generated by TableGen from the .td files.
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
// Custom operand parsers invoked by the generated matcher via the
// "tryParse" hooks; each returns an OperandMatchResultTy.
89   OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
92   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
93   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
94   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
95   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
96   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
97   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
98   bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match-result codes, extending the generated diagnostics.
101   enum ARM64MatchResultTy {
102     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
// Constructor: wires up the parser extension and computes the subtarget
// feature bits the generated matcher consults.
106   ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107                  const MCInstrInfo &MII,
108                  const MCTargetOptions &Options)
109       : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
110     MCAsmParserExtension::Initialize(_Parser);
112     // Initialize the set of available features.
113     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser overrides (entry points called by the core asm parser).
116   virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
117                                 SMLoc NameLoc, OperandVector &Operands);
118   virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
119   virtual bool ParseDirective(AsmToken DirectiveID);
120   unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
// Decomposes a symbolic expression into its ELF/Darwin relocation variant
// kind plus an optional constant addend; used by operand predicates below.
122   static bool classifySymbolRef(const MCExpr *Expr,
123                                 ARM64MCExpr::VariantKind &ELFRefKind,
124                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
125                                 const MCConstantExpr *&Addend);
127 } // end anonymous namespace
131 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
133 class ARM64Operand : public MCParsedAsmOperand {
// NOTE(review): line-sampled listing — the KindTy enum, several struct
// definitions, the union declaration, and closing braces are not shown.
// Memory operand addressing modes (part of an enum whose other members
// are not visible in this listing).
136     ImmediateOffset, // pre-indexed, no writeback
137     RegisterOffset   // register offset, with optional extend
// Source locations for diagnostics; OffsetLoc points at a memory
// operand's offset sub-expression specifically.
157   SMLoc StartLoc, EndLoc, OffsetLoc;
162     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// Payload for a vector register list operand (e.g. { v0.4s - v3.4s }).
170   struct VectorListOp {
173     unsigned NumElements;
174     unsigned ElementKind;
177   struct VectorIndexOp {
186     unsigned Val; // Encoded 8-bit representation.
190     unsigned Val; // Not the enum since not all values have names.
214   // This is for all forms of ARM64 address expressions
216     unsigned BaseRegNum, OffsetRegNum;
217     ARM64_AM::ExtendType ExtType;
220     const MCExpr *OffsetImm;
// Per-kind payload members (presumably members of an anonymous union in
// the full file — TODO confirm against the unsampled source).
227     struct VectorListOp VectorList;
228     struct VectorIndexOp VectorIndex;
230     struct FPImmOp FPImm;
231     struct BarrierOp Barrier;
232     struct SysRegOp SysReg;
233     struct SysCRImmOp SysCRImm;
234     struct PrefetchOp Prefetch;
235     struct ShifterOp Shifter;
236     struct ExtendOp Extend;
240   // Keep the MCContext around as the MCExprs may need manipulated during
241   // the add<>Operands() calls.
244   ARM64Operand(KindTy K, MCContext &_Ctx)
245       : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies the payload member that matches the source
// operand's kind (the dispatch on o.Kind is not shown in this listing).
248   ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
250     StartLoc = o.StartLoc;
269       VectorList = o.VectorList;
272       VectorIndex = o.VectorIndex;
278       SysCRImm = o.SysCRImm;
281       Prefetch = o.Prefetch;
295   /// getStartLoc - Get the location of the first token of this operand.
296   SMLoc getStartLoc() const { return StartLoc; }
297   /// getEndLoc - Get the location of the last token of this operand.
298   SMLoc getEndLoc() const { return EndLoc; }
299   /// getOffsetLoc - Get the location of the offset of this memory operand.
300   SMLoc getOffsetLoc() const { return OffsetLoc; }
// Kind-checked payload accessors: each asserts the operand holds the
// expected kind before reading the corresponding union member.
302   StringRef getToken() const {
303     assert(Kind == k_Token && "Invalid access!");
304     return StringRef(Tok.Data, Tok.Length);
307   bool isTokenSuffix() const {
308     assert(Kind == k_Token && "Invalid access!");
312   const MCExpr *getImm() const {
313     assert(Kind == k_Immediate && "Invalid access!");
317   unsigned getFPImm() const {
318     assert(Kind == k_FPImm && "Invalid access!");
322   unsigned getBarrier() const {
323     assert(Kind == k_Barrier && "Invalid access!");
327   unsigned getReg() const {
328     assert(Kind == k_Register && "Invalid access!");
332   unsigned getVectorListStart() const {
333     assert(Kind == k_VectorList && "Invalid access!");
334     return VectorList.RegNum;
337   unsigned getVectorListCount() const {
338     assert(Kind == k_VectorList && "Invalid access!");
339     return VectorList.Count;
342   unsigned getVectorIndex() const {
343     assert(Kind == k_VectorIndex && "Invalid access!");
344     return VectorIndex.Val;
347   StringRef getSysReg() const {
348     assert(Kind == k_SysReg && "Invalid access!");
349     return StringRef(SysReg.Data, SysReg.Length);
352   unsigned getSysCR() const {
353     assert(Kind == k_SysCR && "Invalid access!");
357   unsigned getPrefetch() const {
358     assert(Kind == k_Prefetch && "Invalid access!");
362   unsigned getShifter() const {
363     assert(Kind == k_Shifter && "Invalid access!");
367   unsigned getExtend() const {
368     assert(Kind == k_Extend && "Invalid access!");
372   bool isImm() const { return Kind == k_Immediate; }
// Immediate-range predicates used by the generated matcher. Each checks
// that the operand is a constant immediate inside the encodable range for
// the corresponding instruction class. (Line-sampled listing: the
// isImm()/non-constant-MCE guard lines are not shown for most of these.)
// sImm9: signed 9-bit unscaled load/store offset, [-256, 255].
373   bool isSImm9() const {
376     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
379     int64_t Val = MCE->getValue();
380     return (Val >= -256 && Val < 256);
// sImm7, scaled by 4/8/16: signed 7-bit pair-load/store offsets.
382   bool isSImm7s4() const {
385     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
388     int64_t Val = MCE->getValue();
389     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
391   bool isSImm7s8() const {
394     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
397     int64_t Val = MCE->getValue();
398     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
400   bool isSImm7s16() const {
403     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
406     int64_t Val = MCE->getValue();
407     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Unsigned/bounded integer ranges; the name encodes the inclusive bounds.
409   bool isImm0_7() const {
412     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
415     int64_t Val = MCE->getValue();
416     return (Val >= 0 && Val < 8);
418   bool isImm1_8() const {
421     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424     int64_t Val = MCE->getValue();
425     return (Val > 0 && Val < 9);
427   bool isImm0_15() const {
430     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433     int64_t Val = MCE->getValue();
434     return (Val >= 0 && Val < 16);
436   bool isImm1_16() const {
439     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442     int64_t Val = MCE->getValue();
443     return (Val > 0 && Val < 17);
445   bool isImm0_31() const {
448     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451     int64_t Val = MCE->getValue();
452     return (Val >= 0 && Val < 32);
454   bool isImm1_31() const {
457     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
460     int64_t Val = MCE->getValue();
461     return (Val >= 1 && Val < 32);
463   bool isImm1_32() const {
466     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
469     int64_t Val = MCE->getValue();
470     return (Val >= 1 && Val < 33);
472   bool isImm0_63() const {
475     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
478     int64_t Val = MCE->getValue();
479     return (Val >= 0 && Val < 64);
481   bool isImm1_63() const {
484     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
487     int64_t Val = MCE->getValue();
488     return (Val >= 1 && Val < 64);
490   bool isImm1_64() const {
493     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496     int64_t Val = MCE->getValue();
497     return (Val >= 1 && Val < 65);
499   bool isImm0_127() const {
502     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505     int64_t Val = MCE->getValue();
506     return (Val >= 0 && Val < 128);
508   bool isImm0_255() const {
511     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514     int64_t Val = MCE->getValue();
515     return (Val >= 0 && Val < 256);
517   bool isImm0_65535() const {
520     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523     int64_t Val = MCE->getValue();
524     return (Val >= 0 && Val < 65536);
// Bitmask ("logical") immediates: validity is delegated to the
// addressing-modes helper for the given register width.
526   bool isLogicalImm32() const {
529     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532     return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
534   bool isLogicalImm64() const {
537     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540     return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
542   bool isSIMDImmType10() const {
545     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
548     return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte offsets within the +/- range reachable
// by the 26-, 19-, and 14-bit (word-scaled) branch immediates.
550   bool isBranchTarget26() const {
553     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556     int64_t Val = MCE->getValue();
559     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
561   bool isPCRelLabel19() const {
564     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567     int64_t Val = MCE->getValue();
570     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
572   bool isBranchTarget14() const {
575     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578     int64_t Val = MCE->getValue();
581     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// True if the immediate is a symbol reference whose ELF relocation
// modifier is one of AllowedModifiers. Darwin modifiers are rejected:
// MOVZ/MOVK relocation variants here are ELF-only.
584   bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
588     ARM64MCExpr::VariantKind ELFRefKind;
589     MCSymbolRefExpr::VariantKind DarwinRefKind;
590     const MCConstantExpr *Addend;
591     if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
595     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
598     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
599       if (ELFRefKind == AllowedModifiers[i])
// MOVZ/MOVK symbol predicates: one per 16-bit "G" slice (G0..G3), each
// listing the relocation modifiers legal for that slice.
606   bool isMovZSymbolG3() const {
607     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
608     return isMovWSymbol(Variants);
611   bool isMovZSymbolG2() const {
612     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
613                                                    ARM64MCExpr::VK_ABS_G2_S,
614                                                    ARM64MCExpr::VK_TPREL_G2,
615                                                    ARM64MCExpr::VK_DTPREL_G2 };
616     return isMovWSymbol(Variants);
619   bool isMovZSymbolG1() const {
620     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
621                                                    ARM64MCExpr::VK_ABS_G1_S,
622                                                    ARM64MCExpr::VK_GOTTPREL_G1,
623                                                    ARM64MCExpr::VK_TPREL_G1,
624                                                    ARM64MCExpr::VK_DTPREL_G1, };
625     return isMovWSymbol(Variants);
628   bool isMovZSymbolG0() const {
629     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
630                                                    ARM64MCExpr::VK_ABS_G0_S,
631                                                    ARM64MCExpr::VK_TPREL_G0,
632                                                    ARM64MCExpr::VK_DTPREL_G0 };
633     return isMovWSymbol(Variants);
636   bool isMovKSymbolG3() const {
637     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
638     return isMovWSymbol(Variants);
641   bool isMovKSymbolG2() const {
642     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
643     return isMovWSymbol(Variants);
646   bool isMovKSymbolG1() const {
647     static ARM64MCExpr::VariantKind Variants[] = {
648       ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
649       ARM64MCExpr::VK_DTPREL_G1_NC
651     return isMovWSymbol(Variants);
654   bool isMovKSymbolG0() const {
655     static ARM64MCExpr::VariantKind Variants[] = {
656       ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
657       ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
659     return isMovWSymbol(Variants);
662   bool isFPImm() const { return Kind == k_FPImm; }
663   bool isBarrier() const { return Kind == k_Barrier; }
664   bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: probe the name against the MRS/MSR/PState
// mappers and report whether it named a known register/field.
665   bool isMRSSystemRegister() const {
666     if (!isSysReg()) return false;
668     bool IsKnownRegister;
669     ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister);
671     return IsKnownRegister;
673   bool isMSRSystemRegister() const {
674     if (!isSysReg()) return false;
676     bool IsKnownRegister;
677     ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister);
679     return IsKnownRegister;
681   bool isSystemCPSRField() const {
682     if (!isSysReg()) return false;
684     bool IsKnownRegister;
685     ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
687     return IsKnownRegister;
689   bool isReg() const { return Kind == k_Register && !Reg.isVector; }
690   bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
692   /// Is this a vector list with the type implicit (presumably attached to the
693   /// instruction itself)?
694   template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
695     return Kind == k_VectorList && VectorList.Count == NumRegs &&
696            !VectorList.ElementKind;
// Matches a vector list with an explicit type, e.g. { v0.4s, v1.4s }.
699   template <unsigned NumRegs, unsigned NumElements, char ElementKind>
700   bool isTypedVectorList() const {
701     if (Kind != k_VectorList)
703     if (VectorList.Count != NumRegs)
705     if (VectorList.ElementKind != ElementKind)
707     return VectorList.NumElements == NumElements;
// Vector element-index predicates: the bound is the lane count for the
// element size (B=16 lanes, H=8, S=4, D=2).
710   bool isVectorIndexB() const {
711     return Kind == k_VectorIndex && VectorIndex.Val < 16;
713   bool isVectorIndexH() const {
714     return Kind == k_VectorIndex && VectorIndex.Val < 8;
716   bool isVectorIndexS() const {
717     return Kind == k_VectorIndex && VectorIndex.Val < 4;
719   bool isVectorIndexD() const {
720     return Kind == k_VectorIndex && VectorIndex.Val < 2;
722   bool isToken() const { return Kind == k_Token; }
723   bool isTokenEqual(StringRef Str) const {
724     return Kind == k_Token && getToken() == Str;
726   bool isMem() const { return Kind == k_Memory; }
727   bool isSysCR() const { return Kind == k_SysCR; }
728   bool isPrefetch() const { return Kind == k_Prefetch; }
729   bool isShifter() const { return Kind == k_Shifter; }
730   bool isExtend() const {
731     // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
733       ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
734       return ST == ARM64_AM::LSL;
736     return Kind == k_Extend;
738   bool isExtend64() const {
739     if (Kind != k_Extend)
741     // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
742     ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
743     return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
745   bool isExtendLSL64() const {
746     // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
748       ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
749       return ST == ARM64_AM::LSL;
751     if (Kind != k_Extend)
753     ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
754     return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
757   bool isArithmeticShifter() const {
761     // An arithmetic shifter is LSL, LSR, or ASR.
762     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
763     return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
766   bool isMovImm32Shifter() const {
770     // A 32-bit MOVi shifter is LSL of 0 or 16.
771     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
772     if (ST != ARM64_AM::LSL)
774     uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
775     return (Val == 0 || Val == 16);
778   bool isMovImm64Shifter() const {
782     // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
783     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
784     if (ST != ARM64_AM::LSL)
786     uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
787     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
790   bool isAddSubShifter() const {
794     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
795     unsigned Val = Shifter.Val;
796     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
797            (ARM64_AM::getShiftValue(Val) == 0 ||
798             ARM64_AM::getShiftValue(Val) == 12);
801   bool isLogicalVecShifter() const {
805     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
806     unsigned Val = Shifter.Val;
807     unsigned Shift = ARM64_AM::getShiftValue(Val);
808     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
809            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
812   bool isLogicalVecHalfWordShifter() const {
813     if (!isLogicalVecShifter())
816     // A logical vector halfword shifter is a left shift by 0 or 8.
817     unsigned Val = Shifter.Val;
818     unsigned Shift = ARM64_AM::getShiftValue(Val);
819     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
820            (Shift == 0 || Shift == 8);
823   bool isMoveVecShifter() const {
827     // A move vector shifter (MSL) is a left shift by 8 or 16.
828     unsigned Val = Shifter.Val;
829     unsigned Shift = ARM64_AM::getShiftValue(Val);
830     return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
831            (Shift == 8 || Shift == 16);
// Register-offset memory predicates: the allowed extend shift amount is
// either 0 (no shift) or log2 of the access size in bytes.
834   bool isMemoryRegisterOffset8() const {
835     return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
838   bool isMemoryRegisterOffset16() const {
839     return isMem() && Mem.Mode == RegisterOffset &&
840            (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
843   bool isMemoryRegisterOffset32() const {
844     return isMem() && Mem.Mode == RegisterOffset &&
845            (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
848   bool isMemoryRegisterOffset64() const {
849     return isMem() && Mem.Mode == RegisterOffset &&
850            (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
853   bool isMemoryRegisterOffset128() const {
854     return isMem() && Mem.Mode == RegisterOffset &&
855            (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
858   bool isMemoryUnscaled() const {
861     if (Mem.Mode != ImmediateOffset)
865     // Make sure the immediate value is valid.
866     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
869     // The offset must fit in a signed 9-bit unscaled immediate.
870     int64_t Value = CE->getValue();
871     return (Value >= -256 && Value < 256);
873   // Fallback unscaled operands are for aliases of LDR/STR that fall back
874   // to LDUR/STUR when the offset is not legal for the former but is for
875   // the latter. As such, in addition to checking for being a legal unscaled
876   // address, also check that it is not a legal scaled address. This avoids
877   // ambiguity in the matcher.
878   bool isMemoryUnscaledFB8() const {
879     return isMemoryUnscaled() && !isMemoryIndexed8();
881   bool isMemoryUnscaledFB16() const {
882     return isMemoryUnscaled() && !isMemoryIndexed16();
884   bool isMemoryUnscaledFB32() const {
885     return isMemoryUnscaled() && !isMemoryIndexed32();
887   bool isMemoryUnscaledFB64() const {
888     return isMemoryUnscaled() && !isMemoryIndexed64();
890   bool isMemoryUnscaledFB128() const {
891     return isMemoryUnscaled() && !isMemoryIndexed128();
// True if this is an immediate-offset memory operand whose offset is
// encodable as an unsigned, Scale-aligned 12-bit scaled immediate, or is
// a page-offset symbolic expression suitable for a LO12-style relocation.
893   bool isMemoryIndexed(unsigned Scale) const {
896     if (Mem.Mode != ImmediateOffset)
900     // Make sure the immediate value is valid.
901     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
904     // The offset must be a positive multiple of the scale and in range of
905     // encoding with a 12-bit immediate.
906     int64_t Value = CE->getValue();
907     return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
910     // If it's not a constant, check for some expressions we know.
911     const MCExpr *Expr = Mem.OffsetImm;
912     ARM64MCExpr::VariantKind ELFRefKind;
913     MCSymbolRefExpr::VariantKind DarwinRefKind;
914     const MCConstantExpr *Addend;
915     if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
917       // If we don't understand the expression, assume the best and
918       // let the fixup and relocation code deal with it.
922     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
923         ELFRefKind == ARM64MCExpr::VK_LO12 ||
924         ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
925         ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
926         ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
927         ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
928         ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
929         ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
930         ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
931       // Note that we don't range-check the addend. It's adjusted modulo page
932       // size when converted, so there is no "out of range" condition when using
934       int64_t Value = Addend ? Addend->getValue() : 0;
935       return Value >= 0 && (Value % Scale) == 0;
936     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
937                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
938       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Scaled-immediate predicates for each access size (scale = bytes).
944   bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
945   bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
946   bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
947   bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
948   bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
// No-offset forms: [Xn] with an offset of exactly zero.
949   bool isMemoryNoIndex() const {
952     if (Mem.Mode != ImmediateOffset)
957     // Make sure the immediate value is valid. Only zero is allowed.
958     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
959     if (!CE || CE->getValue() != 0)
963   bool isMemorySIMDNoIndex() const {
966     if (Mem.Mode != ImmediateOffset)
// SIMD ld/st forms carry no offset expression at all (null OffsetImm).
968     return Mem.OffsetImm == 0;
// Pre/post-index style signed-7/9-bit offsets; the constant-ness of the
// offset was established at parse time, hence the asserts.
970   bool isMemoryIndexedSImm9() const {
971     if (!isMem() || Mem.Mode != ImmediateOffset)
975     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
976     assert(CE && "Non-constant pre-indexed offset!");
977     int64_t Value = CE->getValue();
978     return Value >= -256 && Value <= 255;
980   bool isMemoryIndexed32SImm7() const {
981     if (!isMem() || Mem.Mode != ImmediateOffset)
985     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
986     assert(CE && "Non-constant pre-indexed offset!");
987     int64_t Value = CE->getValue();
988     return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
990   bool isMemoryIndexed64SImm7() const {
991     if (!isMem() || Mem.Mode != ImmediateOffset)
995     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
996     assert(CE && "Non-constant pre-indexed offset!");
997     int64_t Value = CE->getValue();
998     return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
1000   bool isMemoryIndexed128SImm7() const {
1001     if (!isMem() || Mem.Mode != ImmediateOffset)
1005     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1006     assert(CE && "Non-constant pre-indexed offset!");
1007     int64_t Value = CE->getValue();
1008     return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
// ADRP target: page-aligned offset within +/- 2^20 pages (21-bit signed
// page count, scaled by the 4K page size).
1011   bool isAdrpLabel() const {
1012     // Validation was handled during parsing, so we just sanity check that
1013     // something didn't go haywire.
1017     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1018       int64_t Val = CE->getValue();
1019       int64_t Min = - (4096 * (1LL << (21 - 1)));
1020       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1021       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR target: signed 21-bit byte offset.
1027   bool isAdrLabel() const {
1028     // Validation was handled during parsing, so we just sanity check that
1029     // something didn't go haywire.
1033     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1034       int64_t Val = CE->getValue();
1035       int64_t Min = - (1LL << (21 - 1));
1036       int64_t Max = ((1LL << (21 - 1)) - 1);
1037       return Val >= Min && Val <= Max;
// Append Expr to Inst, folding constants to plain immediates.
1043   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1044     // Add as immediates when possible. Null MCExpr = 0.
1046       Inst.addOperand(MCOperand::CreateImm(0));
1047     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1048       Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1050       Inst.addOperand(MCOperand::CreateExpr(Expr));
1053 void addRegOperands(MCInst &Inst, unsigned N) const {
1054 assert(N == 1 && "Invalid number of operands!");
1055 Inst.addOperand(MCOperand::CreateReg(getReg()));
1058 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1059 assert(N == 1 && "Invalid number of operands!");
1060 Inst.addOperand(MCOperand::CreateReg(getReg()));
1063 template <unsigned NumRegs>
1064 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1065 assert(N == 1 && "Invalid number of operands!");
1066 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1067 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1068 unsigned FirstReg = FirstRegs[NumRegs - 1];
1071 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1074 template <unsigned NumRegs>
1075 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1076 assert(N == 1 && "Invalid number of operands!");
1077 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1078 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1079 unsigned FirstReg = FirstRegs[NumRegs - 1];
1082 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1085 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1086 assert(N == 1 && "Invalid number of operands!");
1087 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1090 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1091 assert(N == 1 && "Invalid number of operands!");
1092 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1095 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1096 assert(N == 1 && "Invalid number of operands!");
1097 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1100 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1101 assert(N == 1 && "Invalid number of operands!");
1102 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1105 void addImmOperands(MCInst &Inst, unsigned N) const {
1106 assert(N == 1 && "Invalid number of operands!");
1107 // If this is a pageoff symrefexpr with an addend, adjust the addend
1108 // to be only the page-offset portion. Otherwise, just add the expr
1110 addExpr(Inst, getImm());
1113 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1114 assert(N == 1 && "Invalid number of operands!");
1115 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1117 addExpr(Inst, getImm());
1119 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1122 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1123 addImmOperands(Inst, N);
1126 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1127 assert(N == 1 && "Invalid number of operands!");
1128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1129 assert(MCE && "Invalid constant immediate operand!");
1130 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1133 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1134 assert(N == 1 && "Invalid number of operands!");
1135 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1136 assert(MCE && "Invalid constant immediate operand!");
1137 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1140 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1141 assert(N == 1 && "Invalid number of operands!");
1142 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1143 assert(MCE && "Invalid constant immediate operand!");
1144 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1147 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1148 assert(N == 1 && "Invalid number of operands!");
1149 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1150 assert(MCE && "Invalid constant immediate operand!");
1151 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1154 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1157 assert(MCE && "Invalid constant immediate operand!");
1158 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1161 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1162 assert(N == 1 && "Invalid number of operands!");
1163 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1164 assert(MCE && "Invalid constant immediate operand!");
1165 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Operand adders for the ranged-immediate operand classes (imm0_15 through
// imm0_65535). Each fills exactly one MCInst operand slot, requires the parsed
// immediate to have folded to an MCConstantExpr, and emits its raw value.
// Range checking is presumably performed by the corresponding is*() predicates
// before these run -- confirm against the auto-generated matcher.
// NOTE(review): the embedded original line numbers jump (e.g. 1172 -> 1175),
// so closing braces/blank lines were elided from this excerpt; the code lines
// below are otherwise preserved byte-for-byte.
1168 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1169 assert(N == 1 && "Invalid number of operands!");
1170 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1171 assert(MCE && "Invalid constant immediate operand!");
1172 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1175 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1176 assert(N == 1 && "Invalid number of operands!");
1177 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1178 assert(MCE && "Invalid constant immediate operand!");
1179 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1182 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1183 assert(N == 1 && "Invalid number of operands!");
1184 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1185 assert(MCE && "Invalid constant immediate operand!");
1186 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1189 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1190 assert(N == 1 && "Invalid number of operands!");
1191 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1192 assert(MCE && "Invalid constant immediate operand!");
1193 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1196 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1197 assert(N == 1 && "Invalid number of operands!");
1198 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1199 assert(MCE && "Invalid constant immediate operand!");
1200 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1203 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1204 assert(N == 1 && "Invalid number of operands!");
1205 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1206 assert(MCE && "Invalid constant immediate operand!");
1207 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1210 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1211 assert(N == 1 && "Invalid number of operands!");
1212 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1213 assert(MCE && "Invalid constant immediate operand!");
1214 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1217 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1218 assert(N == 1 && "Invalid number of operands!");
1219 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1220 assert(MCE && "Invalid constant immediate operand!");
1221 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1224 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1227 assert(MCE && "Invalid constant immediate operand!");
1228 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1231 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1232 assert(N == 1 && "Invalid number of operands!");
1233 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1234 assert(MCE && "Invalid constant immediate operand!");
1235 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1238 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1239 assert(N == 1 && "Invalid number of operands!");
1240 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1241 assert(MCE && "Invalid constant immediate operand!");
1242 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Immediate adders that emit a hardware *encoding* rather than the raw value:
// logical immediates are re-encoded via ARM64_AM::encodeLogicalImmediate for
// the given register width (32/64), and the AdvSIMD "type 10" modified
// immediate via encodeAdvSIMDModImmType10. Validity of the value as an
// encodable immediate is presumably checked by the matching is*() predicate
// before these run -- confirm.
1245 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1246 assert(N == 1 && "Invalid number of operands!");
1247 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1248 assert(MCE && "Invalid logical immediate operand!");
1249 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1250 Inst.addOperand(MCOperand::CreateImm(encoding));
1253 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1254 assert(N == 1 && "Invalid number of operands!");
1255 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1256 assert(MCE && "Invalid logical immediate operand!");
1257 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1258 Inst.addOperand(MCOperand::CreateImm(encoding));
1261 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1262 assert(N == 1 && "Invalid number of operands!");
1263 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1264 assert(MCE && "Invalid immediate operand!");
1265 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1266 Inst.addOperand(MCOperand::CreateImm(encoding));
// Branch/PC-relative target adders. A resolved constant target is emitted
// with its two low bits shifted off (instruction encodings are word-scaled);
// an unresolved label is emitted as-is for the fixup machinery.
// NOTE(review): the embedded numbering jumps around each addExpr() call
// (e.g. 1274 -> 1276 -> 1279), so the guard that takes the label path --
// presumably "if (!MCE) { addExpr(...); return; }" -- was elided from this
// excerpt; confirm against the upstream file before relying on control flow.
1269 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1270 // Branch operands don't encode the low bits, so shift them off
1271 // here. If it's a label, however, just put it on directly as there's
1272 // not enough information now to do anything.
1273 assert(N == 1 && "Invalid number of operands!");
1274 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1276 addExpr(Inst, getImm());
1279 assert(MCE && "Invalid constant immediate operand!");
1280 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1283 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1284 // Branch operands don't encode the low bits, so shift them off
1285 // here. If it's a label, however, just put it on directly as there's
1286 // not enough information now to do anything.
1287 assert(N == 1 && "Invalid number of operands!");
1288 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1290 addExpr(Inst, getImm());
1293 assert(MCE && "Invalid constant immediate operand!");
1294 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1297 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1298 // Branch operands don't encode the low bits, so shift them off
1299 // here. If it's a label, however, just put it on directly as there's
1300 // not enough information now to do anything.
1301 assert(N == 1 && "Invalid number of operands!");
1302 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1304 addExpr(Inst, getImm());
1307 assert(MCE && "Invalid constant immediate operand!");
1308 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Adders for the simple single-value operand kinds: encoded FP immediate,
// barrier option, system registers (MRS/MSR directions use different name
// tables), PState (CPSR) fields, system CR numbers, and prefetch hints.
// The system-register adders map the stored name string to its numeric
// encoding via the relevant *Mapper().fromString().
// NOTE(review): numbering jumps (e.g. 1322 -> 1325) show that the declaration
// of the local 'Valid' flag and its use were elided from this excerpt; the
// visible code ignores lookup validity here, presumably because the name was
// validated at parse time -- confirm upstream.
1311 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1312 assert(N == 1 && "Invalid number of operands!");
1313 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1316 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1321 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1325 uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid);
1327 Inst.addOperand(MCOperand::CreateImm(Bits));
1330 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1331 assert(N == 1 && "Invalid number of operands!");
1334 uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid);
1336 Inst.addOperand(MCOperand::CreateImm(Bits));
1339 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1343 uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
1345 Inst.addOperand(MCOperand::CreateImm(Bits));
1348 void addSysCROperands(MCInst &Inst, unsigned N) const {
1349 assert(N == 1 && "Invalid number of operands!");
1350 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1353 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1354 assert(N == 1 && "Invalid number of operands!");
1355 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shifter-operand adders. All nine variants emit the same packed shifter
// immediate (type + amount, as produced by ARM64_AM::getShifterImm); they
// exist as distinct methods only because each corresponds to a distinct
// operand class whose is*() predicate constrains the legal shift types and
// amounts before these run.
1358 void addShifterOperands(MCInst &Inst, unsigned N) const {
1359 assert(N == 1 && "Invalid number of operands!");
1360 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1363 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1364 assert(N == 1 && "Invalid number of operands!");
1365 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1368 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1369 assert(N == 1 && "Invalid number of operands!");
1370 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1373 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1378 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1379 assert(N == 1 && "Invalid number of operands!");
1380 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1383 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1384 assert(N == 1 && "Invalid number of operands!");
1385 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1388 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1389 assert(N == 1 && "Invalid number of operands!");
1390 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1393 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1394 assert(N == 1 && "Invalid number of operands!");
1395 Inst.addOperand(MCOperand::CreateImm(getShifter()));
// Extend-operand adders. "lsl #n" parses as a k_Shifter but is an alias for
// UXTW (32-bit forms) or UXTX (64-bit LSL form), so those paths re-encode the
// shift amount as an arithmetic-extend immediate before emission.
// NOTE(review): numbering jumps (1400 -> 1402, 1405 -> 1407) indicate the
// branch selecting between the shifter-alias path and the plain getExtend()
// path -- presumably "if (isShifter()) { ... return; }" -- was elided from
// this excerpt; confirm the control flow against the upstream file.
1398 void addExtendOperands(MCInst &Inst, unsigned N) const {
1399 assert(N == 1 && "Invalid number of operands!");
1400 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
1402 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1403 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1404 ARM64_AM::getShiftValue(getShifter()));
1405 Inst.addOperand(MCOperand::CreateImm(imm));
1407 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1410 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1415 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1419 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1420 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1421 ARM64_AM::getShiftValue(getShifter()));
1422 Inst.addOperand(MCOperand::CreateImm(imm));
1424 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// Register-offset memory operands emit three slots: base register, offset
// register (widened W -> X for the encoding), and a packed extend/shift
// immediate. The per-width wrappers decide whether the shift applies based
// on the access size (shift amount must equal log2 of the access bytes).
// NOTE(review): the 1428 -> 1430 numbering jump suggests a line (possibly a
// guard around the W->X conversion) was elided from this excerpt -- confirm.
1427 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1428 assert(N == 3 && "Invalid number of operands!");
1430 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1431 Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
1432 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1433 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
1436 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1437 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1440 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1441 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1444 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1445 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1448 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1449 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1452 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1453 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
// Scaled immediate-offset memory operand: base register plus an offset that
// the encoding stores divided by the access size. Constant offsets are
// divided here; symbolic offsets are wrapped in a divide expression for the
// fixup evaluator, except @pageoff+addend references, which the linker scales.
// NOTE(review): the jumps 1463 -> 1467 and 1474 -> 1477 indicate the early
// "return;" lines after the no-offset and constant-offset paths were elided
// from this excerpt.
1456 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1457 unsigned Scale) const {
1458 // Add the base register operand.
1459 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1461 if (!Mem.OffsetImm) {
1462 // There isn't an offset.
1463 Inst.addOperand(MCOperand::CreateImm(0));
1467 // Add the offset operand.
1468 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1469 assert(CE->getValue() % Scale == 0 &&
1470 "Offset operand must be multiple of the scale!");
1472 // The MCInst offset operand doesn't include the low bits (like the
1473 // instruction encoding).
1474 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1477 // If this is a pageoff symrefexpr with an addend, the linker will
1478 // do the scaling of the addend.
1480 // Otherwise we don't know what this is, so just add the scaling divide to
1481 // the expression and let the MC fixup evaluation code deal with it.
1482 const MCExpr *Expr = Mem.OffsetImm;
1483 ARM64MCExpr::VariantKind ELFRefKind;
1484 MCSymbolRefExpr::VariantKind DarwinRefKind;
1485 const MCConstantExpr *Addend;
1487 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1489 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1490 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1494 Inst.addOperand(MCOperand::CreateExpr(Expr));
// Unscaled-offset memory operand (e.g. LDUR/STUR forms): base register plus
// the raw (unscaled) constant offset, or 0 when no offset was written.
// NOTE(review): the 1504 -> 1506 numbering jump shows the branch between the
// no-offset and constant-offset paths was elided from this excerpt.
1497 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1498 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1499 // Add the base register operand.
1500 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1502 // Add the offset operand.
1504 Inst.addOperand(MCOperand::CreateImm(0));
1506 // Only constant offsets supported.
1507 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1508 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// Per-access-size wrappers over addMemoryIndexedOperands; the scale is the
// access width in bytes (16/8/4/2/1).
1512 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1513 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1514 addMemoryIndexedOperands(Inst, N, 16);
1517 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1518 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1519 addMemoryIndexedOperands(Inst, N, 8);
1522 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1523 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1524 addMemoryIndexedOperands(Inst, N, 4);
1527 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1528 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1529 addMemoryIndexedOperands(Inst, N, 2);
1532 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1533 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1534 addMemoryIndexedOperands(Inst, N, 1);
// No-index memory operands (e.g. exclusive/SIMD load-store forms that take a
// bare [reg]): only the base register is emitted.
1537 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1538 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1539 // Add the base register operand (the offset is always zero, so ignore it).
1540 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1543 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1544 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1545 // Add the base register operand (the offset is always zero, so ignore it).
1546 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
// Writeback (pre/post-indexed) memory operands: base register plus a
// scale-checked constant offset. The SImm9 form is unscaled (scale 1); the
// SImm7 pair forms scale by the access width (4/8/16).
// NOTE(review): the declaration of the local 'Offset' (presumably
// "int64_t Offset = 0;" around elided line 1557) and the emission of the
// scaled value are partially elided here -- the visible code emits the raw
// Offset, so confirm scaling behavior against the upstream file.
1549 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1550 unsigned Scale) const {
1551 assert(N == 2 && "Invalid number of operands!");
1553 // Add the base register operand.
1554 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1556 // Add the offset operand.
1558 if (Mem.OffsetImm) {
1559 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1560 assert(CE && "Non-constant indexed offset operand!");
1561 Offset = CE->getValue();
1565 assert(Offset % Scale == 0 &&
1566 "Offset operand must be a multiple of the scale!");
1570 Inst.addOperand(MCOperand::CreateImm(Offset));
1573 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1574 addMemoryWritebackIndexedOperands(Inst, N, 1);
1577 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1578 addMemoryWritebackIndexedOperands(Inst, N, 4);
1581 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1582 addMemoryWritebackIndexedOperands(Inst, N, 8);
1585 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1586 addMemoryWritebackIndexedOperands(Inst, N, 16);
// Debug-dump of this operand; implementation follows the class.
1589 virtual void print(raw_ostream &OS) const;
// Static factory methods, one per operand kind. Each heap-allocates an
// ARM64Operand, fills in the kind-specific union members, and returns it;
// ownership passes to the caller (the parser's operand vector).
// NOTE(review): numbering jumps after each factory (e.g. 1596 -> 1602) show
// that the StartLoc/EndLoc assignments and the "return Op;" lines were elided
// from this excerpt; only the field initialization is visible.
1591 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1593 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1594 Op->Tok.Data = Str.data();
1595 Op->Tok.Length = Str.size();
1596 Op->Tok.IsSuffix = IsSuffix;
1602 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1603 SMLoc E, MCContext &Ctx) {
1604 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1605 Op->Reg.RegNum = RegNum;
1606 Op->Reg.isVector = isVector;
1612 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1613 unsigned NumElements, char ElementKind,
1614 SMLoc S, SMLoc E, MCContext &Ctx) {
1615 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1616 Op->VectorList.RegNum = RegNum;
1617 Op->VectorList.Count = Count;
1618 Op->VectorList.NumElements = NumElements;
1619 Op->VectorList.ElementKind = ElementKind;
1625 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1627 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1628 Op->VectorIndex.Val = Idx;
1634 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1636 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1643 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1644 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1645 Op->FPImm.Val = Val;
1651 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1652 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1653 Op->Barrier.Val = Val;
1659 static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) {
1660 ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
1661 Op->SysReg.Data = Str.data();
1662 Op->SysReg.Length = Str.size();
// Immediate-offset memory operand; defaults set here (UXTX, no shift) are
// overwritten by CreateRegOffsetMem for the register-offset form.
1668 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1669 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1671 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1672 Op->Mem.BaseRegNum = BaseRegNum;
1673 Op->Mem.OffsetRegNum = 0;
1674 Op->Mem.OffsetImm = Off;
1675 Op->Mem.ExtType = ARM64_AM::UXTX;
1676 Op->Mem.ShiftVal = 0;
1677 Op->Mem.ExplicitShift = false;
1678 Op->Mem.Mode = ImmediateOffset;
1679 Op->OffsetLoc = OffsetLoc;
1685 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1686 ARM64_AM::ExtendType ExtType,
1687 unsigned ShiftVal, bool ExplicitShift,
1688 SMLoc S, SMLoc E, MCContext &Ctx) {
1689 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1690 Op->Mem.BaseRegNum = BaseReg;
1691 Op->Mem.OffsetRegNum = OffsetReg;
1692 Op->Mem.OffsetImm = 0;
1693 Op->Mem.ExtType = ExtType;
1694 Op->Mem.ShiftVal = ShiftVal;
1695 Op->Mem.ExplicitShift = ExplicitShift;
1696 Op->Mem.Mode = RegisterOffset;
1702 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1704 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1705 Op->SysCRImm.Val = Val;
1711 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1712 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1713 Op->Prefetch.Val = Val;
1719 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1720 SMLoc S, SMLoc E, MCContext &Ctx) {
1721 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1722 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1728 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1729 SMLoc S, SMLoc E, MCContext &Ctx) {
1730 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1731 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1738 } // end anonymous namespace.
// Human-readable dump of an operand, used for debugging/diagnostics. One
// case per operand kind; named encodings (barrier, prefetch) are mapped back
// to their string form where the value is valid.
// NOTE(review): the "switch (Kind) {" header, the case labels, and the
// closing braces were elided from this excerpt (numbering jumps 1740 -> 1743
// etc.); the visible lines are the case bodies only.
1740 void ARM64Operand::print(raw_ostream &OS) const {
1743 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
1748 StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
1750 OS << "<barrier " << Name << ">";
1752 OS << "<barrier invalid #" << getBarrier() << ">";
1756 getImm()->print(OS);
1762 OS << "<register " << getReg() << ">";
1764 case k_VectorList: {
1765 OS << "<vectorlist ";
1766 unsigned Reg = getVectorListStart();
1767 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1768 OS << Reg + i << " ";
1773 OS << "<vectorindex " << getVectorIndex() << ">";
1776 OS << "<sysreg: " << getSysReg() << '>';
1779 OS << "'" << getToken() << "'";
1782 OS << "c" << getSysCR();
1786 StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1788 OS << "<prfop " << Name << ">";
1790 OS << "<prfop invalid #" << getPrefetch() << ">";
1794 unsigned Val = getShifter();
1795 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1796 << ARM64_AM::getShiftValue(Val) << ">";
1800 unsigned Val = getExtend();
1801 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1802 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1808 /// @name Auto-generated Match Functions
// Declaration of the tablegen-generated register name matcher.
1811 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector register name to the corresponding Q register, or fall
// through for anything else.
// NOTE(review): the terminating ".Default(...)" of this StringSwitch (and the
// closing brace) were elided from this excerpt (numbering jumps 1848 -> 1852).
1815 static unsigned matchVectorRegName(StringRef Name) {
1816 return StringSwitch<unsigned>(Name)
1817 .Case("v0", ARM64::Q0)
1818 .Case("v1", ARM64::Q1)
1819 .Case("v2", ARM64::Q2)
1820 .Case("v3", ARM64::Q3)
1821 .Case("v4", ARM64::Q4)
1822 .Case("v5", ARM64::Q5)
1823 .Case("v6", ARM64::Q6)
1824 .Case("v7", ARM64::Q7)
1825 .Case("v8", ARM64::Q8)
1826 .Case("v9", ARM64::Q9)
1827 .Case("v10", ARM64::Q10)
1828 .Case("v11", ARM64::Q11)
1829 .Case("v12", ARM64::Q12)
1830 .Case("v13", ARM64::Q13)
1831 .Case("v14", ARM64::Q14)
1832 .Case("v15", ARM64::Q15)
1833 .Case("v16", ARM64::Q16)
1834 .Case("v17", ARM64::Q17)
1835 .Case("v18", ARM64::Q18)
1836 .Case("v19", ARM64::Q19)
1837 .Case("v20", ARM64::Q20)
1838 .Case("v21", ARM64::Q21)
1839 .Case("v22", ARM64::Q22)
1840 .Case("v23", ARM64::Q23)
1841 .Case("v24", ARM64::Q24)
1842 .Case("v25", ARM64::Q25)
1843 .Case("v26", ARM64::Q26)
1844 .Case("v27", ARM64::Q27)
1845 .Case("v28", ARM64::Q28)
1846 .Case("v29", ARM64::Q29)
1847 .Case("v30", ARM64::Q30)
1848 .Case("v31", ARM64::Q31)
// Validate a ".<lanes><kind>" vector arrangement suffix (e.g. ".8b", ".4s").
// NOTE(review): the StringSwitch .Cases lines enumerating the accepted kinds
// were elided from this excerpt (numbering jumps 1853 -> 1863).
1852 static bool isValidVectorKind(StringRef Name) {
1853 return StringSwitch<bool>(Name.lower())
1863 // Accept the width neutral ones, too, for verbose syntax. If those
1864 // aren't used in the right places, the token operand won't match so
1865 // all will work out.
// Split an already-validated kind suffix into its element count and element
// kind character (e.g. ".16b" -> NumElements=16, ElementKind='b').
// NOTE(review): the zero-initialization of NumElements and the early return
// for width-neutral 2-char suffixes were elided (jumps 1875 -> 1877,
// 1880 -> 1883) -- confirm against the upstream file.
1873 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1874 char &ElementKind) {
1875 assert(isValidVectorKind(Name));
1877 ElementKind = Name.lower()[Name.size() - 1];
1880 if (Name.size() == 2)
1883 // Parse the lane count
1884 Name = Name.drop_front();
1885 while (isdigit(Name.front())) {
1886 NumElements = 10 * NumElements + (Name.front() - '0');
1887 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register at the current token, reporting
// its source range. Returns true (failure) when no register was recognized.
1891 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1893 StartLoc = getLoc();
1894 RegNo = tryParseRegister();
1895 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1896 return (RegNo == (unsigned)-1);
1899 /// tryParseRegister - Try to parse a register name. The token must be an
1900 /// Identifier when called, and if it is a register name the token is eaten and
1901 /// the register is added to the operand list.
// NOTE(review): the fallback branch structure, the .Default of the alias
// StringSwitch, and the return statements were elided from this excerpt
// (numbering jumps 1908 -> 1910, 1914 -> 1920); presumably the aliases are
// tried only when MatchRegisterName failed, and -1 is returned when both
// lookups fail -- confirm upstream.
1902 int ARM64AsmParser::tryParseRegister() {
1903 const AsmToken &Tok = Parser.getTok();
1904 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
1906 std::string lowerCase = Tok.getString().lower();
1907 unsigned RegNum = MatchRegisterName(lowerCase);
1908 // Also handle a few aliases of registers.
1910 RegNum = StringSwitch<unsigned>(lowerCase)
1911 .Case("fp", ARM64::FP)
1912 .Case("lr", ARM64::LR)
1913 .Case("x31", ARM64::XZR)
1914 .Case("w31", ARM64::WZR)
1920 Parser.Lex(); // Eat identifier token.
1924 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1925 /// kind specifier. If it is a register specifier, eat the token and return it.
// Splits "vN.kind" at the '.', validates both halves, and returns the matched
// Q register; emits a TokError and (per the elided lines) presumably returns
// -1 on failure -- the return statements were elided from this excerpt.
1926 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1927 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1928 TokError("vector register expected");
1932 StringRef Name = Parser.getTok().getString();
1933 // If there is a kind specifier, it's separated from the register name by
1935 size_t Start = 0, Next = Name.find('.');
1936 StringRef Head = Name.slice(Start, Next);
1937 unsigned RegNum = matchVectorRegName(Head);
1939 if (Next != StringRef::npos) {
1940 Kind = Name.slice(Next, StringRef::npos);
1941 if (!isValidVectorKind(Kind)) {
1942 TokError("invalid vector kind qualifier");
1946 Parser.Lex(); // Eat the register token.
1951 TokError("vector register expected");
// Hand-rolled matcher for "cN" system control-register names, laid out by
// string length like the tablegen'erated matcher.
// NOTE(review): nearly all of this function's body (the per-length cases and
// return values for lines 1963-1989 and 1991-2010) was elided from this
// excerpt; only the length-dispatch skeleton is visible.
1955 static int MatchSysCRName(StringRef Name) {
1956 // Use the same layout as the tablegen'erated register name matcher. Ugly,
1958 switch (Name.size()) {
1962 if (Name[0] != 'c' && Name[0] != 'C')
1990 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
2011 llvm_unreachable("Unhandled SysCR operand string!");
2015 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Accepts a "cN" identifier; NoMatch when the token isn't an identifier or
// doesn't name a CR (negative MatchSysCRName result, per the elided guard at
// line 2024/2025).
2016 ARM64AsmParser::OperandMatchResultTy
2017 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2019 const AsmToken &Tok = Parser.getTok();
2020 if (Tok.isNot(AsmToken::Identifier))
2021 return MatchOperand_NoMatch;
2023 int Num = MatchSysCRName(Tok.getString());
2025 return MatchOperand_NoMatch;
2027 Parser.Lex(); // Eat identifier token.
2028 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2029 return MatchOperand_Success;
2032 /// tryParsePrefetch - Try to parse a prefetch operand.
// Two accepted spellings: an optionally '#'-prefixed constant in [0,31], or a
// named PRFM hint looked up via PRFMMapper. Elided lines include the
// declaration of S/Valid and the range/validity guards (numbering jumps
// 2039 -> 2041, 2046 -> 2048, 2067 -> 2069).
2033 ARM64AsmParser::OperandMatchResultTy
2034 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2036 const AsmToken &Tok = Parser.getTok();
2037 // Either an identifier for named values or a 5-bit immediate.
2038 bool Hash = Tok.is(AsmToken::Hash);
2039 if (Hash || Tok.is(AsmToken::Integer)) {
2041 Parser.Lex(); // Eat hash token.
2042 const MCExpr *ImmVal;
2043 if (getParser().parseExpression(ImmVal))
2044 return MatchOperand_ParseFail;
2046 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2048 TokError("immediate value expected for prefetch operand");
2049 return MatchOperand_ParseFail;
2051 unsigned prfop = MCE->getValue();
2053 TokError("prefetch operand out of range, [0,31] expected");
2054 return MatchOperand_ParseFail;
2057 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2058 return MatchOperand_Success;
2061 if (Tok.isNot(AsmToken::Identifier)) {
2062 TokError("pre-fetch hint expected");
2063 return MatchOperand_ParseFail;
2067 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2069 TokError("pre-fetch hint expected");
2070 return MatchOperand_ParseFail;
2073 Parser.Lex(); // Eat identifier token.
2074 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2075 return MatchOperand_Success;
2078 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// Parses an optionally '#'-prefixed symbolic immediate for ADRP. A bare
// symbol (no @page/@gotpage/... modifier) becomes an ELF VK_ABS_PAGE
// reference; Darwin gotpage/tlvppage references must have no addend; any
// other modifier combination is rejected. Elided lines include the
// declarations of S/Expr (jumps 2081 -> 2085, 2086 -> 2089) and the addend
// check condition around line 2103.
2080 ARM64AsmParser::OperandMatchResultTy
2081 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2085 if (Parser.getTok().is(AsmToken::Hash)) {
2086 Parser.Lex(); // Eat hash token.
2089 if (parseSymbolicImmVal(Expr))
2090 return MatchOperand_ParseFail;
2092 ARM64MCExpr::VariantKind ELFRefKind;
2093 MCSymbolRefExpr::VariantKind DarwinRefKind;
2094 const MCConstantExpr *Addend;
2095 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2096 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2097 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2098 // No modifier was specified at all; this is the syntax for an ELF basic
2099 // ADRP relocation (unfortunately).
2100 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2101 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2102 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2104 Error(S, "gotpage label reference not allowed an addend");
2105 return MatchOperand_ParseFail;
2106 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2107 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2108 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2109 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2110 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2111 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2112 // The operand must be an @page or @gotpage qualified symbolref.
2113 Error(S, "page or gotpage label reference expected");
2114 return MatchOperand_ParseFail;
2118 // We have either a label reference possibly with addend or an immediate. The
2119 // addend is a raw value here. The linker will adjust it to only reference the
2121 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2122 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2124 return MatchOperand_Success;
2127 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// ADR targets need no @page validation: any expression (optionally
// '#'-prefixed) is accepted verbatim as the immediate.
2129 ARM64AsmParser::OperandMatchResultTy
2130 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2134 if (Parser.getTok().is(AsmToken::Hash)) {
2135 Parser.Lex(); // Eat hash token.
2138 if (getParser().parseExpression(Expr))
2139 return MatchOperand_ParseFail;
2141 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2142 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2144 return MatchOperand_Success;
2147 /// tryParseFPImm - A floating point immediate expression operand.
// Parses an optionally '#'-prefixed FP immediate, in either real-literal form
// (encoded via getFP64Imm; zero is let through as -1 for later substitution
// of the zero register) or integer form (a "0x" literal is taken as the raw
// 8-bit encoding; any other integer is re-parsed as a real). Elided lines
// include the declarations of S/Val, the negation handling body around line
// 2160, and the closing else branches (numbering jumps throughout).
2148 ARM64AsmParser::OperandMatchResultTy
2149 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2153 if (Parser.getTok().is(AsmToken::Hash)) {
2154 Parser.Lex(); // Eat '#'
2158 // Handle negation, as that still comes through as a separate token.
2159 bool isNegative = false;
2160 if (Parser.getTok().is(AsmToken::Minus)) {
2164 const AsmToken &Tok = Parser.getTok();
2165 if (Tok.is(AsmToken::Real)) {
2166 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2167 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2168 // If we had a '-' in front, toggle the sign bit.
2169 IntVal ^= (uint64_t)isNegative << 63;
2170 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2171 Parser.Lex(); // Eat the token.
2172 // Check for out of range values. As an exception, we let Zero through,
2173 // as we handle that special case in post-processing before matching in
2174 // order to use the zero register for it.
2175 if (Val == -1 && !RealVal.isZero()) {
2176 TokError("floating point value out of range");
2177 return MatchOperand_ParseFail;
2179 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2180 return MatchOperand_Success;
2182 if (Tok.is(AsmToken::Integer)) {
2184 if (!isNegative && Tok.getString().startswith("0x")) {
2185 Val = Tok.getIntVal();
2186 if (Val > 255 || Val < 0) {
2187 TokError("encoded floating point value out of range");
2188 return MatchOperand_ParseFail;
2191 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2192 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2193 // If we had a '-' in front, toggle the sign bit.
2194 IntVal ^= (uint64_t)isNegative << 63;
2195 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2197 Parser.Lex(); // Eat the token.
2198 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2199 return MatchOperand_Success;
2203 return MatchOperand_NoMatch;
2205 TokError("invalid floating point immediate");
2206 return MatchOperand_ParseFail;
2209 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of the textual condition code to the ARM64CC
// enumerator; "hs"/"lo" are aliases for "cs"/"cc". Returns ARM64CC::Invalid
// for unrecognized strings (the "return CC;" line was elided; numbering
// jumps 2230 -> 2234).
2210 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2211 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2212 .Case("eq", ARM64CC::EQ)
2213 .Case("ne", ARM64CC::NE)
2214 .Case("cs", ARM64CC::CS)
2215 .Case("hs", ARM64CC::CS)
2216 .Case("cc", ARM64CC::CC)
2217 .Case("lo", ARM64CC::CC)
2218 .Case("mi", ARM64CC::MI)
2219 .Case("pl", ARM64CC::PL)
2220 .Case("vs", ARM64CC::VS)
2221 .Case("vc", ARM64CC::VC)
2222 .Case("hi", ARM64CC::HI)
2223 .Case("ls", ARM64CC::LS)
2224 .Case("ge", ARM64CC::GE)
2225 .Case("lt", ARM64CC::LT)
2226 .Case("gt", ARM64CC::GT)
2227 .Case("le", ARM64CC::LE)
2228 .Case("al", ARM64CC::AL)
2229 .Case("nv", ARM64CC::NV)
2230 .Default(ARM64CC::Invalid)
2234 /// parseCondCode - Parse a Condition Code operand.
// Consumes the condition-code identifier, optionally inverts it (used for
// aliases like CSINV/CSNEG pseudos), and pushes it as a constant immediate
// operand. The guard around the inversion (presumably "if (invertCondCode)")
// was elided from this excerpt (numbering jumps 2245 -> 2248).
2235 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2236 bool invertCondCode) {
2238 const AsmToken &Tok = Parser.getTok();
2239 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2241 StringRef Cond = Tok.getString();
2242 unsigned CC = parseCondCodeString(Cond);
2243 if (CC == ARM64CC::Invalid)
2244 return TokError("invalid condition code");
2245 Parser.Lex(); // Eat identifier token.
2248 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2250 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2252 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2256 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2257 /// them if present.
// Recognizes lsl/lsr/asr/ror/msl (either case), then requires an optionally
// '#'-prefixed constant shift amount that fits in 6 bits, and pushes a
// k_Shifter operand. Elided lines include the early return for
// InvalidShift, the Lex() that eats the shift keyword, and the final return
// (numbering jumps 2272 -> 2275, 2275 -> 2278, 2288 -> 2291).
2258 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2259 const AsmToken &Tok = Parser.getTok();
2260 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2261 .Case("lsl", ARM64_AM::LSL)
2262 .Case("lsr", ARM64_AM::LSR)
2263 .Case("asr", ARM64_AM::ASR)
2264 .Case("ror", ARM64_AM::ROR)
2265 .Case("msl", ARM64_AM::MSL)
2266 .Case("LSL", ARM64_AM::LSL)
2267 .Case("LSR", ARM64_AM::LSR)
2268 .Case("ASR", ARM64_AM::ASR)
2269 .Case("ROR", ARM64_AM::ROR)
2270 .Case("MSL", ARM64_AM::MSL)
2271 .Default(ARM64_AM::InvalidShift);
2272 if (ShOp == ARM64_AM::InvalidShift)
2275 SMLoc S = Tok.getLoc();
2278 // We expect a number here.
2279 bool Hash = getLexer().is(AsmToken::Hash);
2280 if (!Hash && getLexer().isNot(AsmToken::Integer))
2281 return TokError("immediate value expected for shifter operand");
2284 Parser.Lex(); // Eat the '#'.
2286 SMLoc ExprLoc = getLoc();
2287 const MCExpr *ImmVal;
2288 if (getParser().parseExpression(ImmVal))
2291 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2293 return TokError("immediate value expected for shifter operand");
2295 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2296 return Error(ExprLoc, "immediate value too large for shifter operand");
2298 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2300 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2304 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2305 /// them if present.
// Recognizes uxtb/uxth/uxtw/uxtx/sxtb/sxth/sxtw/sxtx (either case; "lsl"
// aliases UXTX). The extend amount is optional: end-of-statement, comma, or
// a non-'#'/non-integer token yields an implicit amount of 0; otherwise an
// optionally '#'-prefixed constant amount is required. Elided lines include
// the early return for InvalidExtend, the Lex() that eats the keyword, and
// the returns after each push (numbering jumps 2329 -> 2332, 2332 -> 2335,
// 2339 -> 2343, 2347 -> 2352, 2355 -> 2358).
2306 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2307 const AsmToken &Tok = Parser.getTok();
2308 ARM64_AM::ExtendType ExtOp =
2309 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2310 .Case("uxtb", ARM64_AM::UXTB)
2311 .Case("uxth", ARM64_AM::UXTH)
2312 .Case("uxtw", ARM64_AM::UXTW)
2313 .Case("uxtx", ARM64_AM::UXTX)
2314 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2315 .Case("sxtb", ARM64_AM::SXTB)
2316 .Case("sxth", ARM64_AM::SXTH)
2317 .Case("sxtw", ARM64_AM::SXTW)
2318 .Case("sxtx", ARM64_AM::SXTX)
2319 .Case("UXTB", ARM64_AM::UXTB)
2320 .Case("UXTH", ARM64_AM::UXTH)
2321 .Case("UXTW", ARM64_AM::UXTW)
2322 .Case("UXTX", ARM64_AM::UXTX)
2323 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2324 .Case("SXTB", ARM64_AM::SXTB)
2325 .Case("SXTH", ARM64_AM::SXTH)
2326 .Case("SXTW", ARM64_AM::SXTW)
2327 .Case("SXTX", ARM64_AM::SXTX)
2328 .Default(ARM64_AM::InvalidExtend);
2329 if (ExtOp == ARM64_AM::InvalidExtend)
2332 SMLoc S = Tok.getLoc();
2335 if (getLexer().is(AsmToken::EndOfStatement) ||
2336 getLexer().is(AsmToken::Comma)) {
2337 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2339 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2343 bool Hash = getLexer().is(AsmToken::Hash);
2344 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2345 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2347 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2352 Parser.Lex(); // Eat the '#'.
2354 const MCExpr *ImmVal;
2355 if (getParser().parseExpression(ImmVal))
2358 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2360 return TokError("immediate value expected for extend operand");
2362 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2364 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2368 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2369 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2370 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2371 OperandVector &Operands) {
// These alias mnemonics never take a '.'-suffix.
2372 if (Name.find('.') != StringRef::npos)
2373 return TokError("invalid operand");
// The instruction is rewritten as a "sys" mnemonic token up front; the
// operands pushed below are the SYS encoding fields.
2377 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2379 const AsmToken &Tok = Parser.getTok();
2380 StringRef Op = Tok.getString();
2381 SMLoc S = Tok.getLoc();
2383 const MCExpr *Expr = 0;
// SYS_ALIAS(op1, Cn, Cm, op2) pushes the four SYS operand fields in
// encoding order: op1 immediate, CRn, CRm, op2 immediate.
// (No comments may appear between the continuation lines below.)
2385 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2387 Expr = MCConstantExpr::Create(op1, getContext()); \
2388 Operands.push_back( \
2389 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2390 Operands.push_back( \
2391 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2392 Operands.push_back( \
2393 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2394 Expr = MCConstantExpr::Create(op2, getContext()); \
2395 Operands.push_back( \
2396 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
// Operand names are matched case-insensitively via compare_lower().
2399 if (Mnemonic == "ic") {
2400 if (!Op.compare_lower("ialluis")) {
2401 // SYS #0, C7, C1, #0
2402 SYS_ALIAS(0, 7, 1, 0);
2403 } else if (!Op.compare_lower("iallu")) {
2404 // SYS #0, C7, C5, #0
2405 SYS_ALIAS(0, 7, 5, 0);
2406 } else if (!Op.compare_lower("ivau")) {
2407 // SYS #3, C7, C5, #1
2408 SYS_ALIAS(3, 7, 5, 1);
2410 return TokError("invalid operand for IC instruction");
2412 } else if (Mnemonic == "dc") {
2413 if (!Op.compare_lower("zva")) {
2414 // SYS #3, C7, C4, #1
2415 SYS_ALIAS(3, 7, 4, 1);
2416 } else if (!Op.compare_lower("ivac")) {
2417 // SYS #0, C7, C6, #1
2418 SYS_ALIAS(0, 7, 6, 1);
2419 } else if (!Op.compare_lower("isw")) {
2420 // SYS #0, C7, C6, #2
2421 SYS_ALIAS(0, 7, 6, 2);
2422 } else if (!Op.compare_lower("cvac")) {
2423 // SYS #3, C7, C10, #1
2424 SYS_ALIAS(3, 7, 10, 1);
2425 } else if (!Op.compare_lower("csw")) {
2426 // SYS #0, C7, C10, #2
2427 SYS_ALIAS(0, 7, 10, 2);
2428 } else if (!Op.compare_lower("cvau")) {
2429 // SYS #3, C7, C11, #1
2430 SYS_ALIAS(3, 7, 11, 1);
2431 } else if (!Op.compare_lower("civac")) {
2432 // SYS #3, C7, C14, #1
2433 SYS_ALIAS(3, 7, 14, 1);
2434 } else if (!Op.compare_lower("cisw")) {
2435 // SYS #0, C7, C14, #2
2436 SYS_ALIAS(0, 7, 14, 2);
2438 return TokError("invalid operand for DC instruction");
2440 } else if (Mnemonic == "at") {
2441 if (!Op.compare_lower("s1e1r")) {
2442 // SYS #0, C7, C8, #0
2443 SYS_ALIAS(0, 7, 8, 0);
2444 } else if (!Op.compare_lower("s1e2r")) {
2445 // SYS #4, C7, C8, #0
2446 SYS_ALIAS(4, 7, 8, 0);
2447 } else if (!Op.compare_lower("s1e3r")) {
2448 // SYS #6, C7, C8, #0
2449 SYS_ALIAS(6, 7, 8, 0);
2450 } else if (!Op.compare_lower("s1e1w")) {
2451 // SYS #0, C7, C8, #1
2452 SYS_ALIAS(0, 7, 8, 1);
2453 } else if (!Op.compare_lower("s1e2w")) {
2454 // SYS #4, C7, C8, #1
2455 SYS_ALIAS(4, 7, 8, 1);
2456 } else if (!Op.compare_lower("s1e3w")) {
2457 // SYS #6, C7, C8, #1
2458 SYS_ALIAS(6, 7, 8, 1);
2459 } else if (!Op.compare_lower("s1e0r")) {
2460 // SYS #0, C7, C8, #2
2461 SYS_ALIAS(0, 7, 8, 2);
2462 } else if (!Op.compare_lower("s1e0w")) {
2463 // SYS #0, C7, C8, #3
2464 SYS_ALIAS(0, 7, 8, 3);
2465 } else if (!Op.compare_lower("s12e1r")) {
2466 // SYS #4, C7, C8, #4
2467 SYS_ALIAS(4, 7, 8, 4);
2468 } else if (!Op.compare_lower("s12e1w")) {
2469 // SYS #4, C7, C8, #5
2470 SYS_ALIAS(4, 7, 8, 5);
2471 } else if (!Op.compare_lower("s12e0r")) {
2472 // SYS #4, C7, C8, #6
2473 SYS_ALIAS(4, 7, 8, 6);
2474 } else if (!Op.compare_lower("s12e0w")) {
2475 // SYS #4, C7, C8, #7
2476 SYS_ALIAS(4, 7, 8, 7);
2478 return TokError("invalid operand for AT instruction");
2480 } else if (Mnemonic == "tlbi") {
2481 if (!Op.compare_lower("vmalle1is")) {
2482 // SYS #0, C8, C3, #0
2483 SYS_ALIAS(0, 8, 3, 0);
2484 } else if (!Op.compare_lower("alle2is")) {
2485 // SYS #4, C8, C3, #0
2486 SYS_ALIAS(4, 8, 3, 0);
2487 } else if (!Op.compare_lower("alle3is")) {
2488 // SYS #6, C8, C3, #0
2489 SYS_ALIAS(6, 8, 3, 0);
2490 } else if (!Op.compare_lower("vae1is")) {
2491 // SYS #0, C8, C3, #1
2492 SYS_ALIAS(0, 8, 3, 1);
2493 } else if (!Op.compare_lower("vae2is")) {
2494 // SYS #4, C8, C3, #1
2495 SYS_ALIAS(4, 8, 3, 1);
2496 } else if (!Op.compare_lower("vae3is")) {
2497 // SYS #6, C8, C3, #1
2498 SYS_ALIAS(6, 8, 3, 1);
2499 } else if (!Op.compare_lower("aside1is")) {
2500 // SYS #0, C8, C3, #2
2501 SYS_ALIAS(0, 8, 3, 2);
2502 } else if (!Op.compare_lower("vaae1is")) {
2503 // SYS #0, C8, C3, #3
2504 SYS_ALIAS(0, 8, 3, 3);
2505 } else if (!Op.compare_lower("alle1is")) {
2506 // SYS #4, C8, C3, #4
2507 SYS_ALIAS(4, 8, 3, 4);
2508 } else if (!Op.compare_lower("vale1is")) {
2509 // SYS #0, C8, C3, #5
2510 SYS_ALIAS(0, 8, 3, 5);
2511 } else if (!Op.compare_lower("vaale1is")) {
2512 // SYS #0, C8, C3, #7
2513 SYS_ALIAS(0, 8, 3, 7);
2514 } else if (!Op.compare_lower("vmalle1")) {
2515 // SYS #0, C8, C7, #0
2516 SYS_ALIAS(0, 8, 7, 0);
2517 } else if (!Op.compare_lower("alle2")) {
2518 // SYS #4, C8, C7, #0
2519 SYS_ALIAS(4, 8, 7, 0);
2520 } else if (!Op.compare_lower("vale2is")) {
2521 // SYS #4, C8, C3, #5
2522 SYS_ALIAS(4, 8, 3, 5);
2523 } else if (!Op.compare_lower("vale3is")) {
2524 // SYS #6, C8, C3, #5
2525 SYS_ALIAS(6, 8, 3, 5);
2526 } else if (!Op.compare_lower("alle3")) {
2527 // SYS #6, C8, C7, #0
2528 SYS_ALIAS(6, 8, 7, 0);
2529 } else if (!Op.compare_lower("vae1")) {
2530 // SYS #0, C8, C7, #1
2531 SYS_ALIAS(0, 8, 7, 1);
2532 } else if (!Op.compare_lower("vae2")) {
2533 // SYS #4, C8, C7, #1
2534 SYS_ALIAS(4, 8, 7, 1);
2535 } else if (!Op.compare_lower("vae3")) {
2536 // SYS #6, C8, C7, #1
2537 SYS_ALIAS(6, 8, 7, 1);
2538 } else if (!Op.compare_lower("aside1")) {
2539 // SYS #0, C8, C7, #2
2540 SYS_ALIAS(0, 8, 7, 2);
2541 } else if (!Op.compare_lower("vaae1")) {
2542 // SYS #0, C8, C7, #3
2543 SYS_ALIAS(0, 8, 7, 3);
2544 } else if (!Op.compare_lower("alle1")) {
2545 // SYS #4, C8, C7, #4
2546 SYS_ALIAS(4, 8, 7, 4);
2547 } else if (!Op.compare_lower("vale1")) {
2548 // SYS #0, C8, C7, #5
2549 SYS_ALIAS(0, 8, 7, 5);
2550 } else if (!Op.compare_lower("vale2")) {
2551 // SYS #4, C8, C7, #5
2552 SYS_ALIAS(4, 8, 7, 5);
2553 } else if (!Op.compare_lower("vale3")) {
2554 // SYS #6, C8, C7, #5
2555 SYS_ALIAS(6, 8, 7, 5);
2556 } else if (!Op.compare_lower("vaale1")) {
2557 // SYS #0, C8, C7, #7
2558 SYS_ALIAS(0, 8, 7, 7);
2559 } else if (!Op.compare_lower("ipas2e1")) {
2560 // SYS #4, C8, C4, #1
2561 SYS_ALIAS(4, 8, 4, 1);
2562 } else if (!Op.compare_lower("ipas2le1")) {
2563 // SYS #4, C8, C4, #5
2564 SYS_ALIAS(4, 8, 4, 5);
2565 } else if (!Op.compare_lower("ipas2e1is")) {
2566 // SYS #4, C8, C0, #1
2567 SYS_ALIAS(4, 8, 0, 1);
2568 } else if (!Op.compare_lower("ipas2le1is")) {
2569 // SYS #4, C8, C0, #5
2570 SYS_ALIAS(4, 8, 0, 5);
2571 } else if (!Op.compare_lower("vmalls12e1")) {
2572 // SYS #4, C8, C7, #6
2573 SYS_ALIAS(4, 8, 7, 6);
2574 } else if (!Op.compare_lower("vmalls12e1is")) {
2575 // SYS #4, C8, C3, #6
2576 SYS_ALIAS(4, 8, 3, 6);
2578 return TokError("invalid operand for TLBI instruction");
2584 Parser.Lex(); // Eat operand.
// Heuristic: alias names containing "all" operate on everything and take
// no Xt register; all other aliases require one.
2586 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2587 bool HasRegister = false;
2589 // Check for the optional register operand.
2590 if (getLexer().is(AsmToken::Comma)) {
2591 Parser.Lex(); // Eat comma.
2593 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2594 return TokError("expected register operand");
2599 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2600 Parser.eatToEndOfStatement();
2601 return TokError("unexpected token in argument list");
// Diagnose a mismatch between the alias's register requirement and what
// was actually written.
2604 if (ExpectRegister && !HasRegister) {
2605 return TokError("specified " + Mnemonic + " op requires a register");
2607 else if (!ExpectRegister && HasRegister) {
2608 return TokError("specified " + Mnemonic + " op does not use a register");
2611 Parser.Lex(); // Consume the EndOfStatement
// tryParseBarrierOperand - Parse the operand of a barrier instruction
// (dsb/dmb/isb): either a #imm in [0, 15] or a named barrier option.
2615 ARM64AsmParser::OperandMatchResultTy
2616 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2617 const AsmToken &Tok = Parser.getTok();
2619 // Can be either a #imm style literal or an option name
2620 bool Hash = Tok.is(AsmToken::Hash);
2621 if (Hash || Tok.is(AsmToken::Integer)) {
2622 // Immediate operand.
2624 Parser.Lex(); // Eat the '#'
2625 const MCExpr *ImmVal;
2626 SMLoc ExprLoc = getLoc();
2627 if (getParser().parseExpression(ImmVal))
2628 return MatchOperand_ParseFail;
// The barrier option must be a compile-time constant.
2629 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2631 Error(ExprLoc, "immediate value expected for barrier operand");
2632 return MatchOperand_ParseFail;
// The barrier option field is 4 bits wide, so only 0-15 is valid.
2634 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2635 Error(ExprLoc, "barrier operand out of range");
2636 return MatchOperand_ParseFail;
2639 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2640 return MatchOperand_Success;
2643 if (Tok.isNot(AsmToken::Identifier)) {
2644 TokError("invalid operand for instruction");
2645 return MatchOperand_ParseFail;
// Translate a named option (e.g. "sy") to its encoding via the mapper.
2649 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2651 TokError("invalid barrier option name");
2652 return MatchOperand_ParseFail;
2655 // The only valid named option for ISB is 'sy'
2656 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2657 TokError("'sy' or #imm operand expected");
2658 return MatchOperand_ParseFail;
2661 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2662 Parser.Lex(); // Consume the option
2664 return MatchOperand_Success;
// tryParseSysReg - Parse a system register operand (for mrs/msr). The
// identifier is recorded verbatim; no validation of the register name is
// performed here.
2667 ARM64AsmParser::OperandMatchResultTy
2668 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2669 const AsmToken &Tok = Parser.getTok();
2671 if (Tok.isNot(AsmToken::Identifier))
2672 return MatchOperand_NoMatch;
2674 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2676 Parser.Lex(); // Eat identifier
2678 return MatchOperand_Success;
2681 /// tryParseVectorRegister - Parse a vector register operand.
2682 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2683 if (Parser.getTok().isNot(AsmToken::Identifier))
2687 // Check for a vector register specifier first.
2689 int64_t Reg = tryMatchVectorRegister(Kind, false);
2693 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2694 // If there was an explicit qualifier, that goes on as a literal text
2697 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2699 // If there is an index specifier following the register, parse that too.
2700 if (Parser.getTok().is(AsmToken::LBrac)) {
2701 SMLoc SIdx = getLoc();
2702 Parser.Lex(); // Eat left bracket token.
2704 const MCExpr *ImmVal;
2705 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2707 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2709 TokError("immediate value expected for vector index");
2714 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2715 Error(E, "']' expected");
2719 Parser.Lex(); // Eat right bracket token.
2721 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2728 /// parseRegister - Parse a non-vector register operand.
2729 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2731 // Try for a vector register.
2732 if (!tryParseVectorRegister(Operands))
2735 // Try for a scalar register.
2736 int64_t Reg = tryParseRegister();
2740 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2742 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2743 // as a string token in the instruction itself.
// The "[", "1", and "]" are pushed as separate literal tokens so the
// matcher can treat them as part of the mnemonic's fixed syntax.
2744 if (getLexer().getKind() == AsmToken::LBrac) {
2745 SMLoc LBracS = getLoc();
2747 const AsmToken &Tok = Parser.getTok();
2748 if (Tok.is(AsmToken::Integer)) {
2749 SMLoc IntS = getLoc();
2750 int64_t Val = Tok.getIntVal();
2753 if (getLexer().getKind() == AsmToken::RBrac) {
2754 SMLoc RBracS = getLoc();
2757 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2759 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2761 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2771 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2772 /// do not allow base register writeback modes,
2773 /// or those that handle writeback separately from
2774 /// the memory operand (like the AdvSIMD ldX/stX
2776 ARM64AsmParser::OperandMatchResultTy
2777 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
2778 if (Parser.getTok().isNot(AsmToken::LBrac))
2779 return MatchOperand_NoMatch;
2781 Parser.Lex(); // Eat left bracket token.
2783 const AsmToken &BaseRegTok = Parser.getTok();
2784 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2785 Error(BaseRegTok.getLoc(), "register expected");
2786 return MatchOperand_ParseFail;
2789 int64_t Reg = tryParseRegister();
2791 Error(BaseRegTok.getLoc(), "register expected");
2792 return MatchOperand_ParseFail;
// Only the bare "[Xn]" form is accepted: no offset, no writeback.
2796 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2797 Error(E, "']' expected");
2798 return MatchOperand_ParseFail;
2801 Parser.Lex(); // Eat right bracket token.
// Offset is always 0 for this addressing form.
2803 Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
2804 return MatchOperand_Success;
2807 /// parseMemory - Parse a memory operand for a basic load/store instruction.
// Accepts three offset forms after the base register: a (possibly extended)
// register offset, an immediate/symbolic-immediate expression, or a symbol
// reference, optionally followed by '!' for pre-indexed writeback.
2808 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2809 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2811 Parser.Lex(); // Eat left bracket token.
2813 const AsmToken &BaseRegTok = Parser.getTok();
2814 if (BaseRegTok.isNot(AsmToken::Identifier))
2815 return Error(BaseRegTok.getLoc(), "register expected");
2817 int64_t Reg = tryParseRegister();
2819 return Error(BaseRegTok.getLoc(), "register expected");
2821 // If there is an offset expression, parse it.
2822 const MCExpr *OffsetExpr = 0;
2824 if (Parser.getTok().is(AsmToken::Comma)) {
2825 Parser.Lex(); // Eat the comma.
2826 OffsetLoc = getLoc();
// Register-offset form: "[Xn, Rm {, extend {#amt}}]". Reg2 stays -1 if
// the token after the comma is not an identifier.
2829 const AsmToken &OffsetRegTok = Parser.getTok();
2830 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2832 // Default shift is LSL, with an omitted shift. We use the third bit of
2833 // the extend value to indicate presence/omission of the immediate offset.
2834 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
2835 int64_t ShiftVal = 0;
2836 bool ExplicitShift = false;
2838 if (Parser.getTok().is(AsmToken::Comma)) {
2839 // Embedded extend operand.
2840 Parser.Lex(); // Eat the comma
2842 SMLoc ExtLoc = getLoc();
2843 const AsmToken &Tok = Parser.getTok();
2844 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2845 .Case("uxtw", ARM64_AM::UXTW)
2846 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2847 .Case("sxtw", ARM64_AM::SXTW)
2848 .Case("sxtx", ARM64_AM::SXTX)
2849 .Case("UXTW", ARM64_AM::UXTW)
2850 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2851 .Case("SXTW", ARM64_AM::SXTW)
2852 .Case("SXTX", ARM64_AM::SXTX)
2853 .Default(ARM64_AM::InvalidExtend);
2854 if (ExtOp == ARM64_AM::InvalidExtend)
2855 return Error(ExtLoc, "expected valid extend operation");
2857 Parser.Lex(); // Eat the extend op.
2859 // A 32-bit offset register is only valid for [SU]/XTW extend
2861 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
2862 if (ExtOp != ARM64_AM::UXTW &&
2863 ExtOp != ARM64_AM::SXTW)
2864 return Error(ExtLoc, "32-bit general purpose offset register "
2865 "requires sxtw or uxtw extend");
2866 } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
2868 return Error(OffsetLoc,
2869 "64-bit general purpose offset register expected");
2871 bool Hash = getLexer().is(AsmToken::Hash);
2872 if (getLexer().is(AsmToken::RBrac)) {
2873 // No immediate operand.
2874 if (ExtOp == ARM64_AM::UXTX)
2875 return Error(ExtLoc, "LSL extend requires immediate operand");
2876 } else if (Hash || getLexer().is(AsmToken::Integer)) {
2877 // Immediate operand.
2879 Parser.Lex(); // Eat the '#'
2880 const MCExpr *ImmVal;
2881 SMLoc ExprLoc = getLoc();
2882 if (getParser().parseExpression(ImmVal))
2884 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2886 return TokError("immediate value expected for extend operand");
// The explicit extend amount can only be 0-4 (scaling by the access size).
2888 ExplicitShift = true;
2889 ShiftVal = MCE->getValue();
2890 if (ShiftVal < 0 || ShiftVal > 4)
2891 return Error(ExprLoc, "immediate operand out of range");
2893 return Error(getLoc(), "expected immediate operand");
2896 if (Parser.getTok().isNot(AsmToken::RBrac))
2897 return Error(getLoc(), "']' expected");
2899 Parser.Lex(); // Eat right bracket token.
2902 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
2903 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
2906 // Immediate expressions.
2907 } else if (Parser.getTok().is(AsmToken::Hash) ||
2908 Parser.getTok().is(AsmToken::Integer)) {
2909 if (Parser.getTok().is(AsmToken::Hash))
2910 Parser.Lex(); // Eat hash token.
2912 if (parseSymbolicImmVal(OffsetExpr))
2915 // FIXME: We really should make sure that we're dealing with a LDR/STR
2916 // instruction that can legally have a symbolic expression here.
2917 // Symbol reference.
2918 if (Parser.getTok().isNot(AsmToken::Identifier) &&
2919 Parser.getTok().isNot(AsmToken::String))
2920 return Error(getLoc(), "identifier or immediate expression expected");
2921 if (getParser().parseExpression(OffsetExpr))
2923 // If this is a plain ref, make sure a legal variant kind was specified.
2924 // Otherwise, it's a more complicated expression and we have to just
2925 // assume it's OK and let the relocation stuff puke if it's not.
2926 ARM64MCExpr::VariantKind ELFRefKind;
2927 MCSymbolRefExpr::VariantKind DarwinRefKind;
2928 const MCConstantExpr *Addend;
2929 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
2931 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
2932 "ELF symbol modifiers not supported here yet");
2934 switch (DarwinRefKind) {
2936 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
2937 case MCSymbolRefExpr::VK_GOTPAGEOFF:
2938 case MCSymbolRefExpr::VK_PAGEOFF:
2939 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
2940 // These are what we're expecting.
2948 if (Parser.getTok().isNot(AsmToken::RBrac))
2949 return Error(E, "']' expected");
2951 Parser.Lex(); // Eat right bracket token.
2953 // Create the memory operand.
2955 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
2957 // Check for a '!', indicating pre-indexed addressing with writeback.
2958 if (Parser.getTok().is(AsmToken::Exclaim)) {
2959 // There needs to have been an immediate or wback doesn't make sense.
2961 return Error(E, "missing offset for pre-indexed addressing");
2962 // Pre-indexed with writeback must have a constant expression for the
2963 // offset. FIXME: Theoretically, we'd like to allow fixups so long
2964 // as they don't require a relocation.
2965 if (!isa<MCConstantExpr>(OffsetExpr))
2966 return Error(OffsetLoc, "constant immediate expression expected");
2968 // Create the Token operand for the '!'.
2969 Operands.push_back(ARM64Operand::CreateToken(
2970 "!", false, Parser.getTok().getLoc(), getContext()));
2971 Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate expression that may carry a
/// leading ELF relocation specifier of the form ":name:expr" (e.g.
/// ":lo12:sym"). On success, ImmVal holds the parsed expression, wrapped in
/// an ARM64MCExpr when a modifier was present.
2977 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2978 bool HasELFModifier = false;
2979 ARM64MCExpr::VariantKind RefKind;
2981 if (Parser.getTok().is(AsmToken::Colon)) {
2982 Parser.Lex(); // Eat ':'
2983 HasELFModifier = true;
2985 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2986 Error(Parser.getTok().getLoc(),
2987 "expect relocation specifier in operand after ':'");
// The specifier name is matched case-insensitively (lowered first).
2991 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2992 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
2993 .Case("lo12", ARM64MCExpr::VK_LO12)
2994 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
2995 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
2996 .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
2997 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
2998 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
2999 .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
3000 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3001 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3002 .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
3003 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3004 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3005 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3006 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3007 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3008 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3009 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3010 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3011 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3012 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3013 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3014 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3015 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3016 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3017 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3018 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3019 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3020 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3021 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3022 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3023 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3024 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3025 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3026 .Default(ARM64MCExpr::VK_INVALID)
3028 if (RefKind == ARM64MCExpr::VK_INVALID) {
3029 Error(Parser.getTok().getLoc(),
3030 "expect relocation specifier in operand after ':'");
3034 Parser.Lex(); // Eat identifier
// The specifier must be closed with a second ':' before the expression.
3036 if (Parser.getTok().isNot(AsmToken::Colon)) {
3037 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3040 Parser.Lex(); // Eat ':'
3043 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the modifier survives to relocation emission.
3047 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3052 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
// Handles both the range form "{ v0.8b - v3.8b }" and the enumerated form
// "{ v0.8b, v1.8b, ... }", plus an optional trailing lane index "[n]".
3053 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
// NOTE(review): the assert message says "Left Bracket" but the token being
// checked is the left curly brace '{'.
3054 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3056 Parser.Lex(); // Eat left bracket token.
3058 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3061 int64_t PrevReg = FirstReg;
3064 if (Parser.getTok().is(AsmToken::Minus)) {
3065 Parser.Lex(); // Eat the minus.
3067 SMLoc Loc = getLoc();
3069 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3072 // Any Kind suffixes must match on all regs in the list.
3073 if (Kind != NextKind)
3074 return Error(Loc, "mismatched register size suffix");
// Distance from the first to the last register of the range, allowing the
// range to wrap past register 31.
3076 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3078 if (Space == 0 || Space > 3) {
3079 return Error(Loc, "invalid number of vectors");
3085 while (Parser.getTok().is(AsmToken::Comma)) {
3086 Parser.Lex(); // Eat the comma token.
3088 SMLoc Loc = getLoc();
3090 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3093 // Any Kind suffixes must match on all regs in the list.
3094 if (Kind != NextKind)
3095 return Error(Loc, "mismatched register size suffix");
3097 // Registers must be incremental (with wraparound at 31)
3098 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3099 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3100 return Error(Loc, "registers must be sequential");
// NOTE(review): the Error() result below is not returned — confirm against
// the full source whether parsing is meant to continue after this diagnostic.
3107 if (Parser.getTok().is(AsmToken::EndOfStatement))
3108 Error(getLoc(), "'}' expected");
3109 Parser.Lex(); // Eat the '}' token.
3111 unsigned NumElements = 0;
3112 char ElementKind = 0;
3114 parseValidVectorKind(Kind, NumElements, ElementKind);
3116 Operands.push_back(ARM64Operand::CreateVectorList(
3117 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3119 // If there is an index specifier following the list, parse that too.
3120 if (Parser.getTok().is(AsmToken::LBrac)) {
3121 SMLoc SIdx = getLoc();
3122 Parser.Lex(); // Eat left bracket token.
3124 const MCExpr *ImmVal;
3125 if (getParser().parseExpression(ImmVal))
3127 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3129 TokError("immediate value expected for vector index");
3134 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3135 Error(E, "']' expected");
3139 Parser.Lex(); // Eat right bracket token.
3141 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3147 /// parseOperand - Parse an ARM instruction operand. For now this parses the
3148 /// operand regardless of the mnemonic.
3149 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3150 bool invertCondCode) {
3151 // Check if the current operand has a custom associated parser, if so, try to
3152 // custom parse the operand, or fallback to the general approach.
3153 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3154 if (ResTy == MatchOperand_Success)
3156 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3157 // there was a match, but an error occurred, in which case, just return that
3158 // the operand parsing failed.
3159 if (ResTy == MatchOperand_ParseFail)
3162 // Nothing custom, so do general case parsing.
// Dispatch on the leading token of the operand.
3164 switch (getLexer().getKind()) {
3168 if (parseSymbolicImmVal(Expr))
3169 return Error(S, "invalid operand");
3171 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3172 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3175 case AsmToken::LBrac:
3176 return parseMemory(Operands);
3177 case AsmToken::LCurly:
3178 return parseVectorList(Operands);
3179 case AsmToken::Identifier: {
3180 // If we're expecting a Condition Code operand, then just parse that.
3182 return parseCondCode(Operands, invertCondCode);
3184 // If it's a register name, parse it.
3185 if (!parseRegister(Operands))
3188 // This could be an optional "shift" operand.
3189 if (!parseOptionalShift(Operands))
3192 // Or maybe it could be an optional "extend" operand.
3193 if (!parseOptionalExtend(Operands))
3196 // This was not a register so parse other operands that start with an
3197 // identifier (like labels) as expressions and create them as immediates.
3198 const MCExpr *IdVal;
3200 if (getParser().parseExpression(IdVal))
3203 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3204 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3207 case AsmToken::Integer:
3208 case AsmToken::Real:
3209 case AsmToken::Hash: {
3210 // #42 -> immediate.
3212 if (getLexer().is(AsmToken::Hash))
3215 // The only Real that should come through here is a literal #0.0 for
3216 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3217 // so convert the value.
3218 const AsmToken &Tok = Parser.getTok();
3219 if (Tok.is(AsmToken::Real)) {
// Reject anything other than an exact 0.0 bit pattern, and only allow
// it for the fcmp/fcmpe mnemonics.
3220 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3221 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3222 if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
3223 return TokError("unexpected floating point literal");
3224 Parser.Lex(); // Eat the token.
// The matcher expects the literal "#0" "." "0" token spelling.
3227 ARM64Operand::CreateToken("#0", false, S, getContext()));
3229 ARM64Operand::CreateToken(".0", false, S, getContext()));
3233 const MCExpr *ImmVal;
3234 if (parseSymbolicImmVal(ImmVal))
3237 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3238 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3244 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
3246 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3247 StringRef Name, SMLoc NameLoc,
3248 OperandVector &Operands) {
3249 // Create the leading tokens for the mnemonic, split by '.' characters.
3250 size_t Start = 0, Next = Name.find('.');
3251 StringRef Head = Name.slice(Start, Next);
3253 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3254 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3255 return parseSysAlias(Head, NameLoc, Operands);
3258 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3261 // Handle condition codes for a branch mnemonic
// "b.<cc>" carries its condition code as a mnemonic suffix; turn the
// suffix into an immediate condition-code operand.
3262 if (Head == "b" && Next != StringRef::npos) {
3264 Next = Name.find('.', Start + 1);
3265 Head = Name.slice(Start + 1, Next);
3267 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3268 (Head.data() - Name.data()));
3269 unsigned CC = parseCondCodeString(Head);
3270 if (CC == ARM64CC::Invalid)
3271 return Error(SuffixLoc, "invalid condition code");
3272 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3274 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3277 // Add the remaining tokens in the mnemonic.
// Each remaining ".suffix" is pushed as its own literal token operand.
3278 while (Next != StringRef::npos) {
3280 Next = Name.find('.', Start + 1);
3281 Head = Name.slice(Start, Next);
3282 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3283 (Head.data() - Name.data()) + 1);
3285 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3288 // Conditional compare instructions have a Condition Code operand, which needs
3289 // to be parsed and an immediate operand created.
3290 bool condCodeFourthOperand =
3291 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3292 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3293 Head == "csinc" || Head == "csinv" || Head == "csneg");
3295 // These instructions are aliases to some of the conditional select
3296 // instructions. However, the condition code is inverted in the aliased
3299 // FIXME: Is this the correct way to handle these? Or should the parser
3300 // generate the aliased instructions directly?
3301 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3302 bool condCodeThirdOperand =
3303 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3305 // Read the remaining operands.
3306 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3307 // Read the first operand.
3308 if (parseOperand(Operands, false, false)) {
3309 Parser.eatToEndOfStatement();
3314 while (getLexer().is(AsmToken::Comma)) {
3315 Parser.Lex(); // Eat the comma.
3317 // Parse and remember the operand.
// The isCondCode flag depends on which operand position (N) this is for
// the mnemonics classified above.
3318 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3319 (N == 3 && condCodeThirdOperand) ||
3320 (N == 2 && condCodeSecondOperand),
3321 condCodeSecondOperand || condCodeThirdOperand)) {
3322 Parser.eatToEndOfStatement();
3330 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3331 SMLoc Loc = Parser.getTok().getLoc();
3332 Parser.eatToEndOfStatement();
3333 return Error(Loc, "unexpected token in argument list");
3336 Parser.Lex(); // Consume the EndOfStatement
3340 // FIXME: This entire function is a giant hack to provide us with decent
3341 // operand range validation/diagnostics until TableGen/MC can be extended
3342 // to support autogeneration of this kind of validation.
/// Perform post-match, target-specific semantic validation of \p Inst.
/// \p Loc holds the start location of each explicit operand (the caller
/// builds it from Operands[1..], so Loc[0] is the first operand after the
/// mnemonic). Returns true after emitting a diagnostic when the instruction
/// is rejected, false when it is acceptable.
3343 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3344 SmallVectorImpl<SMLoc> &Loc) {
3345 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3346 // Check for indexed addressing modes w/ the base register being the
3347 // same as a destination/source register or pair load where
3348 // the Rt == Rt2. All of those are undefined behaviour.
3349 switch (Inst.getOpcode()) {
// Writeback (pre/post-indexed) LDP: base register must not overlap either
// destination register (checked via sub-register containment, so e.g. a
// W register overlapping its X parent is also caught).
3350 case ARM64::LDPSWpre:
3351 case ARM64::LDPWpost:
3352 case ARM64::LDPWpre:
3353 case ARM64::LDPXpost:
3354 case ARM64::LDPXpre: {
3355 unsigned Rt = Inst.getOperand(0).getReg();
3356 unsigned Rt2 = Inst.getOperand(1).getReg();
3357 unsigned Rn = Inst.getOperand(2).getReg();
3358 if (RI->isSubRegisterEq(Rn, Rt))
3359 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3360 "is also a destination");
3361 if (RI->isSubRegisterEq(Rn, Rt2))
3362 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3363 "is also a destination");
// All LDP forms (including non-writeback): Rt and Rt2 must differ.
3366 case ARM64::LDPDpost:
3367 case ARM64::LDPDpre:
3368 case ARM64::LDPQpost:
3369 case ARM64::LDPQpre:
3370 case ARM64::LDPSpost:
3371 case ARM64::LDPSpre:
3372 case ARM64::LDPSWpost:
3378 case ARM64::LDPXi: {
3379 unsigned Rt = Inst.getOperand(0).getReg();
3380 unsigned Rt2 = Inst.getOperand(1).getReg();
3382 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback STP: base register must not overlap either source register.
3385 case ARM64::STPDpost:
3386 case ARM64::STPDpre:
3387 case ARM64::STPQpost:
3388 case ARM64::STPQpre:
3389 case ARM64::STPSpost:
3390 case ARM64::STPSpre:
3391 case ARM64::STPWpost:
3392 case ARM64::STPWpre:
3393 case ARM64::STPXpost:
3394 case ARM64::STPXpre: {
3395 unsigned Rt = Inst.getOperand(0).getReg();
3396 unsigned Rt2 = Inst.getOperand(1).getReg();
3397 unsigned Rn = Inst.getOperand(2).getReg();
3398 if (RI->isSubRegisterEq(Rn, Rt))
3399 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3400 "is also a source");
3401 if (RI->isSubRegisterEq(Rn, Rt2))
3402 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3403 "is also a source");
// Writeback single-register LDR: base must not overlap the destination.
3406 case ARM64::LDRBBpre:
3407 case ARM64::LDRBpre:
3408 case ARM64::LDRHHpre:
3409 case ARM64::LDRHpre:
3410 case ARM64::LDRSBWpre:
3411 case ARM64::LDRSBXpre:
3412 case ARM64::LDRSHWpre:
3413 case ARM64::LDRSHXpre:
3414 case ARM64::LDRSWpre:
3415 case ARM64::LDRWpre:
3416 case ARM64::LDRXpre:
3417 case ARM64::LDRBBpost:
3418 case ARM64::LDRBpost:
3419 case ARM64::LDRHHpost:
3420 case ARM64::LDRHpost:
3421 case ARM64::LDRSBWpost:
3422 case ARM64::LDRSBXpost:
3423 case ARM64::LDRSHWpost:
3424 case ARM64::LDRSHXpost:
3425 case ARM64::LDRSWpost:
3426 case ARM64::LDRWpost:
3427 case ARM64::LDRXpost: {
3428 unsigned Rt = Inst.getOperand(0).getReg();
3429 unsigned Rn = Inst.getOperand(1).getReg();
3430 if (RI->isSubRegisterEq(Rn, Rt))
3431 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3432 "is also a source");
// Writeback single-register STR: base must not overlap the source.
3435 case ARM64::STRBBpost:
3436 case ARM64::STRBpost:
3437 case ARM64::STRHHpost:
3438 case ARM64::STRHpost:
3439 case ARM64::STRWpost:
3440 case ARM64::STRXpost:
3441 case ARM64::STRBBpre:
3442 case ARM64::STRBpre:
3443 case ARM64::STRHHpre:
3444 case ARM64::STRHpre:
3445 case ARM64::STRWpre:
3446 case ARM64::STRXpre: {
3447 unsigned Rt = Inst.getOperand(0).getReg();
3448 unsigned Rn = Inst.getOperand(1).getReg();
3449 if (RI->isSubRegisterEq(Rn, Rt))
3450 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3451 "is also a source");
3456 // Now check immediate ranges. Separate from the above as there is overlap
3457 // in the instructions being checked and this keeps the nested conditionals
3459 switch (Inst.getOpcode()) {
// Shifted-register logical ops on W registers: an LSL shift amount of more
// than 31 cannot be encoded.
3461 case ARM64::ANDSWrs:
3463 case ARM64::ORRWrs: {
3464 if (!Inst.getOperand(3).isImm())
3465 return Error(Loc[3], "immediate value expected");
3466 int64_t shifter = Inst.getOperand(3).getImm();
3467 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3468 if (ST == ARM64_AM::LSL && shifter > 31)
3469 return Error(Loc[3], "shift value out of range");
// ADD/SUB immediate forms: shift must be LSL #0 or LSL #12, and the imm12
// operand must be either a relocatable low-12-bits expression or an
// immediate in [0,4095].
3472 case ARM64::ADDSWri:
3473 case ARM64::ADDSXri:
3476 case ARM64::SUBSWri:
3477 case ARM64::SUBSXri:
3479 case ARM64::SUBXri: {
3480 if (!Inst.getOperand(3).isImm())
3481 return Error(Loc[3], "immediate value expected");
3482 int64_t shifter = Inst.getOperand(3).getImm();
3483 if (shifter != 0 && shifter != 12)
3484 return Error(Loc[3], "shift value out of range");
3485 // The imm12 operand can be an expression. Validate that it's legit.
3486 // FIXME: We really, really want to allow arbitrary expressions here
3487 // and resolve the value and validate the result at fixup time, but
3488 // that's hard as we have long since lost any source information we
3489 // need to generate good diagnostics by that point.
3490 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3491 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3492 ARM64MCExpr::VariantKind ELFRefKind;
3493 MCSymbolRefExpr::VariantKind DarwinRefKind;
3494 const MCConstantExpr *Addend;
3495 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3496 return Error(Loc[2], "invalid immediate expression");
// Accept the Darwin @pageoff/@tlvppageoff and ELF :lo12:-family reference
// kinds, which resolve to a page-offset value at fixup time.
3499 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3500 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3501 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3502 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3503 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3504 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3505 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3506 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3507 // Note that we don't range-check the addend. It's adjusted
3508 // modulo page size when converted, so there is no "out of range"
3509 // condition when using @pageoff. Any validity checking for the value
3510 // was done in the is*() predicate function.
3512 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3513 // @gotpageoff can only be used directly, not with an addend.
3517 // Otherwise, we're not sure, so don't allow it for now.
3518 return Error(Loc[2], "invalid immediate expression");
3521 // If it's anything but an immediate, it's not legit.
3522 if (!Inst.getOperand(2).isImm())
3523 return Error(Loc[2], "invalid immediate expression");
3524 int64_t imm = Inst.getOperand(2).getImm();
3525 if (imm > 4095 || imm < 0)
3526 return Error(Loc[2], "immediate value out of range");
// Writeback and unscaled single-register loads/stores share the signed
// 9-bit offset check below ([-256,255]).
3529 case ARM64::LDRBpre:
3530 case ARM64::LDRHpre:
3531 case ARM64::LDRSBWpre:
3532 case ARM64::LDRSBXpre:
3533 case ARM64::LDRSHWpre:
3534 case ARM64::LDRSHXpre:
3535 case ARM64::LDRWpre:
3536 case ARM64::LDRXpre:
3537 case ARM64::LDRSpre:
3538 case ARM64::LDRDpre:
3539 case ARM64::LDRQpre:
3540 case ARM64::STRBpre:
3541 case ARM64::STRHpre:
3542 case ARM64::STRWpre:
3543 case ARM64::STRXpre:
3544 case ARM64::STRSpre:
3545 case ARM64::STRDpre:
3546 case ARM64::STRQpre:
3547 case ARM64::LDRBpost:
3548 case ARM64::LDRHpost:
3549 case ARM64::LDRSBWpost:
3550 case ARM64::LDRSBXpost:
3551 case ARM64::LDRSHWpost:
3552 case ARM64::LDRSHXpost:
3553 case ARM64::LDRWpost:
3554 case ARM64::LDRXpost:
3555 case ARM64::LDRSpost:
3556 case ARM64::LDRDpost:
3557 case ARM64::LDRQpost:
3558 case ARM64::STRBpost:
3559 case ARM64::STRHpost:
3560 case ARM64::STRWpost:
3561 case ARM64::STRXpost:
3562 case ARM64::STRSpost:
3563 case ARM64::STRDpost:
3564 case ARM64::STRQpost:
3569 case ARM64::LDTRSHWi:
3570 case ARM64::LDTRSHXi:
3571 case ARM64::LDTRSBWi:
3572 case ARM64::LDTRSBXi:
3573 case ARM64::LDTRSWi:
3585 case ARM64::LDURSHWi:
3586 case ARM64::LDURSHXi:
3587 case ARM64::LDURSBWi:
3588 case ARM64::LDURSBXi:
3589 case ARM64::LDURSWi:
3597 case ARM64::STURBi: {
3598 // FIXME: Should accept expressions and error in fixup evaluation
3600 if (!Inst.getOperand(2).isImm())
3601 return Error(Loc[1], "immediate value expected")
3602 int64_t offset = Inst.getOperand(2).getImm();
3603 if (offset > 255 || offset < -256)
3604 return Error(Loc[1], "offset value out of range");
// Register-offset forms: the encoded extend must be one of
// UXTW/UXTX/SXTW/SXTX.
3609 case ARM64::LDRSWro:
3611 case ARM64::STRSro: {
3612 // FIXME: Should accept expressions and error in fixup evaluation
3614 if (!Inst.getOperand(3).isImm())
3615 return Error(Loc[1], "immediate value expected");
3616 int64_t shift = Inst.getOperand(3).getImm();
3617 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3618 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3619 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3620 return Error(Loc[1], "shift type invalid");
3629 case ARM64::STRQro: {
3630 // FIXME: Should accept expressions and error in fixup evaluation
3632 if (!Inst.getOperand(3).isImm())
3633 return Error(Loc[1], "immediate value expected");
3634 int64_t shift = Inst.getOperand(3).getImm();
3635 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3636 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3637 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3638 return Error(Loc[1], "shift type invalid");
3642 case ARM64::LDRHHro:
3643 case ARM64::LDRSHWro:
3644 case ARM64::LDRSHXro:
3646 case ARM64::STRHHro: {
3647 // FIXME: Should accept expressions and error in fixup evaluation
3649 if (!Inst.getOperand(3).isImm())
3650 return Error(Loc[1], "immediate value expected");
3651 int64_t shift = Inst.getOperand(3).getImm();
3652 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3653 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3654 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3655 return Error(Loc[1], "shift type invalid");
3659 case ARM64::LDRBBro:
3660 case ARM64::LDRSBWro:
3661 case ARM64::LDRSBXro:
3663 case ARM64::STRBBro: {
3664 // FIXME: Should accept expressions and error in fixup evaluation
3666 if (!Inst.getOperand(3).isImm())
3667 return Error(Loc[1], "immediate value expected");
3668 int64_t shift = Inst.getOperand(3).getImm();
3669 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3670 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3671 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3672 return Error(Loc[1], "shift type invalid");
// Paired loads/stores: the immediate must fit a signed 7-bit field
// ([-64,63] in units of the access size).
3686 case ARM64::LDPWpre:
3687 case ARM64::LDPXpre:
3688 case ARM64::LDPSpre:
3689 case ARM64::LDPDpre:
3690 case ARM64::LDPQpre:
3691 case ARM64::LDPSWpre:
3692 case ARM64::STPWpre:
3693 case ARM64::STPXpre:
3694 case ARM64::STPSpre:
3695 case ARM64::STPDpre:
3696 case ARM64::STPQpre:
3697 case ARM64::LDPWpost:
3698 case ARM64::LDPXpost:
3699 case ARM64::LDPSpost:
3700 case ARM64::LDPDpost:
3701 case ARM64::LDPQpost:
3702 case ARM64::LDPSWpost:
3703 case ARM64::STPWpost:
3704 case ARM64::STPXpost:
3705 case ARM64::STPSpost:
3706 case ARM64::STPDpost:
3707 case ARM64::STPQpost:
3717 case ARM64::STNPQi: {
3718 // FIXME: Should accept expressions and error in fixup evaluation
3720 if (!Inst.getOperand(3).isImm())
3721 return Error(Loc[2], "immediate value expected");
3722 int64_t offset = Inst.getOperand(3).getImm();
3723 if (offset > 63 || offset < -64)
3724 return Error(Loc[2], "offset value out of range");
// Rewrite a "MOV Rd, #imm" alias in place as a MOVZ/MOVN-style instruction:
// Operands[0] is replaced with the real mnemonic token (e.g. "movz"/"movn"),
// Operands[2] becomes the 16-bit chunk (imm >> shift), and an "LSL #shift"
// shifter operand is appended. The caller chooses mnemonic/imm/shift so that
// (chunk << shift) reproduces the original value.
3732 static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
3733 StringRef mnemonic, uint64_t imm, unsigned shift,
3734 MCContext &Context) {
3735 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3736 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3738 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
3740 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3741 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3742 Op2->getEndLoc(), Context);
3744 Operands.push_back(ARM64Operand::CreateShifter(
3745 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
// Rewrite a register MOV that involves SP/WSP as "ADD Rd, Rn, #0, lsl #0":
// Operands[0] is replaced with an "add" token and a zero immediate plus an
// LSL #0 shifter operand are appended. The ADD-immediate form is the only
// encoding that can read/write the stack pointer.
3750 static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
3751 MCContext &Context) {
3752 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3753 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3755 ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
3757 const MCExpr *Imm = MCConstantExpr::Create(0, Context);
3758 Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
3759 Op2->getEndLoc(), Context));
3760 Operands.push_back(ARM64Operand::CreateShifter(
3761 ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
// Rewrite a plain register-to-register MOV as "ORR Rd, ZR, Rm":
// Operands[0] is replaced with an "orr" token, the source register is moved
// from slot 2 to slot 3, and slot 2 is filled with the zero register whose
// width (WZR vs XZR) matches the source register class.
3766 static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
3767 MCContext &Context) {
3768 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3769 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3771 ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
3773 // Operands[2] becomes Operands[3].
3774 Operands.push_back(Operands[2]);
3775 // And Operands[2] becomes ZR.
3776 unsigned ZeroReg = ARM64::XZR;
3777 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3778 Operands[2]->getReg()))
3779 ZeroReg = ARM64::WZR;
3782 ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
3783 Op2->getEndLoc(), Context);
// Emit a user-facing diagnostic at Loc for the given matcher error code.
// Always returns true (the Error() convention), so callers can
// "return showMatchError(...)" directly from MatchAndEmitInstruction.
3788 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3790 case Match_MissingFeature:
3792 "instruction requires a CPU feature not currently enabled");
3793 case Match_InvalidOperand:
3794 return Error(Loc, "invalid operand for instruction");
3795 case Match_InvalidSuffix:
3796 return Error(Loc, "invalid type suffix for instruction");
// Memory-index diagnostics: the ranges below reflect the encodable
// immediate field for each addressing mode (signed 9-bit unscaled,
// scaled signed 7-bit pairs, and scaled unsigned 12-bit offsets).
3797 case Match_InvalidMemoryIndexedSImm9:
3798 return Error(Loc, "index must be an integer in range [-256,255].");
3799 case Match_InvalidMemoryIndexed32SImm7:
3800 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
3801 case Match_InvalidMemoryIndexed64SImm7:
3802 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
3803 case Match_InvalidMemoryIndexed128SImm7:
3804 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
3805 case Match_InvalidMemoryIndexed8:
3806 return Error(Loc, "index must be an integer in range [0,4095].");
3807 case Match_InvalidMemoryIndexed16:
3808 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
3809 case Match_InvalidMemoryIndexed32:
3810 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
3811 case Match_InvalidMemoryIndexed64:
3812 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
3813 case Match_InvalidMemoryIndexed128:
3814 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
3815 case Match_InvalidImm1_8:
3816 return Error(Loc, "immediate must be an integer in range [1,8].");
3817 case Match_InvalidImm1_16:
3818 return Error(Loc, "immediate must be an integer in range [1,16].");
3819 case Match_InvalidImm1_32:
3820 return Error(Loc, "immediate must be an integer in range [1,32].");
3821 case Match_InvalidImm1_64:
3822 return Error(Loc, "immediate must be an integer in range [1,64].");
3823 case Match_InvalidLabel:
3824 return Error(Loc, "expected label or encodable integer pc offset");
3825 case Match_MnemonicFail:
3826 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes are a programmer error; still return a diagnostic so
// release builds (where assert is a no-op) fail gracefully.
3828 assert(0 && "unexpected error code!");
3829 return Error(Loc, "invalid instruction format");
3833 static const char *getSubtargetFeatureName(unsigned Val);
3835 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3836 OperandVector &Operands,
3838 unsigned &ErrorInfo,
3839 bool MatchingInlineAsm) {
3840 assert(!Operands.empty() && "Unexpect empty operand list!");
3841 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3842 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3844 StringRef Tok = Op->getToken();
3845 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3846 // This needs to be done before the special handling of ADD/SUB immediates.
3847 if (Tok == "cmp" || Tok == "cmn") {
3848 // Replace the opcode with either ADDS or SUBS.
3849 const char *Repl = StringSwitch<const char *>(Tok)
3850 .Case("cmp", "subs")
3851 .Case("cmn", "adds")
3853 assert(Repl && "Unknown compare instruction");
3855 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3857 // Insert WZR or XZR as destination operand.
3858 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
3860 if (RegOp->isReg() &&
3861 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3863 ZeroReg = ARM64::WZR;
3865 ZeroReg = ARM64::XZR;
3867 Operands.begin() + 1,
3868 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3869 // Update since we modified it above.
3870 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3871 Tok = Op->getToken();
3874 unsigned NumOperands = Operands.size();
3876 if (Tok == "mov" && NumOperands == 3) {
3877 // The MOV mnemomic is aliased to movn/movz, depending on the value of
3878 // the immediate being instantiated.
3879 // FIXME: Catching this here is a total hack, and we should use tblgen
3880 // support to implement this instead as soon as it is available.
3882 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3883 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3885 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3886 uint64_t Val = CE->getValue();
3887 uint64_t NVal = ~Val;
3889 // If this is a 32-bit register and the value has none of the upper
3890 // set, clear the complemented upper 32-bits so the logic below works
3891 // for 32-bit registers too.
3892 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3894 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3896 (Val & 0xFFFFFFFFULL) == Val)
3897 NVal &= 0x00000000FFFFFFFFULL;
3899 // MOVK Rd, imm << 0
3900 if ((Val & 0xFFFF) == Val)
3901 rewriteMOVI(Operands, "movz", Val, 0, getContext());
3903 // MOVK Rd, imm << 16
3904 else if ((Val & 0xFFFF0000ULL) == Val)
3905 rewriteMOVI(Operands, "movz", Val, 16, getContext());
3907 // MOVK Rd, imm << 32
3908 else if ((Val & 0xFFFF00000000ULL) == Val)
3909 rewriteMOVI(Operands, "movz", Val, 32, getContext());
3911 // MOVK Rd, imm << 48
3912 else if ((Val & 0xFFFF000000000000ULL) == Val)
3913 rewriteMOVI(Operands, "movz", Val, 48, getContext());
3915 // MOVN Rd, (~imm << 0)
3916 else if ((NVal & 0xFFFFULL) == NVal)
3917 rewriteMOVI(Operands, "movn", NVal, 0, getContext());
3919 // MOVN Rd, ~(imm << 16)
3920 else if ((NVal & 0xFFFF0000ULL) == NVal)
3921 rewriteMOVI(Operands, "movn", NVal, 16, getContext());
3923 // MOVN Rd, ~(imm << 32)
3924 else if ((NVal & 0xFFFF00000000ULL) == NVal)
3925 rewriteMOVI(Operands, "movn", NVal, 32, getContext());
3927 // MOVN Rd, ~(imm << 48)
3928 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
3929 rewriteMOVI(Operands, "movn", NVal, 48, getContext());
3931 } else if (Op1->isReg() && Op2->isReg()) {
3933 unsigned Reg1 = Op1->getReg();
3934 unsigned Reg2 = Op2->getReg();
3935 if ((Reg1 == ARM64::SP &&
3936 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
3937 (Reg2 == ARM64::SP &&
3938 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
3939 (Reg1 == ARM64::WSP &&
3940 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
3941 (Reg2 == ARM64::WSP &&
3942 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
3943 rewriteMOVRSP(Operands, getContext());
3945 rewriteMOVR(Operands, getContext());
3947 } else if (NumOperands == 4) {
3948 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
3949 // Handle the uimm24 immediate form, where the shift is not specified.
3950 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3952 if (const MCConstantExpr *CE =
3953 dyn_cast<MCConstantExpr>(Op3->getImm())) {
3954 uint64_t Val = CE->getValue();
3955 if (Val >= (1 << 24)) {
3956 Error(IDLoc, "immediate value is too large");
3959 if (Val < (1 << 12)) {
3960 Operands.push_back(ARM64Operand::CreateShifter(
3961 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3962 } else if ((Val & 0xfff) == 0) {
3964 CE = MCConstantExpr::Create(Val >> 12, getContext());
3966 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
3967 Operands.push_back(ARM64Operand::CreateShifter(
3968 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
3970 Error(IDLoc, "immediate value is too large");
3974 Operands.push_back(ARM64Operand::CreateShifter(
3975 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3979 // FIXME: Horible hack to handle the LSL -> UBFM alias.
3980 } else if (NumOperands == 4 && Tok == "lsl") {
3981 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3982 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3983 if (Op2->isReg() && Op3->isImm()) {
3984 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3986 uint64_t Op3Val = Op3CE->getValue();
3987 uint64_t NewOp3Val = 0;
3988 uint64_t NewOp4Val = 0;
3989 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3991 NewOp3Val = (32 - Op3Val) & 0x1f;
3992 NewOp4Val = 31 - Op3Val;
3994 NewOp3Val = (64 - Op3Val) & 0x3f;
3995 NewOp4Val = 63 - Op3Val;
3998 const MCExpr *NewOp3 =
3999 MCConstantExpr::Create(NewOp3Val, getContext());
4000 const MCExpr *NewOp4 =
4001 MCConstantExpr::Create(NewOp4Val, getContext());
4003 Operands[0] = ARM64Operand::CreateToken(
4004 "ubfm", false, Op->getStartLoc(), getContext());
4005 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4006 Op3->getEndLoc(), getContext());
4007 Operands.push_back(ARM64Operand::CreateImm(
4008 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4014 // FIXME: Horrible hack to handle the optional LSL shift for vector
4016 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4017 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4018 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4019 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4020 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4021 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4022 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4023 IDLoc, getContext()));
4024 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4025 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4026 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4027 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4028 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4029 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4030 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4031 // Canonicalize on lower-case for ease of comparison.
4032 std::string CanonicalSuffix = Suffix.lower();
4033 if (Tok != "movi" ||
4034 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4035 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4036 Operands.push_back(ARM64Operand::CreateShifter(
4037 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4040 } else if (NumOperands == 5) {
4041 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4042 // UBFIZ -> UBFM aliases.
4043 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4044 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4045 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4046 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4048 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4049 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4050 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4052 if (Op3CE && Op4CE) {
4053 uint64_t Op3Val = Op3CE->getValue();
4054 uint64_t Op4Val = Op4CE->getValue();
4056 uint64_t NewOp3Val = 0;
4057 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
4059 NewOp3Val = (32 - Op3Val) & 0x1f;
4061 NewOp3Val = (64 - Op3Val) & 0x3f;
4063 uint64_t NewOp4Val = Op4Val - 1;
4065 const MCExpr *NewOp3 =
4066 MCConstantExpr::Create(NewOp3Val, getContext());
4067 const MCExpr *NewOp4 =
4068 MCConstantExpr::Create(NewOp4Val, getContext());
4069 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4070 Op3->getEndLoc(), getContext());
4071 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4072 Op4->getEndLoc(), getContext());
4074 Operands[0] = ARM64Operand::CreateToken(
4075 "bfm", false, Op->getStartLoc(), getContext());
4076 else if (Tok == "sbfiz")
4077 Operands[0] = ARM64Operand::CreateToken(
4078 "sbfm", false, Op->getStartLoc(), getContext());
4079 else if (Tok == "ubfiz")
4080 Operands[0] = ARM64Operand::CreateToken(
4081 "ubfm", false, Op->getStartLoc(), getContext());
4083 llvm_unreachable("No valid mnemonic for alias?");
4091 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4092 // UBFX -> UBFM aliases.
4093 } else if (NumOperands == 5 &&
4094 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4095 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4096 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4097 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4099 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4100 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4101 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4103 if (Op3CE && Op4CE) {
4104 uint64_t Op3Val = Op3CE->getValue();
4105 uint64_t Op4Val = Op4CE->getValue();
4106 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4108 if (NewOp4Val >= Op3Val) {
4109 const MCExpr *NewOp4 =
4110 MCConstantExpr::Create(NewOp4Val, getContext());
4111 Operands[4] = ARM64Operand::CreateImm(
4112 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4114 Operands[0] = ARM64Operand::CreateToken(
4115 "bfm", false, Op->getStartLoc(), getContext());
4116 else if (Tok == "sbfx")
4117 Operands[0] = ARM64Operand::CreateToken(
4118 "sbfm", false, Op->getStartLoc(), getContext());
4119 else if (Tok == "ubfx")
4120 Operands[0] = ARM64Operand::CreateToken(
4121 "ubfm", false, Op->getStartLoc(), getContext());
4123 llvm_unreachable("No valid mnemonic for alias?");
4132 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4133 // InstAlias can't quite handle this since the reg classes aren't
4135 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4136 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4138 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4139 if (OpCE->getValue() < 32) {
4140 // The source register can be Wn here, but the matcher expects a
4141 // GPR64. Twiddle it here if necessary.
4142 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4144 unsigned Reg = getXRegFromWReg(Op->getReg());
4145 Operands[1] = ARM64Operand::CreateReg(
4146 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4153 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4154 // InstAlias can't quite handle this since the reg classes aren't
4156 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4157 // The source register can be Wn here, but the matcher expects a
4158 // GPR64. Twiddle it here if necessary.
4159 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4161 unsigned Reg = getXRegFromWReg(Op->getReg());
4162 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4163 Op->getEndLoc(), getContext());
4167 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4168 else if (NumOperands == 3 &&
4169 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4170 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4172 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
4174 // The source register can be Wn here, but the matcher expects a
4175 // GPR64. Twiddle it here if necessary.
4176 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4178 unsigned Reg = getXRegFromWReg(Op->getReg());
4179 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4180 Op->getEndLoc(), getContext());
4186 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4187 if (NumOperands == 3 && Tok == "fmov") {
4188 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4189 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
4190 if (RegOp->isReg() && ImmOp->isFPImm() &&
4191 ImmOp->getFPImm() == (unsigned)-1) {
4192 unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
4196 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4197 Op->getEndLoc(), getContext());
4202 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4203 // FMOV instructions. The index isn't an actual instruction operand
4204 // but rather syntactic sugar. It really should be part of the mnemonic,
4205 // not the operand, but whatever.
4206 if ((NumOperands == 5) && Tok == "fmov") {
4207 // If the last operand is a vectorindex of '1', then replace it with
4208 // a '[' '1' ']' token sequence, which is what the matcher
4209 // (annoyingly) expects for a literal vector index operand.
4210 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4211 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4212 SMLoc Loc = Op->getStartLoc();
4213 Operands.pop_back();
4216 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4218 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4220 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4221 } else if (Op->isReg()) {
4222 // Similarly, check the destination operand for the GPR->High-lane
4224 unsigned OpNo = NumOperands - 2;
4225 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4226 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4227 SMLoc Loc = Op->getStartLoc();
4229 ARM64Operand::CreateToken("[", false, Loc, getContext());
4231 Operands.begin() + OpNo + 1,
4232 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4234 Operands.begin() + OpNo + 2,
4235 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4242 // First try to match against the secondary set of tables containing the
4243 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4244 unsigned MatchResult =
4245 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4247 // If that fails, try against the alternate table containing long-form NEON:
4248 // "fadd v0.2s, v1.2s, v2.2s"
4249 if (MatchResult != Match_Success)
4251 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4253 switch (MatchResult) {
4254 case Match_Success: {
4255 // Perform range checking and other semantic validations
4256 SmallVector<SMLoc, 8> OperandLocs;
4257 NumOperands = Operands.size();
4258 for (unsigned i = 1; i < NumOperands; ++i)
4259 OperandLocs.push_back(Operands[i]->getStartLoc());
4260 if (validateInstruction(Inst, OperandLocs))
4264 Out.EmitInstruction(Inst, STI);
4267 case Match_MissingFeature: {
4268 assert(ErrorInfo && "Unknown missing feature!");
4269 // Special case the error message for the very common case where only
4270 // a single subtarget feature is missing (neon, e.g.).
4271 std::string Msg = "instruction requires:";
4273 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4274 if (ErrorInfo & Mask) {
4276 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4280 return Error(IDLoc, Msg);
4282 case Match_MnemonicFail:
4283 return showMatchError(IDLoc, MatchResult);
4284 case Match_InvalidOperand: {
4285 SMLoc ErrorLoc = IDLoc;
4286 if (ErrorInfo != ~0U) {
4287 if (ErrorInfo >= Operands.size())
4288 return Error(IDLoc, "too few operands for instruction");
4290 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4291 if (ErrorLoc == SMLoc())
4294 // If the match failed on a suffix token operand, tweak the diagnostic
4296 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4297 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4298 MatchResult = Match_InvalidSuffix;
4300 return showMatchError(ErrorLoc, MatchResult);
4302 case Match_InvalidMemoryIndexedSImm9: {
4303 // If there is not a '!' after the memory operand that failed, we really
4304 // want the diagnostic for the non-pre-indexed instruction variant instead.
4305 // Be careful to check for the post-indexed variant as well, which also
4306 // uses this match diagnostic. Also exclude the explicitly unscaled
4307 // mnemonics, as they want the unscaled diagnostic as well.
4308 if (Operands.size() == ErrorInfo + 1 &&
4309 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4310 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4311 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4312 // the register class of the previous operand. Default to 64 in case
4313 // we see something unexpected.
4314 MatchResult = Match_InvalidMemoryIndexed64;
4316 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4317 if (PrevOp->isReg() &&
4318 ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
4320 MatchResult = Match_InvalidMemoryIndexed32;
4323 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4324 if (ErrorLoc == SMLoc())
4326 return showMatchError(ErrorLoc, MatchResult);
4328 case Match_InvalidMemoryIndexed32:
4329 case Match_InvalidMemoryIndexed64:
4330 case Match_InvalidMemoryIndexed128:
4331 // If there is a '!' after the memory operand that failed, we really
4332 // want the diagnostic for the pre-indexed instruction variant instead.
4333 if (Operands.size() > ErrorInfo + 1 &&
4334 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4335 MatchResult = Match_InvalidMemoryIndexedSImm9;
4337 case Match_InvalidMemoryIndexed8:
4338 case Match_InvalidMemoryIndexed16:
4339 case Match_InvalidMemoryIndexed32SImm7:
4340 case Match_InvalidMemoryIndexed64SImm7:
4341 case Match_InvalidMemoryIndexed128SImm7:
4342 case Match_InvalidImm1_8:
4343 case Match_InvalidImm1_16:
4344 case Match_InvalidImm1_32:
4345 case Match_InvalidImm1_64:
4346 case Match_InvalidLabel: {
4347 // Any time we get here, there's nothing fancy to do. Just get the
4348 // operand SMLoc and display the diagnostic.
4349 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4350 // If it's a memory operand, the error is with the offset immediate,
4351 // so get that location instead.
4352 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4353 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4354 if (ErrorLoc == SMLoc())
4356 return showMatchError(ErrorLoc, MatchResult);
4360 llvm_unreachable("Implement any new match types added!");
4364 /// ParseDirective parses the arm specific directives
4365 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4366 StringRef IDVal = DirectiveID.getIdentifier();
4367 SMLoc Loc = DirectiveID.getLoc();
4368 if (IDVal == ".hword")
4369 return parseDirectiveWord(2, Loc);
4370 if (IDVal == ".word")
4371 return parseDirectiveWord(4, Loc);
4372 if (IDVal == ".xword")
4373 return parseDirectiveWord(8, Loc);
4374 if (IDVal == ".tlsdesccall")
4375 return parseDirectiveTLSDescCall(Loc);
4377 return parseDirectiveLOH(IDVal, Loc);
4380 /// parseDirectiveWord
4381 /// ::= .word [ expression (, expression)* ]
4382 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4383 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4385 const MCExpr *Value;
4386 if (getParser().parseExpression(Value))
4389 getParser().getStreamer().EmitValue(Value, Size);
4391 if (getLexer().is(AsmToken::EndOfStatement))
4394 // FIXME: Improve diagnostic.
4395 if (getLexer().isNot(AsmToken::Comma))
4396 return Error(L, "unexpected token in directive");
4405 // parseDirectiveTLSDescCall:
4406 // ::= .tlsdesccall symbol
4407 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4409 if (getParser().parseIdentifier(Name))
4410 return Error(L, "expected symbol after directive");
4412 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4413 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4414 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4417 Inst.setOpcode(ARM64::TLSDESCCALL);
4418 Inst.addOperand(MCOperand::CreateExpr(Expr));
4420 getParser().getStreamer().EmitInstruction(Inst, STI);
4424 /// ::= .loh <lohName | lohId> label1, ..., labelN
4425 /// The number of arguments depends on the loh identifier.
4426 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4427 if (IDVal != MCLOHDirectiveName())
4430 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4431 if (getParser().getTok().isNot(AsmToken::Integer))
4432 return TokError("expected an identifier or a number in directive");
4433 // We successfully get a numeric value for the identifier.
4434 // Check if it is valid.
4435 int64_t Id = getParser().getTok().getIntVal();
4436 Kind = (MCLOHType)Id;
4437 // Check that Id does not overflow MCLOHType.
4438 if (!isValidMCLOHType(Kind) || Id != Kind)
4439 return TokError("invalid numeric identifier in directive");
4441 StringRef Name = getTok().getIdentifier();
4442 // We successfully parse an identifier.
4443 // Check if it is a recognized one.
4444 int Id = MCLOHNameToId(Name);
4447 return TokError("invalid identifier in directive");
4448 Kind = (MCLOHType)Id;
4450 // Consume the identifier.
4452 // Get the number of arguments of this LOH.
4453 int NbArgs = MCLOHIdToNbArgs(Kind);
4455 assert(NbArgs != -1 && "Invalid number of arguments");
4457 SmallVector<MCSymbol *, 3> Args;
4458 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4460 if (getParser().parseIdentifier(Name))
4461 return TokError("expected identifier in directive");
4462 Args.push_back(getContext().GetOrCreateSymbol(Name));
4464 if (Idx + 1 == NbArgs)
4466 if (getLexer().isNot(AsmToken::Comma))
4467 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4470 if (getLexer().isNot(AsmToken::EndOfStatement))
4471 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4473 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4478 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4479 ARM64MCExpr::VariantKind &ELFRefKind,
4480 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4481 const MCConstantExpr *&Addend) {
4482 ELFRefKind = ARM64MCExpr::VK_INVALID;
4483 DarwinRefKind = MCSymbolRefExpr::VK_None;
4485 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4486 ELFRefKind = AE->getKind();
4487 Expr = AE->getSubExpr();
4490 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4492 // It's a simple symbol reference with no addend.
4493 DarwinRefKind = SE->getKind();
4498 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4502 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4505 DarwinRefKind = SE->getKind();
4507 if (BE->getOpcode() != MCBinaryExpr::Add)
4510 // See if the addend is is a constant, otherwise there's more going
4511 // on here than we can deal with.
4512 Addend = dyn_cast<MCConstantExpr>(BE->getRHS());
4516 // It's some symbol reference + a constant addend, but really
4517 // shouldn't use both Darwin and ELF syntax.
4518 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4519 DarwinRefKind == MCSymbolRefExpr::VK_None;
4522 /// Force static initialization.
4523 extern "C" void LLVMInitializeARM64AsmParser() {
4524 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
4525 RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
4528 #define GET_REGISTER_MATCHER
4529 #define GET_SUBTARGET_FEATURE_NAME
4530 #define GET_MATCHER_IMPLEMENTATION
4531 #include "ARM64GenAsmMatcher.inc"
4533 // Define this matcher function after the auto-generated include so we
4534 // have the match class enum definitions.
4535 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4537 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4538 // If the kind is a token for a literal immediate, check if our asm
4539 // operand matches. This is for InstAliases which have a fixed-value
4540 // immediate in the syntax.
4541 int64_t ExpectedVal;
4544 return Match_InvalidOperand;
4586 return Match_InvalidOperand;
4587 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4589 return Match_InvalidOperand;
4590 if (CE->getValue() == ExpectedVal)
4591 return Match_Success;
4592 return Match_InvalidOperand;