1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
// NOTE(review): this chunk is a partial extraction -- interior lines are
// missing between the numbered lines shown, so only comments are added here.
42   typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45   StringRef Mnemonic; ///< Instruction mnemonic.
// Convenience accessors for the generic parser/lexer driving this target
// parser.
49   MCAsmParser &getParser() const { return Parser; }
50   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52   SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Recursive-descent helpers for individual operand/instruction forms.
// Following MCAsmParser convention (see Error() below), the bool-returning
// parsers presumably report failure by returning true -- confirm in full file.
54   bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55   unsigned parseCondCodeString(StringRef Cond);
56   bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57   int tryParseRegister();
58   int tryMatchVectorRegister(StringRef &Kind, bool expected);
59   bool parseOptionalShift(OperandVector &Operands);
60   bool parseOptionalExtend(OperandVector &Operands);
61   bool parseRegister(OperandVector &Operands);
62   bool parseMemory(OperandVector &Operands);
63   bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64   bool parseVectorList(OperandVector &Operands);
65   bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic helpers; Error() returns true so callers can `return Error(...)`.
68   void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69   bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70   bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target-specific assembler directives (.word, .tlsdesccall, .loh ...).
72   bool parseDirectiveWord(unsigned Size, SMLoc L);
73   bool parseDirectiveTLSDescCall(SMLoc L);
75   bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77   bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79                                OperandVector &Operands, MCStreamer &Out,
80                                unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
// Pulls in the declarations produced by TableGen from the .td files.
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
// "tryParse" hooks invoked by the generated matcher for custom operand kinds.
89   OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90   OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91   OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
92   OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
93   OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
94   OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
95   OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
96   OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
97   OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
98   bool tryParseVectorRegister(OperandVector &Operands);
// Target-private match results, numbered after the generic ones; the
// TableGen'erated diagnostic enumerators are appended below.
101   enum ARM64MatchResultTy {
102     Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
// Constructor: wires up the subtarget and parser, then caches the feature
// bits the generated matcher consults via getAvailableFeatures().
106   ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107                  const MCInstrInfo &MII,
108                  const MCTargetOptions &Options)
109       : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
110     MCAsmParserExtension::Initialize(_Parser);
112     // Initialize the set of available features.
113     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser virtual entry points implemented by this target.
116   virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
117                                 SMLoc NameLoc, OperandVector &Operands);
118   virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
119   virtual bool ParseDirective(AsmToken DirectiveID);
120   unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
// Shared helper: decompose a symbol reference expression into its ELF or
// Darwin modifier kind (used by operand predicates in ARM64Operand too).
122   static bool classifySymbolRef(const MCExpr *Expr,
123                                 ARM64MCExpr::VariantKind &ELFRefKind,
124                                 MCSymbolRefExpr::VariantKind &DarwinRefKind,
127 } // end anonymous namespace
131 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
133 class ARM64Operand : public MCParsedAsmOperand {
// Addressing modes for the k_Memory operand kind.
136     ImmediateOffset, // pre-indexed, no writeback
137     RegisterOffset   // register offset, with optional extend
// Source ranges; OffsetLoc points specifically at a memory operand's offset
// so diagnostics can be precise.
157   SMLoc StartLoc, EndLoc, OffsetLoc;
162     bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// A register list such as "{ v0.8b, v1.8b }": start register, count, and
// element type of each register.
170   struct VectorListOp {
173     unsigned NumElements;
174     unsigned ElementKind;
177   struct VectorIndexOp {
186     unsigned Val; // Encoded 8-bit representation.
190     unsigned Val; // Not the enum since not all values have names.
214   // This is for all forms of ARM64 address expressions
216     unsigned BaseRegNum, OffsetRegNum;
217     ARM64_AM::ExtendType ExtType;
// OffsetImm is a pointer; null means "no offset expression was parsed"
// (several predicates below rely on that).
220     const MCExpr *OffsetImm;
// Discriminated storage: only the member matching Kind is active.
227     struct VectorListOp VectorList;
228     struct VectorIndexOp VectorIndex;
230     struct FPImmOp FPImm;
231     struct BarrierOp Barrier;
232     struct SysRegOp SysReg;
233     struct SysCRImmOp SysCRImm;
234     struct PrefetchOp Prefetch;
235     struct ShifterOp Shifter;
236     struct ExtendOp Extend;
240   // Keep the MCContext around as the MCExprs may need manipulated during
241   // the add<>Operands() calls.
244   ARM64Operand(KindTy K, MCContext &_Ctx)
245       : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor. NOTE(review): only some member copies are visible in
// this extraction; presumably the full body copies the member selected by
// o.Kind -- confirm against the complete file.
248   ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
250     StartLoc = o.StartLoc;
269       VectorList = o.VectorList;
272       VectorIndex = o.VectorIndex;
278       SysCRImm = o.SysCRImm;
281       Prefetch = o.Prefetch;
295   /// getStartLoc - Get the location of the first token of this operand.
296   SMLoc getStartLoc() const { return StartLoc; }
297   /// getEndLoc - Get the location of the last token of this operand.
298   SMLoc getEndLoc() const { return EndLoc; }
299   /// getOffsetLoc - Get the location of the offset of this memory operand.
300   SMLoc getOffsetLoc() const { return OffsetLoc; }
// Typed accessors: each asserts that the matching union member is active.
302   StringRef getToken() const {
303     assert(Kind == k_Token && "Invalid access!");
304     return StringRef(Tok.Data, Tok.Length);
307   bool isTokenSuffix() const {
308     assert(Kind == k_Token && "Invalid access!");
312   const MCExpr *getImm() const {
313     assert(Kind == k_Immediate && "Invalid access!");
317   unsigned getFPImm() const {
318     assert(Kind == k_FPImm && "Invalid access!");
322   unsigned getBarrier() const {
323     assert(Kind == k_Barrier && "Invalid access!");
327   unsigned getReg() const {
328     assert(Kind == k_Register && "Invalid access!");
332   unsigned getVectorListStart() const {
333     assert(Kind == k_VectorList && "Invalid access!");
334     return VectorList.RegNum;
337   unsigned getVectorListCount() const {
338     assert(Kind == k_VectorList && "Invalid access!");
339     return VectorList.Count;
342   unsigned getVectorIndex() const {
343     assert(Kind == k_VectorIndex && "Invalid access!");
344     return VectorIndex.Val;
347   StringRef getSysReg() const {
348     assert(Kind == k_SysReg && "Invalid access!");
349     return StringRef(SysReg.Data, SysReg.Length);
352   unsigned getSysCR() const {
353     assert(Kind == k_SysCR && "Invalid access!");
357   unsigned getPrefetch() const {
358     assert(Kind == k_Prefetch && "Invalid access!");
362   unsigned getShifter() const {
363     assert(Kind == k_Shifter && "Invalid access!");
367   unsigned getExtend() const {
368     assert(Kind == k_Extend && "Invalid access!");
372   bool isImm() const { return Kind == k_Immediate; }
// Range predicates used by the generated matcher.  Each requires the
// immediate to be a resolved MCConstantExpr whose value fits the named
// encoding.  (Guard lines are elided in this extraction.)
// Signed 9-bit: [-256, 255].
373   bool isSImm9() const {
376     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
379     int64_t Val = MCE->getValue();
380     return (Val >= -256 && Val < 256);
// Signed 7-bit scaled by 4: multiples of 4 in [-256, 252].
382   bool isSImm7s4() const {
385     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
388     int64_t Val = MCE->getValue();
389     return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
// Signed 7-bit scaled by 8: multiples of 8 in [-512, 504].
391   bool isSImm7s8() const {
394     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
397     int64_t Val = MCE->getValue();
398     return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
// Signed 7-bit scaled by 16: multiples of 16 in [-1024, 1008].
400   bool isSImm7s16() const {
403     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
406     int64_t Val = MCE->getValue();
407     return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Unsigned/small-integer ranges; names give the inclusive bounds.
409   bool isImm0_7() const {
412     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
415     int64_t Val = MCE->getValue();
416     return (Val >= 0 && Val < 8);
418   bool isImm1_8() const {
421     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
424     int64_t Val = MCE->getValue();
425     return (Val > 0 && Val < 9);
427   bool isImm0_15() const {
430     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
433     int64_t Val = MCE->getValue();
434     return (Val >= 0 && Val < 16);
436   bool isImm1_16() const {
439     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
442     int64_t Val = MCE->getValue();
443     return (Val > 0 && Val < 17);
445   bool isImm0_31() const {
448     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
451     int64_t Val = MCE->getValue();
452     return (Val >= 0 && Val < 32);
454   bool isImm1_31() const {
457     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
460     int64_t Val = MCE->getValue();
461     return (Val >= 1 && Val < 32);
463   bool isImm1_32() const {
466     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
469     int64_t Val = MCE->getValue();
470     return (Val >= 1 && Val < 33);
472   bool isImm0_63() const {
475     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
478     int64_t Val = MCE->getValue();
479     return (Val >= 0 && Val < 64);
481   bool isImm1_63() const {
484     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
487     int64_t Val = MCE->getValue();
488     return (Val >= 1 && Val < 64);
490   bool isImm1_64() const {
493     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
496     int64_t Val = MCE->getValue();
497     return (Val >= 1 && Val < 65);
499   bool isImm0_127() const {
502     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
505     int64_t Val = MCE->getValue();
506     return (Val >= 0 && Val < 128);
508   bool isImm0_255() const {
511     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
514     int64_t Val = MCE->getValue();
515     return (Val >= 0 && Val < 256);
517   bool isImm0_65535() const {
520     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
523     int64_t Val = MCE->getValue();
524     return (Val >= 0 && Val < 65536);
// Bitmask-immediate checks delegate to the addressing-mode helpers for the
// 32- and 64-bit logical-immediate encodings.
526   bool isLogicalImm32() const {
529     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
532     return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
534   bool isLogicalImm64() const {
537     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
540     return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
542   bool isSIMDImmType10() const {
545     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
548     return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte offsets representable as a signed
// 26/19/14-bit word offset (hence the << 2 in the bounds).
550   bool isBranchTarget26() const {
553     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556     int64_t Val = MCE->getValue();
559     return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
561   bool isPCRelLabel19() const {
564     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
567     int64_t Val = MCE->getValue();
570     return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
572   bool isBranchTarget14() const {
575     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
578     int64_t Val = MCE->getValue();
581     return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// Shared helper for the MOVZ/MOVK symbol predicates below: the immediate
// must be a symbol reference whose ELF modifier is one of AllowedModifiers.
// Darwin modifiers are always rejected (line below returns false for any
// non-VK_None Darwin kind); unclassifiable expressions are rejected too.
584   bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
588     ARM64MCExpr::VariantKind ELFRefKind;
589     MCSymbolRefExpr::VariantKind DarwinRefKind;
591     if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
595     if (DarwinRefKind != MCSymbolRefExpr::VK_None)
// Linear scan of the small allowed-modifier list.
598     for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
599       if (ELFRefKind == AllowedModifiers[i])
// Per-instruction wrappers: MOVZ accepts the signed :abs_gN_s:/TLS variants,
// MOVK accepts the no-check (_NC) variants, per the AArch64 ELF relocation
// modifiers listed in each array.
606   bool isMovZSymbolG3() const {
607     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
608     return isMovWSymbol(Variants);
611   bool isMovZSymbolG2() const {
612     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
613                                                    ARM64MCExpr::VK_ABS_G2_S,
614                                                    ARM64MCExpr::VK_TPREL_G2,
615                                                    ARM64MCExpr::VK_DTPREL_G2 };
616     return isMovWSymbol(Variants);
619   bool isMovZSymbolG1() const {
620     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
621                                                    ARM64MCExpr::VK_ABS_G1_S,
622                                                    ARM64MCExpr::VK_GOTTPREL_G1,
623                                                    ARM64MCExpr::VK_TPREL_G1,
624                                                    ARM64MCExpr::VK_DTPREL_G1, };
625     return isMovWSymbol(Variants);
628   bool isMovZSymbolG0() const {
629     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
630                                                    ARM64MCExpr::VK_ABS_G0_S,
631                                                    ARM64MCExpr::VK_TPREL_G0,
632                                                    ARM64MCExpr::VK_DTPREL_G0 };
633     return isMovWSymbol(Variants);
636   bool isMovKSymbolG3() const {
637     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
638     return isMovWSymbol(Variants);
641   bool isMovKSymbolG2() const {
642     static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
643     return isMovWSymbol(Variants);
646   bool isMovKSymbolG1() const {
647     static ARM64MCExpr::VariantKind Variants[] = {
648       ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
649       ARM64MCExpr::VK_DTPREL_G1_NC
651     return isMovWSymbol(Variants);
654   bool isMovKSymbolG0() const {
655     static ARM64MCExpr::VariantKind Variants[] = {
656       ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
657       ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
659     return isMovWSymbol(Variants);
662   bool isFPImm() const { return Kind == k_FPImm; }
663   bool isBarrier() const { return Kind == k_Barrier; }
664   bool isSysReg() const { return Kind == k_SysReg; }
// System-register predicates: a k_SysReg operand is valid for MRS/MSR/
// MSR-pstate if its name is found in the corresponding name mapper.
665   bool isMRSSystemRegister() const {
666     if (!isSysReg()) return false;
668     bool IsKnownRegister;
669     ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister);
671     return IsKnownRegister;
673   bool isMSRSystemRegister() const {
674     if (!isSysReg()) return false;
676     bool IsKnownRegister;
677     ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister);
679     return IsKnownRegister;
681   bool isSystemCPSRField() const {
682     if (!isSysReg()) return false;
684     bool IsKnownRegister;
685     ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
687     return IsKnownRegister;
// Scalar vs. vector registers share k_Register, split by the isVector flag.
689   bool isReg() const { return Kind == k_Register && !Reg.isVector; }
690   bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
692   /// Is this a vector list with the type implicit (presumably attached to the
693   /// instruction itself)?
694   template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
695     return Kind == k_VectorList && VectorList.Count == NumRegs &&
696            !VectorList.ElementKind;
// Fully-typed vector list: register count, element count, and element kind
// (e.g. 'b'/'h'/'s'/'d') must all match the template parameters.
699   template <unsigned NumRegs, unsigned NumElements, char ElementKind>
700   bool isTypedVectorList() const {
701     if (Kind != k_VectorList)
703     if (VectorList.Count != NumRegs)
705     if (VectorList.ElementKind != ElementKind)
707     return VectorList.NumElements == NumElements;
// Lane-index predicates: the upper bound is the lane count for each element
// size (16 bytes / 8 halves / 4 words / 2 doublewords per 128-bit register).
710   bool isVectorIndexB() const {
711     return Kind == k_VectorIndex && VectorIndex.Val < 16;
713   bool isVectorIndexH() const {
714     return Kind == k_VectorIndex && VectorIndex.Val < 8;
716   bool isVectorIndexS() const {
717     return Kind == k_VectorIndex && VectorIndex.Val < 4;
719   bool isVectorIndexD() const {
720     return Kind == k_VectorIndex && VectorIndex.Val < 2;
722   bool isToken() const { return Kind == k_Token; }
723   bool isTokenEqual(StringRef Str) const {
724     return Kind == k_Token && getToken() == Str;
726   bool isMem() const { return Kind == k_Memory; }
727   bool isSysCR() const { return Kind == k_SysCR; }
728   bool isPrefetch() const { return Kind == k_Prefetch; }
729   bool isShifter() const { return Kind == k_Shifter; }
730   bool isExtend() const {
731     // lsl is an alias for UXTW but will be parsed as a k_Shifter operand.
733       ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
734       return ST == ARM64_AM::LSL;
736     return Kind == k_Extend;
738   bool isExtend64() const {
739     if (Kind != k_Extend)
741     // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
742     ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
743     return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
745   bool isExtendLSL64() const {
746     // lsl is an alias for UXTX but will be parsed as a k_Shifter operand.
748       ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
749       return ST == ARM64_AM::LSL;
751     if (Kind != k_Extend)
753     ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
754     return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
757   bool isArithmeticShifter() const {
761     // An arithmetic shifter is LSL, LSR, or ASR.
762     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
763     return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
766   bool isMovImm32Shifter() const {
// NOTE(review): the two comments below were swapped relative to the code;
// the 32-bit form accepts LSL #0/#16 and the 64-bit form #0/#16/#32/#48.
770     // A 32-bit MOVi shifter is LSL of 0 or 16.
771     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
772     if (ST != ARM64_AM::LSL)
774     uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
775     return (Val == 0 || Val == 16);
778   bool isMovImm64Shifter() const {
782     // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
783     ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
784     if (ST != ARM64_AM::LSL)
786     uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
787     return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
790   bool isAddSubShifter() const {
794     // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
795     unsigned Val = Shifter.Val;
796     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
797            (ARM64_AM::getShiftValue(Val) == 0 ||
798             ARM64_AM::getShiftValue(Val) == 12);
801   bool isLogicalVecShifter() const {
805     // A logical vector shifter is a left shift by 0, 8, 16, or 24.
806     unsigned Val = Shifter.Val;
807     unsigned Shift = ARM64_AM::getShiftValue(Val);
808     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
809            (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
812   bool isLogicalVecHalfWordShifter() const {
813     if (!isLogicalVecShifter())
816     // A logical vector half-word shifter is a left shift by 0 or 8.
817     unsigned Val = Shifter.Val;
818     unsigned Shift = ARM64_AM::getShiftValue(Val);
819     return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
820            (Shift == 0 || Shift == 8);
823   bool isMoveVecShifter() const {
827     // A move vector shifter is an MSL shift by 8 or 16.
828     unsigned Val = Shifter.Val;
829     unsigned Shift = ARM64_AM::getShiftValue(Val);
830     return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
831            (Shift == 8 || Shift == 16);
// Register-offset memory predicates: ShiftVal must be 0 (no shift) or
// log2(access size), matching the scaled register-offset addressing forms.
834   bool isMemoryRegisterOffset8() const {
835     return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
838   bool isMemoryRegisterOffset16() const {
839     return isMem() && Mem.Mode == RegisterOffset &&
840            (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
843   bool isMemoryRegisterOffset32() const {
844     return isMem() && Mem.Mode == RegisterOffset &&
845            (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
848   bool isMemoryRegisterOffset64() const {
849     return isMem() && Mem.Mode == RegisterOffset &&
850            (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
853   bool isMemoryRegisterOffset128() const {
854     return isMem() && Mem.Mode == RegisterOffset &&
855            (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
858   bool isMemoryUnscaled() const {
861     if (Mem.Mode != ImmediateOffset)
865     // Make sure the immediate value is valid.
866     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
869     // The offset must fit in a signed 9-bit unscaled immediate.
870     int64_t Value = CE->getValue();
871     return (Value >= -256 && Value < 256);
873   // Fallback unscaled operands are for aliases of LDR/STR that fall back
874   // to LDUR/STUR when the offset is not legal for the former but is for
875   // the latter. As such, in addition to checking for being a legal unscaled
876   // address, also check that it is not a legal scaled address. This avoids
877   // ambiguity in the matcher.
878   bool isMemoryUnscaledFB8() const {
879     return isMemoryUnscaled() && !isMemoryIndexed8();
881   bool isMemoryUnscaledFB16() const {
882     return isMemoryUnscaled() && !isMemoryIndexed16();
884   bool isMemoryUnscaledFB32() const {
885     return isMemoryUnscaled() && !isMemoryIndexed32();
887   bool isMemoryUnscaledFB64() const {
888     return isMemoryUnscaled() && !isMemoryIndexed64();
890   bool isMemoryUnscaledFB128() const {
891     return isMemoryUnscaled() && !isMemoryIndexed128();
// Scaled unsigned-offset form: constant offsets must be a non-negative
// multiple of Scale fitting a 12-bit scaled immediate; otherwise, accept
// page-offset symbol modifiers and let fixups handle the value.
893   bool isMemoryIndexed(unsigned Scale) const {
896     if (Mem.Mode != ImmediateOffset)
900     // Make sure the immediate value is valid.
901     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
904     // The offset must be a positive multiple of the scale and in range of
905     // encoding with a 12-bit immediate.
906     int64_t Value = CE->getValue();
907     return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
910     // If it's not a constant, check for some expressions we know.
911     const MCExpr *Expr = Mem.OffsetImm;
912     ARM64MCExpr::VariantKind ELFRefKind;
913     MCSymbolRefExpr::VariantKind DarwinRefKind;
915     if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
917       // If we don't understand the expression, assume the best and
918       // let the fixup and relocation code deal with it.
922     if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
923         ELFRefKind == ARM64MCExpr::VK_LO12 ||
924         ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
925         ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
926         ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
927         ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
928         ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
929         ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
930         ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
931       // Note that we don't range-check the addend. It's adjusted modulo page
932       // size when converted, so there is no "out of range" condition when using
934       return Addend >= 0 && (Addend % Scale) == 0;
935     } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
936                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
937       // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Size-specific wrappers; the scale is the access size in bytes.
943   bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
944   bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
945   bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
946   bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
947   bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
948   bool isMemoryNoIndex() const {
951     if (Mem.Mode != ImmediateOffset)
956     // Make sure the immediate value is valid. Only zero is allowed.
957     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
958     if (!CE || CE->getValue() != 0)
962   bool isMemorySIMDNoIndex() const {
965     if (Mem.Mode != ImmediateOffset)
// NOTE(review): OffsetImm is a const MCExpr*, so this is a null-pointer
// check ("no offset expression parsed"), not a check that the value is 0.
967     return Mem.OffsetImm == 0;
// Pre/post-index immediate forms.  The asserts below encode the invariant
// that by this point the parser only produces constant offsets here.
969   bool isMemoryIndexedSImm9() const {
970     if (!isMem() || Mem.Mode != ImmediateOffset)
974     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
975     assert(CE && "Non-constant pre-indexed offset!");
976     int64_t Value = CE->getValue();
977     return Value >= -256 && Value <= 255;
979   bool isMemoryIndexed32SImm7() const {
980     if (!isMem() || Mem.Mode != ImmediateOffset)
984     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
985     assert(CE && "Non-constant pre-indexed offset!");
986     int64_t Value = CE->getValue();
987     return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
989   bool isMemoryIndexed64SImm7() const {
990     if (!isMem() || Mem.Mode != ImmediateOffset)
994     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
995     assert(CE && "Non-constant pre-indexed offset!");
996     int64_t Value = CE->getValue();
997     return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
999   bool isMemoryIndexed128SImm7() const {
1000     if (!isMem() || Mem.Mode != ImmediateOffset)
1004     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1005     assert(CE && "Non-constant pre-indexed offset!");
1006     int64_t Value = CE->getValue();
1007     return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
// ADRP: +/-4GB page-aligned range (21-bit signed page count * 4096).
1010   bool isAdrpLabel() const {
1011     // Validation was handled during parsing, so we just sanity check that
1012     // something didn't go haywire.
1016     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1017       int64_t Val = CE->getValue();
1018       int64_t Min = - (4096 * (1LL << (21 - 1)));
1019       int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1020       return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR: +/-1MB byte range (21-bit signed byte offset).
1026   bool isAdrLabel() const {
1027     // Validation was handled during parsing, so we just sanity check that
1028     // something didn't go haywire.
1032     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1033       int64_t Val = CE->getValue();
1034       int64_t Min = - (1LL << (21 - 1));
1035       int64_t Max = ((1LL << (21 - 1)) - 1);
1036       return Val >= Min && Val <= Max;
// Helper for the add*Operands methods below: fold constant expressions to
// MCOperand immediates, treat a null expression as 0, and pass anything
// else through as a symbolic expression for later fixup.
1042   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1043     // Add as immediates when possible. Null MCExpr = 0.
1045       Inst.addOperand(MCOperand::CreateImm(0));
1046     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1047       Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1049       Inst.addOperand(MCOperand::CreateExpr(Expr));
1052   void addRegOperands(MCInst &Inst, unsigned N) const {
1053     assert(N == 1 && "Invalid number of operands!");
1054     Inst.addOperand(MCOperand::CreateReg(getReg()));
1057   void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1058     assert(N == 1 && "Invalid number of operands!");
1059     Inst.addOperand(MCOperand::CreateReg(getReg()));
// Vector-list emitters: translate the parsed start register into the
// matching D- or Q-register *tuple* register.  The `- ARM64::Q0` offset
// suggests the parsed list start is recorded as a Q-register number --
// TODO(review) confirm against the vector-list parser in the full file.
1062   template <unsigned NumRegs>
1063   void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1064     assert(N == 1 && "Invalid number of operands!");
1065     static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1066                                     ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1067     unsigned FirstReg = FirstRegs[NumRegs - 1];
1070         MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1073   template <unsigned NumRegs>
1074   void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1075     assert(N == 1 && "Invalid number of operands!");
1076     static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1077                                     ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1078     unsigned FirstReg = FirstRegs[NumRegs - 1];
1081         MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
// Lane indices are emitted as plain immediates for every element size.
1084   void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1085     assert(N == 1 && "Invalid number of operands!");
1086     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1089   void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1090     assert(N == 1 && "Invalid number of operands!");
1091     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1094   void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1095     assert(N == 1 && "Invalid number of operands!");
1096     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1099   void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1100     assert(N == 1 && "Invalid number of operands!");
1101     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1104   void addImmOperands(MCInst &Inst, unsigned N) const {
1105     assert(N == 1 && "Invalid number of operands!");
1106     // If this is a pageoff symrefexpr with an addend, adjust the addend
1107     // to be only the page-offset portion. Otherwise, just add the expr
1109     addExpr(Inst, getImm());
// ADRP label: constant targets are encoded as a page count (value >> 12);
// symbolic targets are left to the fixup machinery.
1112   void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1113     assert(N == 1 && "Invalid number of operands!");
1114     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1116       addExpr(Inst, getImm());
1118       Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1121   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1122     addImmOperands(Inst, N);
1125   void addSImm9Operands(MCInst &Inst, unsigned N) const {
1126     assert(N == 1 && "Invalid number of operands!");
1127     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1128     assert(MCE && "Invalid constant immediate operand!");
1129     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Scaled signed-7-bit forms divide by the access size (4/8/16) so the
// encoded field holds the register-pair offset in units, not bytes.
1132   void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1133     assert(N == 1 && "Invalid number of operands!");
1134     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1135     assert(MCE && "Invalid constant immediate operand!");
1136     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1139   void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1140     assert(N == 1 && "Invalid number of operands!");
1141     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1142     assert(MCE && "Invalid constant immediate operand!");
1143     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1146   void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1147     assert(N == 1 && "Invalid number of operands!");
1148     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1149     assert(MCE && "Invalid constant immediate operand!");
1150     Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// The addImmX_Y emitters are all identical: range checking already happened
// in the matching isImmX_Y predicate, so these just emit the raw value.
1153   void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1154     assert(N == 1 && "Invalid number of operands!");
1155     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1156     assert(MCE && "Invalid constant immediate operand!");
1157     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1160   void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1161     assert(N == 1 && "Invalid number of operands!");
1162     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1163     assert(MCE && "Invalid constant immediate operand!");
1164     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1167   void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1168     assert(N == 1 && "Invalid number of operands!");
1169     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1170     assert(MCE && "Invalid constant immediate operand!");
1171     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1174   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1175     assert(N == 1 && "Invalid number of operands!");
1176     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1177     assert(MCE && "Invalid constant immediate operand!");
1178     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1181   void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1182     assert(N == 1 && "Invalid number of operands!");
1183     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1184     assert(MCE && "Invalid constant immediate operand!");
1185     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1188   void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1189     assert(N == 1 && "Invalid number of operands!");
1190     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1191     assert(MCE && "Invalid constant immediate operand!");
1192     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1195   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1196     assert(N == 1 && "Invalid number of operands!");
1197     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1198     assert(MCE && "Invalid constant immediate operand!");
1199     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1202   void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1203     assert(N == 1 && "Invalid number of operands!");
1204     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1205     assert(MCE && "Invalid constant immediate operand!");
1206     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1209   void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1210     assert(N == 1 && "Invalid number of operands!");
1211     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1212     assert(MCE && "Invalid constant immediate operand!");
1213     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1216   void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1217     assert(N == 1 && "Invalid number of operands!");
1218     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219     assert(MCE && "Invalid constant immediate operand!");
1220     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1223   void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1224     assert(N == 1 && "Invalid number of operands!");
1225     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1226     assert(MCE && "Invalid constant immediate operand!");
1227     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1230   void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1231     assert(N == 1 && "Invalid number of operands!");
1232     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233     assert(MCE && "Invalid constant immediate operand!");
1234     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1237   void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1238     assert(N == 1 && "Invalid number of operands!");
1239     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1240     assert(MCE && "Invalid constant immediate operand!");
1241     Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical immediates are re-encoded into the N:immr:imms bitmask form.
1244   void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1245     assert(N == 1 && "Invalid number of operands!");
1246     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1247     assert(MCE && "Invalid logical immediate operand!");
1248     uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1249     Inst.addOperand(MCOperand::CreateImm(encoding));
1252 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1253 assert(N == 1 && "Invalid number of operands!");
1254 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1255 assert(MCE && "Invalid logical immediate operand!");
1256 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1257 Inst.addOperand(MCOperand::CreateImm(encoding));
1260 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1261 assert(N == 1 && "Invalid number of operands!");
1262 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1263 assert(MCE && "Invalid immediate operand!");
1264 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1265 Inst.addOperand(MCOperand::CreateImm(encoding));
// --- PC-relative branch/label operand adders -----------------------------
// Constant branch offsets drop their two always-zero low bits (>> 2) to
// match the instruction encoding; a non-constant expression (a label) is
// added unmodified via addExpr and resolved later by a fixup.
// NOTE(review): the if/else that selects between the label path and the
// constant path is elided from this excerpt — the two statements after each
// dyn_cast belong to different branches in the full source; confirm there.
1268 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1269 // Branch operands don't encode the low bits, so shift them off
1270 // here. If it's a label, however, just put it on directly as there's
1271 // not enough information now to do anything.
1272 assert(N == 1 && "Invalid number of operands!");
1273 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// Label path: keep the symbolic expression.
1275 addExpr(Inst, getImm());
// Constant path: encode the word offset.
1278 assert(MCE && "Invalid constant immediate operand!");
1279 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1282 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1283 // Branch operands don't encode the low bits, so shift them off
1284 // here. If it's a label, however, just put it on directly as there's
1285 // not enough information now to do anything.
1286 assert(N == 1 && "Invalid number of operands!");
1287 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1289 addExpr(Inst, getImm());
1292 assert(MCE && "Invalid constant immediate operand!");
1293 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1296 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1297 // Branch operands don't encode the low bits, so shift them off
1298 // here. If it's a label, however, just put it on directly as there's
1299 // not enough information now to do anything.
1300 assert(N == 1 && "Invalid number of operands!");
1301 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1303 addExpr(Inst, getImm());
1306 assert(MCE && "Invalid constant immediate operand!");
1307 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// --- Misc special-operand adders -----------------------------------------
// FP immediates, barrier options, system registers, CRn/CRm fields,
// prefetch ops, shifters and extends. Each emits one immediate operand.
1310 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1311 assert(N == 1 && "Invalid number of operands!");
1312 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1315 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1316 assert(N == 1 && "Invalid number of operands!");
1317 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
// System registers are stored as strings and translated to their encoded
// bit patterns by the MRS/MSR/PState mappers.
// NOTE(review): the `bool Valid;` declarations (and any assert on Valid)
// are elided from this excerpt — `Valid` is used below without a visible
// declaration; confirm against the full source.
1320 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1321 assert(N == 1 && "Invalid number of operands!");
1324 uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid);
1326 Inst.addOperand(MCOperand::CreateImm(Bits));
1329 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1330 assert(N == 1 && "Invalid number of operands!");
1333 uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid);
1335 Inst.addOperand(MCOperand::CreateImm(Bits));
1338 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1339 assert(N == 1 && "Invalid number of operands!");
1342 uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
1344 Inst.addOperand(MCOperand::CreateImm(Bits));
1347 void addSysCROperands(MCInst &Inst, unsigned N) const {
1348 assert(N == 1 && "Invalid number of operands!");
1349 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1352 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1353 assert(N == 1 && "Invalid number of operands!");
1354 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// The shifter variants all forward the same packed shifter immediate; the
// distinct method names exist so tablegen-matched operand classes can each
// bind to an adder.
1357 void addShifterOperands(MCInst &Inst, unsigned N) const {
1358 assert(N == 1 && "Invalid number of operands!");
1359 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1362 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1363 assert(N == 1 && "Invalid number of operands!");
1364 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1367 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1368 assert(N == 1 && "Invalid number of operands!");
1369 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1372 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1373 assert(N == 1 && "Invalid number of operands!");
1374 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1377 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1378 assert(N == 1 && "Invalid number of operands!");
1379 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1382 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1383 assert(N == 1 && "Invalid number of operands!");
1384 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1387 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1388 assert(N == 1 && "Invalid number of operands!");
1389 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1392 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1393 assert(N == 1 && "Invalid number of operands!");
1394 Inst.addOperand(MCOperand::CreateImm(getShifter()));
// addExtendOperands: an `lsl #n` shifter parsed where an extend is expected
// is re-interpreted as UXTW (32-bit) / UXTX (64-bit, in addExtendLSL64Operands
// below) with the same shift amount.
// NOTE(review): the `if (isShifter())`/`else` structure around the two
// emission paths is elided from this excerpt; confirm against the full source.
1397 void addExtendOperands(MCInst &Inst, unsigned N) const {
1398 assert(N == 1 && "Invalid number of operands!");
1399 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
1401 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1402 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1403 ARM64_AM::getShiftValue(getShifter()));
1404 Inst.addOperand(MCOperand::CreateImm(imm));
1406 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1409 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1410 assert(N == 1 && "Invalid number of operands!");
1411 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1414 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1415 assert(N == 1 && "Invalid number of operands!");
1416 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1418 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1419 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1420 ARM64_AM::getShiftValue(getShifter()));
1421 Inst.addOperand(MCOperand::CreateImm(imm));
1423 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// --- Memory operand adders -----------------------------------------------
// Register-offset form emits: base reg, offset reg (widened from W to X via
// getXRegFromWReg), and a packed extend immediate. DoShift says whether the
// offset is scaled by the access size.
1426 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1427 assert(N == 3 && "Invalid number of operands!");
1429 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1430 Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
1431 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1432 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
// Width-specific wrappers: DoShift is true only when the explicit shift
// amount matches log2 of the access size (8-bit accesses accept any
// explicit shift).
1435 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1436 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1439 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1440 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1443 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1444 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1447 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1448 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1451 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1452 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
// Scaled immediate-offset form: the encoded offset is the byte offset
// divided by Scale. Symbolic offsets are wrapped in a divide expression so
// the fixup/linker can do the scaling later.
// NOTE(review): several control-flow lines (early return after the zero
// offset, the classifySymbolRef condition's first operands, the CreateDiv
// continuation and Ctx source) are elided from this excerpt.
1455 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1456 unsigned Scale) const {
1457 // Add the base register operand.
1458 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1460 if (!Mem.OffsetImm) {
1461 // There isn't an offset.
1462 Inst.addOperand(MCOperand::CreateImm(0));
1466 // Add the offset operand.
1467 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1468 assert(CE->getValue() % Scale == 0 &&
1469 "Offset operand must be multiple of the scale!");
1471 // The MCInst offset operand doesn't include the low bits (like the
1472 // instruction encoding).
1473 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1476 // If this is a pageoff symrefexpr with an addend, the linker will
1477 // do the scaling of the addend.
1479 // Otherwise we don't know what this is, so just add the scaling divide to
1480 // the expression and let the MC fixup evaluation code deal with it.
1481 const MCExpr *Expr = Mem.OffsetImm;
1482 ARM64MCExpr::VariantKind ELFRefKind;
1483 MCSymbolRefExpr::VariantKind DarwinRefKind;
1486 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1488 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1489 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1493 Inst.addOperand(MCOperand::CreateExpr(Expr));
// Unscaled (ldur/stur-style) form: the constant offset is emitted verbatim.
// NOTE(review): the branch emitting 0 for a missing offset has its `if`
// header elided here.
1496 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1497 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1498 // Add the base register operand.
1499 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1501 // Add the offset operand.
1503 Inst.addOperand(MCOperand::CreateImm(0));
1505 // Only constant offsets supported.
1506 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1507 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// Scale-specific wrappers over addMemoryIndexedOperands (scale = access
// size in bytes).
1511 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1512 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1513 addMemoryIndexedOperands(Inst, N, 16);
1516 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1517 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1518 addMemoryIndexedOperands(Inst, N, 8);
1521 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1522 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1523 addMemoryIndexedOperands(Inst, N, 4);
1526 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1527 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1528 addMemoryIndexedOperands(Inst, N, 2);
1531 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1532 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1533 addMemoryIndexedOperands(Inst, N, 1);
// Base-register-only forms: the offset is implicitly zero and not emitted.
1536 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1537 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1538 // Add the base register operand (the offset is always zero, so ignore it).
1539 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1542 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1543 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1544 // Add the base register operand (the offset is always zero, so ignore it).
1545 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
// Pre/post-indexed writeback form: asserts the constant offset is a
// multiple of Scale, then emits the raw (unscaled) offset.
// NOTE(review): the declaration/initialization of `Offset` is elided from
// this excerpt — it is used before any visible declaration.
1548 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1549 unsigned Scale) const {
1550 assert(N == 2 && "Invalid number of operands!");
1552 // Add the base register operand.
1553 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1555 // Add the offset operand.
1557 if (Mem.OffsetImm) {
1558 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1559 assert(CE && "Non-constant indexed offset operand!");
1560 Offset = CE->getValue();
1564 assert(Offset % Scale == 0 &&
1565 "Offset operand must be a multiple of the scale!");
1569 Inst.addOperand(MCOperand::CreateImm(Offset));
1572 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1573 addMemoryWritebackIndexedOperands(Inst, N, 1);
1576 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1577 addMemoryWritebackIndexedOperands(Inst, N, 4);
1580 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1581 addMemoryWritebackIndexedOperands(Inst, N, 8);
1584 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1585 addMemoryWritebackIndexedOperands(Inst, N, 16);
// Debug-print hook; implementation appears later in the file.
1588 virtual void print(raw_ostream &OS) const;
// --- Static factory functions --------------------------------------------
// Each Create* allocates an ARM64Operand of the matching kind and fills in
// its union member.
// NOTE(review): the StartLoc/EndLoc assignments and the `return Op;` lines
// of each factory are elided from this excerpt; confirm against the full
// source.
1590 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1592 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1593 Op->Tok.Data = Str.data();
1594 Op->Tok.Length = Str.size();
1595 Op->Tok.IsSuffix = IsSuffix;
1601 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1602 SMLoc E, MCContext &Ctx) {
1603 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1604 Op->Reg.RegNum = RegNum;
1605 Op->Reg.isVector = isVector;
1611 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1612 unsigned NumElements, char ElementKind,
1613 SMLoc S, SMLoc E, MCContext &Ctx) {
1614 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1615 Op->VectorList.RegNum = RegNum;
1616 Op->VectorList.Count = Count;
1617 Op->VectorList.NumElements = NumElements;
1618 Op->VectorList.ElementKind = ElementKind;
1624 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1626 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1627 Op->VectorIndex.Val = Idx;
1633 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1635 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1642 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1643 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1644 Op->FPImm.Val = Val;
1650 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1651 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1652 Op->Barrier.Val = Val;
1658 static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) {
1659 ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
1660 Op->SysReg.Data = Str.data();
1661 Op->SysReg.Length = Str.size();
// Immediate-offset memory operand: offset register cleared, default UXTX
// extend, no shift, mode ImmediateOffset.
1667 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1668 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1670 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1671 Op->Mem.BaseRegNum = BaseRegNum;
1672 Op->Mem.OffsetRegNum = 0;
1673 Op->Mem.OffsetImm = Off;
1674 Op->Mem.ExtType = ARM64_AM::UXTX;
1675 Op->Mem.ShiftVal = 0;
1676 Op->Mem.ExplicitShift = false;
1677 Op->Mem.Mode = ImmediateOffset;
1678 Op->OffsetLoc = OffsetLoc;
// Register-offset memory operand: immediate cleared, caller supplies the
// extend type and shift.
1684 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1685 ARM64_AM::ExtendType ExtType,
1686 unsigned ShiftVal, bool ExplicitShift,
1687 SMLoc S, SMLoc E, MCContext &Ctx) {
1688 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1689 Op->Mem.BaseRegNum = BaseReg;
1690 Op->Mem.OffsetRegNum = OffsetReg;
1691 Op->Mem.OffsetImm = 0;
1692 Op->Mem.ExtType = ExtType;
1693 Op->Mem.ShiftVal = ShiftVal;
1694 Op->Mem.ExplicitShift = ExplicitShift;
1695 Op->Mem.Mode = RegisterOffset;
1701 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1703 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1704 Op->SysCRImm.Val = Val;
1710 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1711 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1712 Op->Prefetch.Val = Val;
// Shifter and extend operands store their type+amount pre-packed into a
// single immediate via the ARM64_AM helpers.
1718 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1719 SMLoc S, SMLoc E, MCContext &Ctx) {
1720 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1721 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1727 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1728 SMLoc S, SMLoc E, MCContext &Ctx) {
1729 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1730 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1737 } // end anonymous namespace.
// Debug dump of an operand, one format per operand kind.
// NOTE(review): the `switch (Kind)` header and the `case k_*:` labels are
// elided from this excerpt — each group of statements below belongs to a
// different case in the full source.
1739 void ARM64Operand::print(raw_ostream &OS) const {
1742 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
// Barrier: print the symbolic name when the mapper recognizes the value.
1747 StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
1749 OS << "<barrier " << Name << ">";
1751 OS << "<barrier invalid #" << getBarrier() << ">";
1755 getImm()->print(OS);
1761 OS << "<register " << getReg() << ">";
1763 case k_VectorList: {
1764 OS << "<vectorlist ";
1765 unsigned Reg = getVectorListStart();
1766 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1767 OS << Reg + i << " ";
1772 OS << "<vectorindex " << getVectorIndex() << ">";
1775 OS << "<sysreg: " << getSysReg() << '>';
1778 OS << "'" << getToken() << "'";
1781 OS << "c" << getSysCR();
// Prefetch: symbolic name when valid, raw number otherwise.
1785 StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1787 OS << "<prfop " << Name << ">";
1789 OS << "<prfop invalid #" << getPrefetch() << ">";
// Shifter/extend: decode the packed immediate back into name + amount.
1793 unsigned Val = getShifter();
1794 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1795 << ARM64_AM::getShiftValue(Val) << ">";
1799 unsigned Val = getExtend();
1800 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1801 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1807 /// @name Auto-generated Match Functions
// Declaration of the tablegen-generated register-name matcher.
1810 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector register name to the corresponding Q register number.
// NOTE(review): the terminating `.Default(0);` (unmatched-name case) is
// elided from this excerpt; confirm against the full source.
1814 static unsigned matchVectorRegName(StringRef Name) {
1815 return StringSwitch<unsigned>(Name)
1816 .Case("v0", ARM64::Q0)
1817 .Case("v1", ARM64::Q1)
1818 .Case("v2", ARM64::Q2)
1819 .Case("v3", ARM64::Q3)
1820 .Case("v4", ARM64::Q4)
1821 .Case("v5", ARM64::Q5)
1822 .Case("v6", ARM64::Q6)
1823 .Case("v7", ARM64::Q7)
1824 .Case("v8", ARM64::Q8)
1825 .Case("v9", ARM64::Q9)
1826 .Case("v10", ARM64::Q10)
1827 .Case("v11", ARM64::Q11)
1828 .Case("v12", ARM64::Q12)
1829 .Case("v13", ARM64::Q13)
1830 .Case("v14", ARM64::Q14)
1831 .Case("v15", ARM64::Q15)
1832 .Case("v16", ARM64::Q16)
1833 .Case("v17", ARM64::Q17)
1834 .Case("v18", ARM64::Q18)
1835 .Case("v19", ARM64::Q19)
1836 .Case("v20", ARM64::Q20)
1837 .Case("v21", ARM64::Q21)
1838 .Case("v22", ARM64::Q22)
1839 .Case("v23", ARM64::Q23)
1840 .Case("v24", ARM64::Q24)
1841 .Case("v25", ARM64::Q25)
1842 .Case("v26", ARM64::Q26)
1843 .Case("v27", ARM64::Q27)
1844 .Case("v28", ARM64::Q28)
1845 .Case("v29", ARM64::Q29)
1846 .Case("v30", ARM64::Q30)
1847 .Case("v31", ARM64::Q31)
// Whether a ".<kind>" suffix names a legal vector arrangement.
// NOTE(review): the .Case lines enumerating the accepted kinds (and the
// .Default) are elided from this excerpt.
1851 static bool isValidVectorKind(StringRef Name) {
1852 return StringSwitch<bool>(Name.lower())
1862 // Accept the width neutral ones, too, for verbose syntax. If those
1863 // aren't used in the right places, the token operand won't match so
1864 // all will work out.
// Split a validated kind suffix (e.g. ".16b") into its lane count and
// element kind character.
// NOTE(review): the initialization of NumElements before the digit loop is
// elided from this excerpt.
1872 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1873 char &ElementKind) {
1874 assert(isValidVectorKind(Name));
// The element kind is always the last character of the suffix.
1876 ElementKind = Name.lower()[Name.size() - 1];
// A two-character suffix (".b" etc.) has no lane count.
1879 if (Name.size() == 2)
1882 // Parse the lane count
1883 Name = Name.drop_front();
1884 while (isdigit(Name.front())) {
1885 NumElements = 10 * NumElements + (Name.front() - '0');
1886 Name = Name.drop_front();
// MCTargetAsmParser entry point: parse a register, reporting its source
// range. Returns true (failure) when tryParseRegister yields -1.
1890 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1892 StartLoc = getLoc();
1893 RegNo = tryParseRegister();
1894 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1895 return (RegNo == (unsigned)-1);
1898 /// tryParseRegister - Try to parse a register name. The token must be an
1899 /// Identifier when called, and if it is a register name the token is eaten and
1900 /// the register is added to the operand list.
// NOTE(review): the `if (RegNum == 0)` guard around the alias StringSwitch,
// its .Default, and the return statements are elided from this excerpt.
1901 int ARM64AsmParser::tryParseRegister() {
1902 const AsmToken &Tok = Parser.getTok();
1903 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Matching is case-insensitive: lower-case before consulting the
// tablegen-generated matcher.
1905 std::string lowerCase = Tok.getString().lower();
1906 unsigned RegNum = MatchRegisterName(lowerCase);
1907 // Also handle a few aliases of registers.
1909 RegNum = StringSwitch<unsigned>(lowerCase)
1910 .Case("fp", ARM64::FP)
1911 .Case("lr", ARM64::LR)
1912 .Case("x31", ARM64::XZR)
1913 .Case("w31", ARM64::WZR)
1919 Parser.Lex(); // Eat identifier token.
1923 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1924 /// kind specifier. If it is a register specifier, eat the token and return it.
1923 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1924 /// kind specifier. If it is a register specifier, eat the token and return it.
// NOTE(review): the early `return -1;` statements after each TokError and
// the success-path `return RegNum;` / failure handling around line 1936 are
// elided from this excerpt.
1925 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1926 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1927 TokError("vector register expected");
1931 StringRef Name = Parser.getTok().getString();
1932 // If there is a kind specifier, it's separated from the register name by
// a '.' — e.g. "v0.8b".
1934 size_t Start = 0, Next = Name.find('.');
1935 StringRef Head = Name.slice(Start, Next);
1936 unsigned RegNum = matchVectorRegName(Head);
1938 if (Next != StringRef::npos) {
// Kind includes the leading '.'.
1939 Kind = Name.slice(Next, StringRef::npos);
1940 if (!isValidVectorKind(Kind)) {
1941 TokError("invalid vector kind qualifier");
1945 Parser.Lex(); // Eat the register token.
1950 TokError("vector register expected");
// Map a "cN"/"CN" system control-register name to its number.
// NOTE(review): almost the entire switch body (the per-length cases and
// their return statements, lines 1958-2009) is elided from this excerpt;
// only the length-dispatch skeleton is visible.
1954 static int MatchSysCRName(StringRef Name) {
1955 // Use the same layout as the tablegen'erated register name matcher. Ugly,
// but presumably fast.
1957 switch (Name.size()) {
1961 if (Name[0] != 'c' && Name[0] != 'C')
1989 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
2010 llvm_unreachable("Unhandled SysCR operand string!");
2014 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Returns NoMatch for non-identifiers or unrecognized names, otherwise
// consumes the token and pushes a SysCR operand.
2015 ARM64AsmParser::OperandMatchResultTy
2016 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
2018 const AsmToken &Tok = Parser.getTok();
2019 if (Tok.isNot(AsmToken::Identifier))
2020 return MatchOperand_NoMatch;
2022 int Num = MatchSysCRName(Tok.getString());
2024 return MatchOperand_NoMatch;
2026 Parser.Lex(); // Eat identifier token.
2027 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2028 return MatchOperand_Success;
2031 /// tryParsePrefetch - Try to parse a prefetch operand.
2032 ARM64AsmParser::OperandMatchResultTy
2033 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
2035 const AsmToken &Tok = Parser.getTok();
2036 // Either an identifier for named values or a 5-bit immediate.
2037 bool Hash = Tok.is(AsmToken::Hash);
2038 if (Hash || Tok.is(AsmToken::Integer)) {
2040 Parser.Lex(); // Eat hash token.
2041 const MCExpr *ImmVal;
2042 if (getParser().parseExpression(ImmVal))
2043 return MatchOperand_ParseFail;
2045 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2047 TokError("immediate value expected for prefetch operand");
2048 return MatchOperand_ParseFail;
2050 unsigned prfop = MCE->getValue();
2052 TokError("prefetch operand out of range, [0,31] expected");
2053 return MatchOperand_ParseFail;
2056 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2057 return MatchOperand_Success;
2060 if (Tok.isNot(AsmToken::Identifier)) {
2061 TokError("pre-fetch hint expected");
2062 return MatchOperand_ParseFail;
2066 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2068 TokError("pre-fetch hint expected");
2069 return MatchOperand_ParseFail;
2072 Parser.Lex(); // Eat identifier token.
2073 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2074 return MatchOperand_Success;
2077 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
2077 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// instruction, accepting only page-relative reference modifiers.
// NOTE(review): the declarations of S, Expr, and Addend are elided from
// this excerpt; confirm against the full source.
2079 ARM64AsmParser::OperandMatchResultTy
2080 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2084 if (Parser.getTok().is(AsmToken::Hash)) {
2085 Parser.Lex(); // Eat hash token.
2088 if (parseSymbolicImmVal(Expr))
2089 return MatchOperand_ParseFail;
2091 ARM64MCExpr::VariantKind ELFRefKind;
2092 MCSymbolRefExpr::VariantKind DarwinRefKind;
2094 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2095 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2096 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2097 // No modifier was specified at all; this is the syntax for an ELF basic
2098 // ADRP relocation (unfortunately).
2099 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2100 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2101 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// ... (the Addend != 0 half of this condition is elided here).
2103 Error(S, "gotpage label reference not allowed an addend");
2104 return MatchOperand_ParseFail;
2105 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2106 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2107 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2108 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2109 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2110 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2111 // The operand must be an @page or @gotpage qualified symbolref.
2112 Error(S, "page or gotpage label reference expected");
2113 return MatchOperand_ParseFail;
2117 // We have either a label reference possibly with addend or an immediate. The
2118 // addend is a raw value here. The linker will adjust it to only reference the
// page it lands on.
2120 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2121 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2123 return MatchOperand_Success;
2126 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// instruction. Unlike ADRP, no modifier validation is performed.
2128 ARM64AsmParser::OperandMatchResultTy
2129 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2133 if (Parser.getTok().is(AsmToken::Hash)) {
2134 Parser.Lex(); // Eat hash token.
2137 if (getParser().parseExpression(Expr))
2138 return MatchOperand_ParseFail;
2140 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2141 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2143 return MatchOperand_Success;
2146 /// tryParseFPImm - A floating point immediate expression operand.
2146 /// tryParseFPImm - A floating point immediate expression operand.
// Two forms: a real literal (encoded via getFP64Imm, with 0.0 allowed
// through for later zero-register rewriting), or — for hex integers — a
// pre-encoded 8-bit immediate taken verbatim.
// NOTE(review): the declaration of S, the Hash bookkeeping branches, and
// the isNegative assignment under the Minus check are elided from this
// excerpt.
2147 ARM64AsmParser::OperandMatchResultTy
2148 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2152 if (Parser.getTok().is(AsmToken::Hash)) {
2153 Parser.Lex(); // Eat '#'
2157 // Handle negation, as that still comes through as a separate token.
2158 bool isNegative = false;
2159 if (Parser.getTok().is(AsmToken::Minus)) {
2163 const AsmToken &Tok = Parser.getTok();
2164 if (Tok.is(AsmToken::Real)) {
2165 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2166 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2167 // If we had a '-' in front, toggle the sign bit.
2168 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns -1 when the value is not representable as an FP8
// immediate.
2169 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2170 Parser.Lex(); // Eat the token.
2171 // Check for out of range values. As an exception, we let Zero through,
2172 // as we handle that special case in post-processing before matching in
2173 // order to use the zero register for it.
2174 if (Val == -1 && !RealVal.isZero()) {
2175 TokError("floating point value out of range");
2176 return MatchOperand_ParseFail;
2178 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2179 return MatchOperand_Success;
2181 if (Tok.is(AsmToken::Integer)) {
// Hex literal: the value *is* the encoded 8-bit immediate.
2183 if (!isNegative && Tok.getString().startswith("0x")) {
2184 Val = Tok.getIntVal();
2185 if (Val > 255 || Val < 0) {
2186 TokError("encoded floating point value out of range");
2187 return MatchOperand_ParseFail;
// Decimal integer: treat as a real value and encode it.
2190 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2191 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2192 // If we had a '-' in front, toggle the sign bit.
2193 IntVal ^= (uint64_t)isNegative << 63;
2194 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2196 Parser.Lex(); // Eat the token.
2197 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2198 return MatchOperand_Success;
// No '#' and not a recognizable literal: not our operand.
2202 return MatchOperand_NoMatch;
2204 TokError("invalid floating point immediate");
2205 return MatchOperand_ParseFail;
2208 /// parseCondCodeString - Parse a Condition Code string.
2208 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive mapping of A64 condition mnemonics to ARM64CC values;
// "hs"/"lo" are aliases of "cs"/"cc".
// NOTE(review): the trailing `return CC;` is elided from this excerpt.
2209 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2210 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2211 .Case("eq", ARM64CC::EQ)
2212 .Case("ne", ARM64CC::NE)
2213 .Case("cs", ARM64CC::CS)
2214 .Case("hs", ARM64CC::CS)
2215 .Case("cc", ARM64CC::CC)
2216 .Case("lo", ARM64CC::CC)
2217 .Case("mi", ARM64CC::MI)
2218 .Case("pl", ARM64CC::PL)
2219 .Case("vs", ARM64CC::VS)
2220 .Case("vc", ARM64CC::VC)
2221 .Case("hi", ARM64CC::HI)
2222 .Case("ls", ARM64CC::LS)
2223 .Case("ge", ARM64CC::GE)
2224 .Case("lt", ARM64CC::LT)
2225 .Case("gt", ARM64CC::GT)
2226 .Case("le", ARM64CC::LE)
2227 .Case("al", ARM64CC::AL)
2228 .Case("nv", ARM64CC::NV)
2229 .Default(ARM64CC::Invalid);
2233 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier token and pushes the (optionally inverted)
// condition code as a constant-immediate operand.
// NOTE(review): the `if (invertCondCode)` guard before the inversion and
// the final `return false;` are elided from this excerpt.
2234 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2235 bool invertCondCode) {
2237 const AsmToken &Tok = Parser.getTok();
2238 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2240 StringRef Cond = Tok.getString();
2241 unsigned CC = parseCondCodeString(Cond);
2242 if (CC == ARM64CC::Invalid)
2243 return TokError("invalid condition code");
2244 Parser.Lex(); // Eat identifier token.
2247 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
// The condition code is represented as a plain constant immediate operand.
2249 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2251 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2255 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2256 /// them if present.
2255 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2256 /// them if present.
// Recognizes lsl/lsr/asr/ror/msl (either case) followed by an immediate
// shift amount of at most 63, and pushes a Shifter operand.
// NOTE(review): the Lex() that eats the shift mnemonic, the `if (Hash)`
// guard, the parse-failure return, and the trailing `return false;` are
// elided from this excerpt.
2257 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2258 const AsmToken &Tok = Parser.getTok();
2259 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2260 .Case("lsl", ARM64_AM::LSL)
2261 .Case("lsr", ARM64_AM::LSR)
2262 .Case("asr", ARM64_AM::ASR)
2263 .Case("ror", ARM64_AM::ROR)
2264 .Case("msl", ARM64_AM::MSL)
2265 .Case("LSL", ARM64_AM::LSL)
2266 .Case("LSR", ARM64_AM::LSR)
2267 .Case("ASR", ARM64_AM::ASR)
2268 .Case("ROR", ARM64_AM::ROR)
2269 .Case("MSL", ARM64_AM::MSL)
2270 .Default(ARM64_AM::InvalidShift);
// Not a shift mnemonic: leave the token alone and report no shift present.
2271 if (ShOp == ARM64_AM::InvalidShift)
2274 SMLoc S = Tok.getLoc();
2277 // We expect a number here.
2278 bool Hash = getLexer().is(AsmToken::Hash);
2279 if (!Hash && getLexer().isNot(AsmToken::Integer))
2280 return TokError("immediate value expected for shifter operand");
2283 Parser.Lex(); // Eat the '#'.
2285 SMLoc ExprLoc = getLoc();
2286 const MCExpr *ImmVal;
2287 if (getParser().parseExpression(ImmVal))
2290 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2292 return TokError("immediate value expected for shifter operand");
// Shift amounts must fit in 6 bits.
2294 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2295 return Error(ExprLoc, "immediate value too large for shifter operand");
2297 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2299 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2303 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2304 /// them if present.
2303 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2304 /// them if present.
// Recognizes the uxt*/sxt* extend mnemonics (either case; "lsl" aliases
// UXTX) with an optional '#'-prefixed amount; a missing amount yields an
// Extend operand with value 0.
// NOTE(review): the Lex() that eats the extend mnemonic, the `if (Hash)`
// guard, the parse-failure return, and the `return false;` statements are
// elided from this excerpt.
2305 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2306 const AsmToken &Tok = Parser.getTok();
2307 ARM64_AM::ExtendType ExtOp =
2308 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2309 .Case("uxtb", ARM64_AM::UXTB)
2310 .Case("uxth", ARM64_AM::UXTH)
2311 .Case("uxtw", ARM64_AM::UXTW)
2312 .Case("uxtx", ARM64_AM::UXTX)
2313 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2314 .Case("sxtb", ARM64_AM::SXTB)
2315 .Case("sxth", ARM64_AM::SXTH)
2316 .Case("sxtw", ARM64_AM::SXTW)
2317 .Case("sxtx", ARM64_AM::SXTX)
2318 .Case("UXTB", ARM64_AM::UXTB)
2319 .Case("UXTH", ARM64_AM::UXTH)
2320 .Case("UXTW", ARM64_AM::UXTW)
2321 .Case("UXTX", ARM64_AM::UXTX)
2322 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2323 .Case("SXTB", ARM64_AM::SXTB)
2324 .Case("SXTH", ARM64_AM::SXTH)
2325 .Case("SXTW", ARM64_AM::SXTW)
2326 .Case("SXTX", ARM64_AM::SXTX)
2327 .Default(ARM64_AM::InvalidExtend)
2328 if (ExtOp == ARM64_AM::InvalidExtend)
2331 SMLoc S = Tok.getLoc();
// End of statement or comma: extend with implicit zero amount.
2334 if (getLexer().is(AsmToken::EndOfStatement) ||
2335 getLexer().is(AsmToken::Comma)) {
2336 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2338 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
// No '#' and no integer next: also treat as zero-amount extend.
2342 bool Hash = getLexer().is(AsmToken::Hash);
2343 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2344 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2346 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2351 Parser.Lex(); // Eat the '#'.
2353 const MCExpr *ImmVal;
2354 if (getParser().parseExpression(ImmVal))
2357 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2359 return TokError("immediate value expected for extend operand");
2361 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2363 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2367 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2368 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// On success, Operands holds a "sys" token followed by the op1, Cn, Cm and
/// op2 operands for the alias, plus an optional trailing register operand.
/// Returns true (after emitting a diagnostic) on error.
2369 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2370 OperandVector &Operands) {
// None of these aliases take a '.'-suffixed mnemonic.
2371 if (Name.find('.') != StringRef::npos)
2372 return TokError("invalid operand");
2376 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2378 const AsmToken &Tok = Parser.getTok();
2379 StringRef Op = Tok.getString();
2380 SMLoc S = Tok.getLoc();
2382 const MCExpr *Expr = 0;
// Helper macro: push the four SYS operands (#op1, Cn, Cm, #op2) for one
// alias. The comments below each spell out the resulting SYS encoding.
2384 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2386 Expr = MCConstantExpr::Create(op1, getContext()); \
2387 Operands.push_back( \
2388 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2389 Operands.push_back( \
2390 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2391 Operands.push_back( \
2392 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2393 Expr = MCConstantExpr::Create(op2, getContext()); \
2394 Operands.push_back( \
2395 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
// Operand names are matched case-insensitively (compare_lower).
2398 if (Mnemonic == "ic") {
2399 if (!Op.compare_lower("ialluis")) {
2400 // SYS #0, C7, C1, #0
2401 SYS_ALIAS(0, 7, 1, 0);
2402 } else if (!Op.compare_lower("iallu")) {
2403 // SYS #0, C7, C5, #0
2404 SYS_ALIAS(0, 7, 5, 0);
2405 } else if (!Op.compare_lower("ivau")) {
2406 // SYS #3, C7, C5, #1
2407 SYS_ALIAS(3, 7, 5, 1);
2409 return TokError("invalid operand for IC instruction");
2411 } else if (Mnemonic == "dc") {
2412 if (!Op.compare_lower("zva")) {
2413 // SYS #3, C7, C4, #1
2414 SYS_ALIAS(3, 7, 4, 1);
2415 } else if (!Op.compare_lower("ivac")) {
2416 // SYS #0, C7, C6, #1
2417 SYS_ALIAS(0, 7, 6, 1);
2418 } else if (!Op.compare_lower("isw")) {
2419 // SYS #0, C7, C6, #2
2420 SYS_ALIAS(0, 7, 6, 2);
2421 } else if (!Op.compare_lower("cvac")) {
2422 // SYS #3, C7, C10, #1
2423 SYS_ALIAS(3, 7, 10, 1);
2424 } else if (!Op.compare_lower("csw")) {
2425 // SYS #0, C7, C10, #2
2426 SYS_ALIAS(0, 7, 10, 2);
2427 } else if (!Op.compare_lower("cvau")) {
2428 // SYS #3, C7, C11, #1
2429 SYS_ALIAS(3, 7, 11, 1);
2430 } else if (!Op.compare_lower("civac")) {
2431 // SYS #3, C7, C14, #1
2432 SYS_ALIAS(3, 7, 14, 1);
2433 } else if (!Op.compare_lower("cisw")) {
2434 // SYS #0, C7, C14, #2
2435 SYS_ALIAS(0, 7, 14, 2);
2437 return TokError("invalid operand for DC instruction");
2439 } else if (Mnemonic == "at") {
2440 if (!Op.compare_lower("s1e1r")) {
2441 // SYS #0, C7, C8, #0
2442 SYS_ALIAS(0, 7, 8, 0);
2443 } else if (!Op.compare_lower("s1e2r")) {
2444 // SYS #4, C7, C8, #0
2445 SYS_ALIAS(4, 7, 8, 0);
2446 } else if (!Op.compare_lower("s1e3r")) {
2447 // SYS #6, C7, C8, #0
2448 SYS_ALIAS(6, 7, 8, 0);
2449 } else if (!Op.compare_lower("s1e1w")) {
2450 // SYS #0, C7, C8, #1
2451 SYS_ALIAS(0, 7, 8, 1);
2452 } else if (!Op.compare_lower("s1e2w")) {
2453 // SYS #4, C7, C8, #1
2454 SYS_ALIAS(4, 7, 8, 1);
2455 } else if (!Op.compare_lower("s1e3w")) {
2456 // SYS #6, C7, C8, #1
2457 SYS_ALIAS(6, 7, 8, 1);
2458 } else if (!Op.compare_lower("s1e0r")) {
2459 // SYS #0, C7, C8, #2
2460 SYS_ALIAS(0, 7, 8, 2);
2461 } else if (!Op.compare_lower("s1e0w")) {
2462 // SYS #0, C7, C8, #3
2463 SYS_ALIAS(0, 7, 8, 3);
2464 } else if (!Op.compare_lower("s12e1r")) {
2465 // SYS #4, C7, C8, #4
2466 SYS_ALIAS(4, 7, 8, 4);
2467 } else if (!Op.compare_lower("s12e1w")) {
2468 // SYS #4, C7, C8, #5
2469 SYS_ALIAS(4, 7, 8, 5);
2470 } else if (!Op.compare_lower("s12e0r")) {
2471 // SYS #4, C7, C8, #6
2472 SYS_ALIAS(4, 7, 8, 6);
2473 } else if (!Op.compare_lower("s12e0w")) {
2474 // SYS #4, C7, C8, #7
2475 SYS_ALIAS(4, 7, 8, 7);
2477 return TokError("invalid operand for AT instruction");
2479 } else if (Mnemonic == "tlbi") {
2480 if (!Op.compare_lower("vmalle1is")) {
2481 // SYS #0, C8, C3, #0
2482 SYS_ALIAS(0, 8, 3, 0);
2483 } else if (!Op.compare_lower("alle2is")) {
2484 // SYS #4, C8, C3, #0
2485 SYS_ALIAS(4, 8, 3, 0);
2486 } else if (!Op.compare_lower("alle3is")) {
2487 // SYS #6, C8, C3, #0
2488 SYS_ALIAS(6, 8, 3, 0);
2489 } else if (!Op.compare_lower("vae1is")) {
2490 // SYS #0, C8, C3, #1
2491 SYS_ALIAS(0, 8, 3, 1);
2492 } else if (!Op.compare_lower("vae2is")) {
2493 // SYS #4, C8, C3, #1
2494 SYS_ALIAS(4, 8, 3, 1);
2495 } else if (!Op.compare_lower("vae3is")) {
2496 // SYS #6, C8, C3, #1
2497 SYS_ALIAS(6, 8, 3, 1);
2498 } else if (!Op.compare_lower("aside1is")) {
2499 // SYS #0, C8, C3, #2
2500 SYS_ALIAS(0, 8, 3, 2);
2501 } else if (!Op.compare_lower("vaae1is")) {
2502 // SYS #0, C8, C3, #3
2503 SYS_ALIAS(0, 8, 3, 3);
2504 } else if (!Op.compare_lower("alle1is")) {
2505 // SYS #4, C8, C3, #4
2506 SYS_ALIAS(4, 8, 3, 4);
2507 } else if (!Op.compare_lower("vale1is")) {
2508 // SYS #0, C8, C3, #5
2509 SYS_ALIAS(0, 8, 3, 5);
2510 } else if (!Op.compare_lower("vaale1is")) {
2511 // SYS #0, C8, C3, #7
2512 SYS_ALIAS(0, 8, 3, 7);
2513 } else if (!Op.compare_lower("vmalle1")) {
2514 // SYS #0, C8, C7, #0
2515 SYS_ALIAS(0, 8, 7, 0);
2516 } else if (!Op.compare_lower("alle2")) {
2517 // SYS #4, C8, C7, #0
2518 SYS_ALIAS(4, 8, 7, 0);
2519 } else if (!Op.compare_lower("vale2is")) {
2520 // SYS #4, C8, C3, #5
2521 SYS_ALIAS(4, 8, 3, 5);
2522 } else if (!Op.compare_lower("vale3is")) {
2523 // SYS #6, C8, C3, #5
2524 SYS_ALIAS(6, 8, 3, 5);
2525 } else if (!Op.compare_lower("alle3")) {
2526 // SYS #6, C8, C7, #0
2527 SYS_ALIAS(6, 8, 7, 0);
2528 } else if (!Op.compare_lower("vae1")) {
2529 // SYS #0, C8, C7, #1
2530 SYS_ALIAS(0, 8, 7, 1);
2531 } else if (!Op.compare_lower("vae2")) {
2532 // SYS #4, C8, C7, #1
2533 SYS_ALIAS(4, 8, 7, 1);
2534 } else if (!Op.compare_lower("vae3")) {
2535 // SYS #6, C8, C7, #1
2536 SYS_ALIAS(6, 8, 7, 1);
2537 } else if (!Op.compare_lower("aside1")) {
2538 // SYS #0, C8, C7, #2
2539 SYS_ALIAS(0, 8, 7, 2);
2540 } else if (!Op.compare_lower("vaae1")) {
2541 // SYS #0, C8, C7, #3
2542 SYS_ALIAS(0, 8, 7, 3);
2543 } else if (!Op.compare_lower("alle1")) {
2544 // SYS #4, C8, C7, #4
2545 SYS_ALIAS(4, 8, 7, 4);
2546 } else if (!Op.compare_lower("vale1")) {
2547 // SYS #0, C8, C7, #5
2548 SYS_ALIAS(0, 8, 7, 5);
2549 } else if (!Op.compare_lower("vale2")) {
2550 // SYS #4, C8, C7, #5
2551 SYS_ALIAS(4, 8, 7, 5);
2552 } else if (!Op.compare_lower("vale3")) {
2553 // SYS #6, C8, C7, #5
2554 SYS_ALIAS(6, 8, 7, 5);
2555 } else if (!Op.compare_lower("vaale1")) {
2556 // SYS #0, C8, C7, #7
2557 SYS_ALIAS(0, 8, 7, 7);
2558 } else if (!Op.compare_lower("ipas2e1")) {
2559 // SYS #4, C8, C4, #1
2560 SYS_ALIAS(4, 8, 4, 1);
2561 } else if (!Op.compare_lower("ipas2le1")) {
2562 // SYS #4, C8, C4, #5
2563 SYS_ALIAS(4, 8, 4, 5);
2564 } else if (!Op.compare_lower("ipas2e1is")) {
2565 // SYS #4, C8, C0, #1
2566 SYS_ALIAS(4, 8, 0, 1);
2567 } else if (!Op.compare_lower("ipas2le1is")) {
2568 // SYS #4, C8, C0, #5
2569 SYS_ALIAS(4, 8, 0, 5);
2570 } else if (!Op.compare_lower("vmalls12e1")) {
2571 // SYS #4, C8, C7, #6
2572 SYS_ALIAS(4, 8, 7, 6);
2573 } else if (!Op.compare_lower("vmalls12e1is")) {
2574 // SYS #4, C8, C3, #6
2575 SYS_ALIAS(4, 8, 3, 6);
2577 return TokError("invalid operand for TLBI instruction");
2583 Parser.Lex(); // Eat operand.
// Ops whose (lower-cased) name contains "all" act on everything and take no
// register; the others require a trailing register operand.
2585 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2586 bool HasRegister = false;
2588 // Check for the optional register operand.
2589 if (getLexer().is(AsmToken::Comma)) {
2590 Parser.Lex(); // Eat comma.
2592 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2593 return TokError("expected register operand");
2598 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2599 Parser.eatToEndOfStatement();
2600 return TokError("unexpected token in argument list");
2603 if (ExpectRegister && !HasRegister) {
2604 return TokError("specified " + Mnemonic + " op requires a register");
2606 else if (!ExpectRegister && HasRegister) {
2607 return TokError("specified " + Mnemonic + " op does not use a register");
2610 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
/// instruction: either a named option (e.g. "sy") or a #imm in [0, 15].
2614 ARM64AsmParser::OperandMatchResultTy
2615 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2616 const AsmToken &Tok = Parser.getTok();
2618 // Can be either a #imm style literal or an option name
2619 bool Hash = Tok.is(AsmToken::Hash);
2620 if (Hash || Tok.is(AsmToken::Integer)) {
2621 // Immediate operand.
2623 Parser.Lex(); // Eat the '#'
2624 const MCExpr *ImmVal;
2625 SMLoc ExprLoc = getLoc();
2626 if (getParser().parseExpression(ImmVal))
2627 return MatchOperand_ParseFail;
// The barrier immediate must fold to a constant.
2628 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2630 Error(ExprLoc, "immediate value expected for barrier operand");
2631 return MatchOperand_ParseFail;
// The barrier option field is 4 bits wide, hence [0, 15].
2633 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2634 Error(ExprLoc, "barrier operand out of range");
2635 return MatchOperand_ParseFail;
2638 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2639 return MatchOperand_Success;
2642 if (Tok.isNot(AsmToken::Identifier)) {
2643 TokError("invalid operand for instruction");
2644 return MatchOperand_ParseFail;
// Map the option name (e.g. "ish", "osh") to its encoding.
2648 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2650 TokError("invalid barrier option name");
2651 return MatchOperand_ParseFail;
2654 // The only valid named option for ISB is 'sy'
2655 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2656 TokError("'sy' or #imm operand expected");
2657 return MatchOperand_ParseFail;
2660 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2661 Parser.Lex(); // Consume the option
2663 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (for MRS/MSR) by name.
/// The identifier is recorded as-is; presumably the name is validated later
/// during operand matching — confirm against ARM64Operand::CreateSysReg.
2666 ARM64AsmParser::OperandMatchResultTy
2667 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2668 const AsmToken &Tok = Parser.getTok();
2670 if (Tok.isNot(AsmToken::Identifier))
2671 return MatchOperand_NoMatch;
2673 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2675 Parser.Lex(); // Eat identifier
2677 return MatchOperand_Success;
2680 /// tryParseVectorRegister - Parse a vector register operand.
/// Handles an optional arrangement qualifier (pushed as a token) and an
/// optional "[imm]" lane index following the register.
2681 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2682 if (Parser.getTok().isNot(AsmToken::Identifier))
2686 // Check for a vector register specifier first.
2688 int64_t Reg = tryMatchVectorRegister(Kind, false);
2692 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2693 // If there was an explicit qualifier, that goes on as a literal text
2696 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2698 // If there is an index specifier following the register, parse that too.
2699 if (Parser.getTok().is(AsmToken::LBrac)) {
2700 SMLoc SIdx = getLoc();
2701 Parser.Lex(); // Eat left bracket token.
2703 const MCExpr *ImmVal;
2704 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a constant expression.
2706 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2708 TokError("immediate value expected for vector index");
2713 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2714 Error(E, "']' expected");
2718 Parser.Lex(); // Eat right bracket token.
2720 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2727 /// parseRegister - Parse a non-vector register operand.
/// Falls back to vector-register parsing first; then tries a scalar register
/// and the special "[1]" literal-token suffix some instructions carry.
2728 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2730 // Try for a vector register.
2731 if (!tryParseVectorRegister(Operands))
2734 // Try for a scalar register.
2735 int64_t Reg = tryParseRegister();
2739 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2741 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2742 // as a string token in the instruction itself.
2743 if (getLexer().getKind() == AsmToken::LBrac) {
2744 SMLoc LBracS = getLoc();
2746 const AsmToken &Tok = Parser.getTok();
2747 if (Tok.is(AsmToken::Integer)) {
2748 SMLoc IntS = getLoc();
2749 int64_t Val = Tok.getIntVal();
2752 if (getLexer().getKind() == AsmToken::RBrac) {
2753 SMLoc RBracS = getLoc();
// NOTE(review): a literal "1" token is always pushed here; Val is presumably
// checked to be 1 before this point — confirm in the surrounding code.
2756 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2758 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2760 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2770 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2771 /// do not allow base register writeback modes,
2772 /// or those that handle writeback separately from
2773 /// the memory operand (like the AdvSIMD ldX/stX
2775 ARM64AsmParser::OperandMatchResultTy
2776 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
// Not a '[' means this is not a memory operand at all — let other parsers try.
2777 if (Parser.getTok().isNot(AsmToken::LBrac))
2778 return MatchOperand_NoMatch;
2780 Parser.Lex(); // Eat left bracket token.
2782 const AsmToken &BaseRegTok = Parser.getTok();
2783 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2784 Error(BaseRegTok.getLoc(), "register expected");
2785 return MatchOperand_ParseFail;
2788 int64_t Reg = tryParseRegister();
2790 Error(BaseRegTok.getLoc(), "register expected");
2791 return MatchOperand_ParseFail;
// Only "[Xn]" is accepted — no offset, extend, or writeback syntax.
2795 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2796 Error(E, "']' expected");
2797 return MatchOperand_ParseFail;
2800 Parser.Lex(); // Eat right bracket token.
2802 Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
2803 return MatchOperand_Success;
2806 /// parseMemory - Parse a memory operand for a basic load/store instruction.
/// Handles three offset forms after "[Rn": a register offset with optional
/// extend and amount, an immediate expression, and a symbolic reference;
/// also recognizes a trailing '!' for pre-indexed writeback.
2807 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2808 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2810 Parser.Lex(); // Eat left bracket token.
2812 const AsmToken &BaseRegTok = Parser.getTok();
2813 if (BaseRegTok.isNot(AsmToken::Identifier))
2814 return Error(BaseRegTok.getLoc(), "register expected");
2816 int64_t Reg = tryParseRegister();
2818 return Error(BaseRegTok.getLoc(), "register expected");
2820 // If there is an offset expression, parse it.
2821 const MCExpr *OffsetExpr = 0;
2823 if (Parser.getTok().is(AsmToken::Comma)) {
2824 Parser.Lex(); // Eat the comma.
2825 OffsetLoc = getLoc();
// Register-offset form: "[Rn, Rm{, extend {#amt}}]".
2828 const AsmToken &OffsetRegTok = Parser.getTok();
2829 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2831 // Default shift is LSL, with an omitted shift. We use the third bit of
2832 // the extend value to indicate presence/omission of the immediate offset.
2833 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
2834 int64_t ShiftVal = 0;
2835 bool ExplicitShift = false;
2837 if (Parser.getTok().is(AsmToken::Comma)) {
2838 // Embedded extend operand.
2839 Parser.Lex(); // Eat the comma
2841 SMLoc ExtLoc = getLoc();
2842 const AsmToken &Tok = Parser.getTok();
// Only word/doubleword extends are legal here; "lsl" aliases UXTX.
2843 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2844 .Case("uxtw", ARM64_AM::UXTW)
2845 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2846 .Case("sxtw", ARM64_AM::SXTW)
2847 .Case("sxtx", ARM64_AM::SXTX)
2848 .Case("UXTW", ARM64_AM::UXTW)
2849 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2850 .Case("SXTW", ARM64_AM::SXTW)
2851 .Case("SXTX", ARM64_AM::SXTX)
2852 .Default(ARM64_AM::InvalidExtend);
2853 if (ExtOp == ARM64_AM::InvalidExtend)
2854 return Error(ExtLoc, "expected valid extend operation");
2856 Parser.Lex(); // Eat the extend op.
2858 // A 32-bit offset register is only valid for [SU]/XTW extend
2860 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
2861 if (ExtOp != ARM64_AM::UXTW &&
2862 ExtOp != ARM64_AM::SXTW)
2863 return Error(ExtLoc, "32-bit general purpose offset register "
2864 "requires sxtw or uxtw extend");
2865 } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
2867 return Error(OffsetLoc,
2868 "64-bit general purpose offset register expected");
// Optional shift/extend amount, '#'-prefixed or a bare integer.
2870 bool Hash = getLexer().is(AsmToken::Hash);
2871 if (getLexer().is(AsmToken::RBrac)) {
2872 // No immediate operand.
2873 if (ExtOp == ARM64_AM::UXTX)
2874 return Error(ExtLoc, "LSL extend requires immediate operand");
2875 } else if (Hash || getLexer().is(AsmToken::Integer)) {
2876 // Immediate operand.
2878 Parser.Lex(); // Eat the '#'
2879 const MCExpr *ImmVal;
2880 SMLoc ExprLoc = getLoc();
2881 if (getParser().parseExpression(ImmVal))
2883 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2885 return TokError("immediate value expected for extend operand");
2887 ExplicitShift = true;
2888 ShiftVal = MCE->getValue();
// Extend amounts outside [0, 4] cannot be encoded.
2889 if (ShiftVal < 0 || ShiftVal > 4)
2890 return Error(ExprLoc, "immediate operand out of range");
2892 return Error(getLoc(), "expected immediate operand");
2895 if (Parser.getTok().isNot(AsmToken::RBrac))
2896 return Error(getLoc(), "']' expected");
2898 Parser.Lex(); // Eat right bracket token.
2901 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
2902 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
2905 // Immediate expressions.
2906 } else if (Parser.getTok().is(AsmToken::Hash) ||
2907 Parser.getTok().is(AsmToken::Integer)) {
2908 if (Parser.getTok().is(AsmToken::Hash))
2909 Parser.Lex(); // Eat hash token.
2911 if (parseSymbolicImmVal(OffsetExpr))
2914 // FIXME: We really should make sure that we're dealing with a LDR/STR
2915 // instruction that can legally have a symbolic expression here.
2916 // Symbol reference.
2917 if (Parser.getTok().isNot(AsmToken::Identifier) &&
2918 Parser.getTok().isNot(AsmToken::String))
2919 return Error(getLoc(), "identifier or immediate expression expected");
2920 if (getParser().parseExpression(OffsetExpr))
2922 // If this is a plain ref, Make sure a legal variant kind was specified.
2923 // Otherwise, it's a more complicated expression and we have to just
2924 // assume it's OK and let the relocation stuff puke if it's not.
2925 ARM64MCExpr::VariantKind ELFRefKind;
2926 MCSymbolRefExpr::VariantKind DarwinRefKind;
2928 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
2930 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
2931 "ELF symbol modifiers not supported here yet");
2933 switch (DarwinRefKind) {
2935 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
2936 case MCSymbolRefExpr::VK_GOTPAGEOFF:
2937 case MCSymbolRefExpr::VK_PAGEOFF:
2938 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
2939 // These are what we're expecting.
2947 if (Parser.getTok().isNot(AsmToken::RBrac))
2948 return Error(E, "']' expected");
2950 Parser.Lex(); // Eat right bracket token.
2952 // Create the memory operand.
2954 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
2956 // Check for a '!', indicating pre-indexed addressing with writeback.
2957 if (Parser.getTok().is(AsmToken::Exclaim)) {
2958 // There needs to have been an immediate or wback doesn't make sense.
2960 return Error(E, "missing offset for pre-indexed addressing");
2961 // Pre-indexed with writeback must have a constant expression for the
2962 // offset. FIXME: Theoretically, we'd like to allow fixups so long
2963 // as they don't require a relocation.
2964 if (!isa<MCConstantExpr>(OffsetExpr))
2965 return Error(OffsetLoc, "constant immediate expression expected");
2967 // Create the Token operand for the '!'.
2968 Operands.push_back(ARM64Operand::CreateToken(
2969 "!", false, Parser.getTok().getLoc(), getContext()));
2970 Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// ":specifier:" ELF relocation modifier prefix (e.g. ":lo12:sym"). When a
/// modifier is present, the parsed expression is wrapped in an ARM64MCExpr
/// carrying the corresponding VariantKind.
2976 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2977 bool HasELFModifier = false;
2978 ARM64MCExpr::VariantKind RefKind;
2980 if (Parser.getTok().is(AsmToken::Colon)) {
2981 Parser.Lex(); // Eat ':'
2982 HasELFModifier = true;
2984 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2985 Error(Parser.getTok().getLoc(),
2986 "expect relocation specifier in operand after ':'");
// Specifier names are matched case-insensitively.
2990 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2991 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
2992 .Case("lo12", ARM64MCExpr::VK_LO12)
2993 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
2994 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
2995 .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
2996 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
2997 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
2998 .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
2999 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3000 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3001 .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
3002 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3003 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3004 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3005 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3006 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3007 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3008 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3009 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3010 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3011 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3012 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3013 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3014 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3015 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3016 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3017 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3018 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3019 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3020 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3021 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3022 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3023 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3024 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3025 .Default(ARM64MCExpr::VK_INVALID);
3027 if (RefKind == ARM64MCExpr::VK_INVALID) {
3028 Error(Parser.getTok().getLoc(),
3029 "expect relocation specifier in operand after ':'");
3033 Parser.Lex(); // Eat identifier
// The specifier must be closed by a second ':' before the expression.
3035 if (Parser.getTok().isNot(AsmToken::Colon)) {
3036 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3039 Parser.Lex(); // Eat ':'
3042 if (getParser().parseExpression(ImmVal))
// Wrap the expression so the relocation kind travels with it.
3046 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3051 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Accepts both range syntax "{ v0.8b - v3.8b }" and comma-separated lists
/// "{ v0.8b, v1.8b, ... }" (registers must be sequential, wrapping at 31),
/// plus an optional "[imm]" lane index after the closing '}'.
3052 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3053 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Curly Brace");
3055 Parser.Lex(); // Eat the '{' token.
3057 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3060 int64_t PrevReg = FirstReg;
// Range form: "{ vA.K - vB.K }".
3063 if (Parser.getTok().is(AsmToken::Minus)) {
3064 Parser.Lex(); // Eat the minus.
3066 SMLoc Loc = getLoc();
3068 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3071 // Any kind suffixes must match on all regs in the list.
3072 if (Kind != NextKind)
3073 return Error(Loc, "mismatched register size suffix");
// Distance from first to last register, modulo the 32-register file.
3075 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3077 if (Space == 0 || Space > 3) {
3078 return Error(Loc, "invalid number of vectors");
// Comma-separated form.
3084 while (Parser.getTok().is(AsmToken::Comma)) {
3085 Parser.Lex(); // Eat the comma token.
3087 SMLoc Loc = getLoc();
3089 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3092 // Any kind suffixes must match on all regs in the list.
3093 if (Kind != NextKind)
3094 return Error(Loc, "mismatched register size suffix");
3096 // Registers must be incremental (with wraparound at 31)
3097 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3098 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3099 return Error(Loc, "registers must be sequential");
// NOTE(review): this Error does not return — parsing deliberately(?)
// continues after reporting a missing '}'; confirm this is intended.
3106 if (Parser.getTok().is(AsmToken::EndOfStatement))
3107 Error(getLoc(), "'}' expected");
3108 Parser.Lex(); // Eat the '}' token.
3110 unsigned NumElements = 0;
3111 char ElementKind = 0;
// Decode the arrangement suffix (e.g. ".8b") into element count and kind.
3113 parseValidVectorKind(Kind, NumElements, ElementKind);
3115 Operands.push_back(ARM64Operand::CreateVectorList(
3116 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3118 // If there is an index specifier following the list, parse that too.
3119 if (Parser.getTok().is(AsmToken::LBrac)) {
3120 SMLoc SIdx = getLoc();
3121 Parser.Lex(); // Eat left bracket token.
3123 const MCExpr *ImmVal;
3124 if (getParser().parseExpression(ImmVal))
// The lane index must fold to a constant expression.
3126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3128 TokError("immediate value expected for vector index");
3133 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3134 Error(E, "']' expected");
3138 Parser.Lex(); // Eat right bracket token.
3140 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3146 /// parseOperand - Parse an ARM64 instruction operand. For now this parses the
3147 /// operand regardless of the mnemonic.
/// isCondCode forces the operand to be parsed as a condition code;
/// invertCondCode is forwarded to parseCondCode for aliased mnemonics.
3148 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3149 bool invertCondCode) {
3150 // Check if the current operand has a custom associated parser, if so, try to
3151 // custom parse the operand, or fallback to the general approach.
3152 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3153 if (ResTy == MatchOperand_Success)
3155 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3156 // there was a match, but an error occurred, in which case, just return that
3157 // the operand parsing failed.
3158 if (ResTy == MatchOperand_ParseFail)
3161 // Nothing custom, so do general case parsing.
3163 switch (getLexer().getKind()) {
3167 if (parseSymbolicImmVal(Expr))
3168 return Error(S, "invalid operand");
3170 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3171 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
// '[' introduces a memory operand, '{' a vector list.
3174 case AsmToken::LBrac:
3175 return parseMemory(Operands);
3176 case AsmToken::LCurly:
3177 return parseVectorList(Operands);
3178 case AsmToken::Identifier: {
3179 // If we're expecting a Condition Code operand, then just parse that.
3181 return parseCondCode(Operands, invertCondCode);
3183 // If it's a register name, parse it.
3184 if (!parseRegister(Operands))
3187 // This could be an optional "shift" operand.
3188 if (!parseOptionalShift(Operands))
3191 // Or maybe it could be an optional "extend" operand.
3192 if (!parseOptionalExtend(Operands))
3195 // This was not a register so parse other operands that start with an
3196 // identifier (like labels) as expressions and create them as immediates.
3197 const MCExpr *IdVal;
3199 if (getParser().parseExpression(IdVal))
3202 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3203 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3206 case AsmToken::Integer:
3207 case AsmToken::Real:
3208 case AsmToken::Hash: {
3209 // #42 -> immediate.
3211 if (getLexer().is(AsmToken::Hash))
3214 // The only Real that should come through here is a literal #0.0 for
3215 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3216 // so convert the value.
3217 const AsmToken &Tok = Parser.getTok();
3218 if (Tok.is(AsmToken::Real)) {
3219 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3220 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// Only an exact 0.0 bit pattern is accepted, and only for fcmp/fcmpe.
3221 if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
3222 return TokError("unexpected floating point literal");
3223 Parser.Lex(); // Eat the token.
3226 ARM64Operand::CreateToken("#0", false, S, getContext()));
3228 ARM64Operand::CreateToken(".0", false, S, getContext()));
// Otherwise parse a (possibly relocation-modified) immediate expression.
3232 const MCExpr *ImmVal;
3233 if (parseSymbolicImmVal(ImmVal))
3236 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3237 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3243 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
3245 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3246 StringRef Name, SMLoc NameLoc,
3247 OperandVector &Operands) {
3248 // Create the leading tokens for the mnemonic, split by '.' characters.
3249 size_t Start = 0, Next = Name.find('.');
3250 StringRef Head = Name.slice(Start, Next);
3252 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3253 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3254 return parseSysAlias(Head, NameLoc, Operands);
3257 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3260 // Handle condition codes for a branch mnemonic
// A "b.<cc>" suffix becomes an immediate condition-code operand rather than
// a mnemonic token.
3261 if (Head == "b" && Next != StringRef::npos) {
3263 Next = Name.find('.', Start + 1);
3264 Head = Name.slice(Start + 1, Next);
3266 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3267 (Head.data() - Name.data()));
3268 unsigned CC = parseCondCodeString(Head);
3269 if (CC == ARM64CC::Invalid)
3270 return Error(SuffixLoc, "invalid condition code");
3271 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3273 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3276 // Add the remaining tokens in the mnemonic.
3277 while (Next != StringRef::npos) {
3279 Next = Name.find('.', Start + 1);
3280 Head = Name.slice(Start, Next);
3281 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3282 (Head.data() - Name.data()) + 1);
3284 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3287 // Conditional compare instructions have a Condition Code operand, which needs
3288 // to be parsed and an immediate operand created.
3289 bool condCodeFourthOperand =
3290 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3291 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3292 Head == "csinc" || Head == "csinv" || Head == "csneg");
3294 // These instructions are aliases to some of the conditional select
3295 // instructions. However, the condition code is inverted in the aliased
3298 // FIXME: Is this the correct way to handle these? Or should the parser
3299 // generate the aliased instructions directly?
3300 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3301 bool condCodeThirdOperand =
3302 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3304 // Read the remaining operands.
3305 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3306 // Read the first operand.
3307 if (parseOperand(Operands, false, false)) {
3308 Parser.eatToEndOfStatement();
3313 while (getLexer().is(AsmToken::Comma)) {
3314 Parser.Lex(); // Eat the comma.
3316 // Parse and remember the operand.
// The condition-code position (2nd/3rd/4th operand) depends on which
// alias family the mnemonic belongs to; N counts operands parsed so far.
3317 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3318 (N == 3 && condCodeThirdOperand) ||
3319 (N == 2 && condCodeSecondOperand),
3320 condCodeSecondOperand || condCodeThirdOperand)) {
3321 Parser.eatToEndOfStatement();
3329 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3330 SMLoc Loc = Parser.getTok().getLoc();
3331 Parser.eatToEndOfStatement();
3332 return Error(Loc, "unexpected token in argument list");
3335 Parser.Lex(); // Consume the EndOfStatement
3339 // FIXME: This entire function is a giant hack to provide us with decent
3340 // operand range validation/diagnostics until TableGen/MC can be extended
3341 // to support autogeneration of this kind of validation.
// Post-match semantic validation of an encoded MCInst. Two passes: first,
// unpredictable register combinations (writeback base overlapping a data
// register, or Rt == Rt2 in register pairs); second, immediate/shift/extend
// range checks. Failures are reported through Error(), which returns true,
// so "return Error(...)" signals rejection; falling through returns success.
// Loc[i] holds the start location of source operand i+1 (see the caller,
// which pushes Operands[1..N-1]'s start locs), so diagnostics can point at
// the offending operand.
// NOTE(review): this excerpt is a numbered listing with gaps (the embedded
// line numbers jump), so some 'break's, 'if' conditions, case labels and
// closing braces are not visible in this view — confirm against the full
// file before relying on exact control flow.
3342 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3343 SmallVectorImpl<SMLoc> &Loc) {
3344 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3345 // Check for indexed addressing modes w/ the base register being the
3346 // same as a destination/source register or pair load where
3347 // the Rt == Rt2. All of those are undefined behaviour.
3348 switch (Inst.getOpcode()) {
3349 case ARM64::LDPSWpre:
3350 case ARM64::LDPWpost:
3351 case ARM64::LDPWpre:
3352 case ARM64::LDPXpost:
3353 case ARM64::LDPXpre: {
// Writeback LDP: the base register (Rn) may not overlap either destination.
// isSubRegisterEq also matches the identical register, not just sub-regs.
3354 unsigned Rt = Inst.getOperand(0).getReg();
3355 unsigned Rt2 = Inst.getOperand(1).getReg();
3356 unsigned Rn = Inst.getOperand(2).getReg();
3357 if (RI->isSubRegisterEq(Rn, Rt))
3358 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3359 "is also a destination");
3360 if (RI->isSubRegisterEq(Rn, Rt2))
3361 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3362 "is also a destination");
// Non-writeback (and remaining) LDP forms: only the Rt == Rt2 hazard applies.
3365 case ARM64::LDPDpost:
3366 case ARM64::LDPDpre:
3367 case ARM64::LDPQpost:
3368 case ARM64::LDPQpre:
3369 case ARM64::LDPSpost:
3370 case ARM64::LDPSpre:
3371 case ARM64::LDPSWpost:
3377 case ARM64::LDPXi: {
3378 unsigned Rt = Inst.getOperand(0).getReg();
3379 unsigned Rt2 = Inst.getOperand(1).getReg();
// NOTE(review): the guarding condition (original line 3380, presumably
// "if (Rt == Rt2)") is not visible in this listing.
3381 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback STP: the base register may not overlap either source register.
3384 case ARM64::STPDpost:
3385 case ARM64::STPDpre:
3386 case ARM64::STPQpost:
3387 case ARM64::STPQpre:
3388 case ARM64::STPSpost:
3389 case ARM64::STPSpre:
3390 case ARM64::STPWpost:
3391 case ARM64::STPWpre:
3392 case ARM64::STPXpost:
3393 case ARM64::STPXpre: {
3394 unsigned Rt = Inst.getOperand(0).getReg();
3395 unsigned Rt2 = Inst.getOperand(1).getReg();
3396 unsigned Rn = Inst.getOperand(2).getReg();
3397 if (RI->isSubRegisterEq(Rn, Rt))
3398 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3399 "is also a source");
3400 if (RI->isSubRegisterEq(Rn, Rt2))
3401 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3402 "is also a source");
// Writeback single-register loads: base may not overlap the destination.
3405 case ARM64::LDRBBpre:
3406 case ARM64::LDRBpre:
3407 case ARM64::LDRHHpre:
3408 case ARM64::LDRHpre:
3409 case ARM64::LDRSBWpre:
3410 case ARM64::LDRSBXpre:
3411 case ARM64::LDRSHWpre:
3412 case ARM64::LDRSHXpre:
3413 case ARM64::LDRSWpre:
3414 case ARM64::LDRWpre:
3415 case ARM64::LDRXpre:
3416 case ARM64::LDRBBpost:
3417 case ARM64::LDRBpost:
3418 case ARM64::LDRHHpost:
3419 case ARM64::LDRHpost:
3420 case ARM64::LDRSBWpost:
3421 case ARM64::LDRSBXpost:
3422 case ARM64::LDRSHWpost:
3423 case ARM64::LDRSHXpost:
3424 case ARM64::LDRSWpost:
3425 case ARM64::LDRWpost:
3426 case ARM64::LDRXpost: {
3427 unsigned Rt = Inst.getOperand(0).getReg();
3428 unsigned Rn = Inst.getOperand(1).getReg();
3429 if (RI->isSubRegisterEq(Rn, Rt))
3430 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3431 "is also a source");
// Writeback single-register stores: base may not overlap the source.
3434 case ARM64::STRBBpost:
3435 case ARM64::STRBpost:
3436 case ARM64::STRHHpost:
3437 case ARM64::STRHpost:
3438 case ARM64::STRWpost:
3439 case ARM64::STRXpost:
3440 case ARM64::STRBBpre:
3441 case ARM64::STRBpre:
3442 case ARM64::STRHHpre:
3443 case ARM64::STRHpre:
3444 case ARM64::STRWpre:
3445 case ARM64::STRXpre: {
3446 unsigned Rt = Inst.getOperand(0).getReg();
3447 unsigned Rn = Inst.getOperand(1).getReg();
3448 if (RI->isSubRegisterEq(Rn, Rt))
3449 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3450 "is also a source");
3455 // Now check immediate ranges. Separate from the above as there is overlap
3456 // in the instructions being checked and this keeps the nested conditionals
3458 switch (Inst.getOpcode()) {
// Shifted-register logical ops: LSL shift amount limited to 31 for the
// 32-bit (W) forms. Operand 3 is the encoded shifter.
3460 case ARM64::ANDSWrs:
3462 case ARM64::ORRWrs: {
3463 if (!Inst.getOperand(3).isImm())
3464 return Error(Loc[3], "immediate value expected");
3465 int64_t shifter = Inst.getOperand(3).getImm();
3466 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3467 if (ST == ARM64_AM::LSL && shifter > 31)
3468 return Error(Loc[3], "shift value out of range");
// ADD/SUB (immediate): shift must be LSL #0 or LSL #12, and the imm12
// operand must be either a 0..4095 constant or a recognized :lo12:-style
// symbol reference.
3471 case ARM64::ADDSWri:
3472 case ARM64::ADDSXri:
3475 case ARM64::SUBSWri:
3476 case ARM64::SUBSXri:
3478 case ARM64::SUBXri: {
3479 if (!Inst.getOperand(3).isImm())
3480 return Error(Loc[3], "immediate value expected");
3481 int64_t shifter = Inst.getOperand(3).getImm();
3482 if (shifter != 0 && shifter != 12)
3483 return Error(Loc[3], "shift value out of range");
3484 // The imm12 operand can be an expression. Validate that it's legit.
3485 // FIXME: We really, really want to allow arbitrary expressions here
3486 // and resolve the value and validate the result at fixup time, but
3487 // that's hard as we have long since lost any source information we
3488 // need to generate good diagnostics by that point.
3489 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3490 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3491 ARM64MCExpr::VariantKind ELFRefKind;
3492 MCSymbolRefExpr::VariantKind DarwinRefKind;
3494 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3495 return Error(Loc[2], "invalid immediate expression");
// Accept the page-offset / low-12-bit relocation flavors (Darwin
// @pageoff/@tlvppageoff and the ELF :lo12: TLS/non-TLS variants).
3498 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3499 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3500 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3501 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3502 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3503 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3504 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3505 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3506 // Note that we don't range-check the addend. It's adjusted
3507 // modulo page size when converted, so there is no "out of range"
3508 // condition when using @pageoff. Any validity checking for the value
3509 // was done in the is*() predicate function.
3511 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3512 // @gotpageoff can only be used directly, not with an addend.
3516 // Otherwise, we're not sure, so don't allow it for now.
3517 return Error(Loc[2], "invalid immediate expression");
3520 // If it's anything but an immediate, it's not legit.
3521 if (!Inst.getOperand(2).isImm())
3522 return Error(Loc[2], "invalid immediate expression");
3523 int64_t imm = Inst.getOperand(2).getImm();
3524 if (imm > 4095 || imm < 0)
3525 return Error(Loc[2], "immediate value out of range");
// Pre/post-indexed loads/stores plus the unprivileged (LDTR*) and
// unscaled (LDUR*/STUR*) forms: offset is a 9-bit signed immediate,
// i.e. -256..255, in operand 2.
3528 case ARM64::LDRBpre:
3529 case ARM64::LDRHpre:
3530 case ARM64::LDRSBWpre:
3531 case ARM64::LDRSBXpre:
3532 case ARM64::LDRSHWpre:
3533 case ARM64::LDRSHXpre:
3534 case ARM64::LDRWpre:
3535 case ARM64::LDRXpre:
3536 case ARM64::LDRSpre:
3537 case ARM64::LDRDpre:
3538 case ARM64::LDRQpre:
3539 case ARM64::STRBpre:
3540 case ARM64::STRHpre:
3541 case ARM64::STRWpre:
3542 case ARM64::STRXpre:
3543 case ARM64::STRSpre:
3544 case ARM64::STRDpre:
3545 case ARM64::STRQpre:
3546 case ARM64::LDRBpost:
3547 case ARM64::LDRHpost:
3548 case ARM64::LDRSBWpost:
3549 case ARM64::LDRSBXpost:
3550 case ARM64::LDRSHWpost:
3551 case ARM64::LDRSHXpost:
3552 case ARM64::LDRWpost:
3553 case ARM64::LDRXpost:
3554 case ARM64::LDRSpost:
3555 case ARM64::LDRDpost:
3556 case ARM64::LDRQpost:
3557 case ARM64::STRBpost:
3558 case ARM64::STRHpost:
3559 case ARM64::STRWpost:
3560 case ARM64::STRXpost:
3561 case ARM64::STRSpost:
3562 case ARM64::STRDpost:
3563 case ARM64::STRQpost:
3568 case ARM64::LDTRSHWi:
3569 case ARM64::LDTRSHXi:
3570 case ARM64::LDTRSBWi:
3571 case ARM64::LDTRSBXi:
3572 case ARM64::LDTRSWi:
3584 case ARM64::LDURSHWi:
3585 case ARM64::LDURSHXi:
3586 case ARM64::LDURSBWi:
3587 case ARM64::LDURSBXi:
3588 case ARM64::LDURSWi:
3596 case ARM64::STURBi: {
3597 // FIXME: Should accept expressions and error in fixup evaluation
3599 if (!Inst.getOperand(2).isImm())
3600 return Error(Loc[1], "immediate value expected");
3601 int64_t offset = Inst.getOperand(2).getImm();
3602 if (offset > 255 || offset < -256)
3603 return Error(Loc[1], "offset value out of range");
// Register-offset loads/stores: operand 3 encodes the extend; only
// UXTW/UXTX/SXTW/SXTX are valid memory extends. The same check is
// repeated per size class below (word, quad, half, byte).
3608 case ARM64::LDRSWro:
3610 case ARM64::STRSro: {
3611 // FIXME: Should accept expressions and error in fixup evaluation
3613 if (!Inst.getOperand(3).isImm())
3614 return Error(Loc[1], "immediate value expected");
3615 int64_t shift = Inst.getOperand(3).getImm();
3616 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3617 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3618 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3619 return Error(Loc[1], "shift type invalid");
3628 case ARM64::STRQro: {
3629 // FIXME: Should accept expressions and error in fixup evaluation
3631 if (!Inst.getOperand(3).isImm())
3632 return Error(Loc[1], "immediate value expected");
3633 int64_t shift = Inst.getOperand(3).getImm();
3634 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3635 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3636 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3637 return Error(Loc[1], "shift type invalid");
3641 case ARM64::LDRHHro:
3642 case ARM64::LDRSHWro:
3643 case ARM64::LDRSHXro:
3645 case ARM64::STRHHro: {
3646 // FIXME: Should accept expressions and error in fixup evaluation
3648 if (!Inst.getOperand(3).isImm())
3649 return Error(Loc[1], "immediate value expected");
3650 int64_t shift = Inst.getOperand(3).getImm();
3651 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3652 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3653 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3654 return Error(Loc[1], "shift type invalid");
3658 case ARM64::LDRBBro:
3659 case ARM64::LDRSBWro:
3660 case ARM64::LDRSBXro:
3662 case ARM64::STRBBro: {
3663 // FIXME: Should accept expressions and error in fixup evaluation
3665 if (!Inst.getOperand(3).isImm())
3666 return Error(Loc[1], "immediate value expected");
3667 int64_t shift = Inst.getOperand(3).getImm();
3668 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3669 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3670 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3671 return Error(Loc[1], "shift type invalid");
// Pair loads/stores (incl. non-temporal STNP below): operand 3 is the
// scaled 7-bit signed offset, so the raw value must be in [-64,63].
3685 case ARM64::LDPWpre:
3686 case ARM64::LDPXpre:
3687 case ARM64::LDPSpre:
3688 case ARM64::LDPDpre:
3689 case ARM64::LDPQpre:
3690 case ARM64::LDPSWpre:
3691 case ARM64::STPWpre:
3692 case ARM64::STPXpre:
3693 case ARM64::STPSpre:
3694 case ARM64::STPDpre:
3695 case ARM64::STPQpre:
3696 case ARM64::LDPWpost:
3697 case ARM64::LDPXpost:
3698 case ARM64::LDPSpost:
3699 case ARM64::LDPDpost:
3700 case ARM64::LDPQpost:
3701 case ARM64::LDPSWpost:
3702 case ARM64::STPWpost:
3703 case ARM64::STPXpost:
3704 case ARM64::STPSpost:
3705 case ARM64::STPDpost:
3706 case ARM64::STPQpost:
3716 case ARM64::STNPQi: {
3717 // FIXME: Should accept expressions and error in fixup evaluation
3719 if (!Inst.getOperand(3).isImm())
3720 return Error(Loc[2], "immediate value expected");
3721 int64_t offset = Inst.getOperand(3).getImm();
3722 if (offset > 63 || offset < -64)
3723 return Error(Loc[2], "offset value out of range");
// Rewrite a "MOV Rd, #imm" alias into a MOVZ/MOVN-style operand list:
// replaces the mnemonic token (Operands[0]) with `mnemonic`, replaces the
// immediate (Operands[2]) with imm >> shift, and appends an "LSL #shift"
// shifter operand. `imm` must already have the value appropriate for the
// chosen mnemonic (the caller passes the complemented value for "movn").
// NOTE(review): original line 3736 (presumably "Operands[0] =", the LHS of
// the CreateToken call on line 3737) is missing from this listing.
3731 static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
3732 StringRef mnemonic, uint64_t imm, unsigned shift,
3733 MCContext &Context) {
3734 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3735 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3737 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
3739 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3740 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3741 Op2->getEndLoc(), Context);
3743 Operands.push_back(ARM64Operand::CreateShifter(
3744 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context))
// Rewrite a register-to-register MOV involving SP/WSP into the canonical
// "ADD Rd, Rn, #0, lsl #0" operand list: the mnemonic token becomes "add"
// and a zero immediate plus an LSL #0 shifter are appended.
// NOTE(review): original line 3753 (presumably "Operands[0] =", the LHS of
// the CreateToken call on line 3754) is missing from this listing.
3749 static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
3750 MCContext &Context) {
3751 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3752 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3754 ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
3756 const MCExpr *Imm = MCConstantExpr::Create(0, Context);
3757 Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
3758 Op2->getEndLoc(), Context));
3759 Operands.push_back(ARM64Operand::CreateShifter(
3760 ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
// Rewrite a plain register-to-register MOV into its ORR alias:
// "mov Rd, Rm" -> "orr Rd, ZR, Rm". The source register is shifted from
// slot 2 to slot 3 and slot 2 becomes the zero register (WZR when the
// source was a 32-bit GPR, XZR otherwise).
// NOTE(review): original lines 3769 and 3779-3780 (presumably the
// "Operands[0] =" / "Operands[2] =" left-hand sides of the CreateToken and
// CreateReg calls) are missing from this listing.
3765 static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
3766 MCContext &Context) {
3767 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3768 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3770 ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
3772 // Operands[2] becomes Operands[3].
3773 Operands.push_back(Operands[2]);
3774 // And Operands[2] becomes ZR.
3775 unsigned ZeroReg = ARM64::XZR;
3776 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3777 Operands[2]->getReg()))
3778 ZeroReg = ARM64::WZR;
3781 ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
3782 Op2->getEndLoc(), Context);
// Translate a matcher result code (Match_*) into a human-readable
// diagnostic emitted at Loc. Always reports via Error(), which returns
// true, so callers can "return showMatchError(...)".
// NOTE(review): this listing has gaps — the "switch (ErrCode)" line
// (original 3788), the Match_MissingFeature "return Error(Loc," line
// (original 3790) and the "default:" label (original 3826) before the
// assert are not visible in this view.
3787 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3789 case Match_MissingFeature:
3791 "instruction requires a CPU feature not currently enabled");
3792 case Match_InvalidOperand:
3793 return Error(Loc, "invalid operand for instruction");
3794 case Match_InvalidSuffix:
3795 return Error(Loc, "invalid type suffix for instruction");
// The memory-index diagnostics spell out the exact encodable range for
// each scaling (signed 9-bit unscaled, scaled 7-bit pairs, unsigned
// scaled 12-bit single-register forms).
3796 case Match_InvalidMemoryIndexedSImm9:
3797 return Error(Loc, "index must be an integer in range [-256,255].");
3798 case Match_InvalidMemoryIndexed32SImm7:
3799 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
3800 case Match_InvalidMemoryIndexed64SImm7:
3801 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
3802 case Match_InvalidMemoryIndexed128SImm7:
3803 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
3804 case Match_InvalidMemoryIndexed8:
3805 return Error(Loc, "index must be an integer in range [0,4095].");
3806 case Match_InvalidMemoryIndexed16:
3807 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
3808 case Match_InvalidMemoryIndexed32:
3809 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
3810 case Match_InvalidMemoryIndexed64:
3811 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
3812 case Match_InvalidMemoryIndexed128:
3813 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
3814 case Match_InvalidImm1_8:
3815 return Error(Loc, "immediate must be an integer in range [1,8].");
3816 case Match_InvalidImm1_16:
3817 return Error(Loc, "immediate must be an integer in range [1,16].");
3818 case Match_InvalidImm1_32:
3819 return Error(Loc, "immediate must be an integer in range [1,32].");
3820 case Match_InvalidImm1_64:
3821 return Error(Loc, "immediate must be an integer in range [1,64].");
3822 case Match_InvalidLabel:
3823 return Error(Loc, "expected label or encodable integer pc offset");
3824 case Match_MnemonicFail:
3825 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes are a programmer error; still emit a generic diagnostic
// in release builds where the assert compiles away.
3827 assert(0 && "unexpected error code!");
3828 return Error(Loc, "invalid instruction format");
3832 static const char *getSubtargetFeatureName(unsigned Val);
3834 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3835 OperandVector &Operands,
3837 unsigned &ErrorInfo,
3838 bool MatchingInlineAsm) {
3839 assert(!Operands.empty() && "Unexpect empty operand list!");
3840 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3841 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3843 StringRef Tok = Op->getToken();
3844 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3845 // This needs to be done before the special handling of ADD/SUB immediates.
3846 if (Tok == "cmp" || Tok == "cmn") {
3847 // Replace the opcode with either ADDS or SUBS.
3848 const char *Repl = StringSwitch<const char *>(Tok)
3849 .Case("cmp", "subs")
3850 .Case("cmn", "adds")
3852 assert(Repl && "Unknown compare instruction");
3854 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3856 // Insert WZR or XZR as destination operand.
3857 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
3859 if (RegOp->isReg() &&
3860 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3862 ZeroReg = ARM64::WZR;
3864 ZeroReg = ARM64::XZR;
3866 Operands.begin() + 1,
3867 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3868 // Update since we modified it above.
3869 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3870 Tok = Op->getToken();
3873 unsigned NumOperands = Operands.size();
3875 if (Tok == "mov" && NumOperands == 3) {
3876 // The MOV mnemonic is aliased to movn/movz, depending on the value of
3877 // the immediate being instantiated.
3878 // FIXME: Catching this here is a total hack, and we should use tblgen
3879 // support to implement this instead as soon as it is available.
3881 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3882 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3884 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3885 uint64_t Val = CE->getValue();
3886 uint64_t NVal = ~Val;
3888 // If this is a 32-bit register and the value has none of the upper
3889 // set, clear the complemented upper 32-bits so the logic below works
3890 // for 32-bit registers too.
3891 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3893 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3895 (Val & 0xFFFFFFFFULL) == Val)
3896 NVal &= 0x00000000FFFFFFFFULL;
3898 // MOVK Rd, imm << 0
3899 if ((Val & 0xFFFF) == Val)
3900 rewriteMOVI(Operands, "movz", Val, 0, getContext());
3902 // MOVK Rd, imm << 16
3903 else if ((Val & 0xFFFF0000ULL) == Val)
3904 rewriteMOVI(Operands, "movz", Val, 16, getContext());
3906 // MOVK Rd, imm << 32
3907 else if ((Val & 0xFFFF00000000ULL) == Val)
3908 rewriteMOVI(Operands, "movz", Val, 32, getContext());
3910 // MOVK Rd, imm << 48
3911 else if ((Val & 0xFFFF000000000000ULL) == Val)
3912 rewriteMOVI(Operands, "movz", Val, 48, getContext());
3914 // MOVN Rd, (~imm << 0)
3915 else if ((NVal & 0xFFFFULL) == NVal)
3916 rewriteMOVI(Operands, "movn", NVal, 0, getContext());
3918 // MOVN Rd, ~(imm << 16)
3919 else if ((NVal & 0xFFFF0000ULL) == NVal)
3920 rewriteMOVI(Operands, "movn", NVal, 16, getContext());
3922 // MOVN Rd, ~(imm << 32)
3923 else if ((NVal & 0xFFFF00000000ULL) == NVal)
3924 rewriteMOVI(Operands, "movn", NVal, 32, getContext());
3926 // MOVN Rd, ~(imm << 48)
3927 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
3928 rewriteMOVI(Operands, "movn", NVal, 48, getContext());
3930 } else if (Op1->isReg() && Op2->isReg()) {
3932 unsigned Reg1 = Op1->getReg();
3933 unsigned Reg2 = Op2->getReg();
3934 if ((Reg1 == ARM64::SP &&
3935 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
3936 (Reg2 == ARM64::SP &&
3937 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
3938 (Reg1 == ARM64::WSP &&
3939 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
3940 (Reg2 == ARM64::WSP &&
3941 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
3942 rewriteMOVRSP(Operands, getContext());
3944 rewriteMOVR(Operands, getContext());
3946 } else if (NumOperands == 4) {
3947 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
3948 // Handle the uimm24 immediate form, where the shift is not specified.
3949 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3951 if (const MCConstantExpr *CE =
3952 dyn_cast<MCConstantExpr>(Op3->getImm())) {
3953 uint64_t Val = CE->getValue();
3954 if (Val >= (1 << 24)) {
3955 Error(IDLoc, "immediate value is too large");
3958 if (Val < (1 << 12)) {
3959 Operands.push_back(ARM64Operand::CreateShifter(
3960 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3961 } else if ((Val & 0xfff) == 0) {
3963 CE = MCConstantExpr::Create(Val >> 12, getContext());
3965 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
3966 Operands.push_back(ARM64Operand::CreateShifter(
3967 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
3969 Error(IDLoc, "immediate value is too large");
3973 Operands.push_back(ARM64Operand::CreateShifter(
3974 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3978 // FIXME: Horrible hack to handle the LSL -> UBFM alias.
3979 } else if (NumOperands == 4 && Tok == "lsl") {
3980 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3981 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3982 if (Op2->isReg() && Op3->isImm()) {
3983 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3985 uint64_t Op3Val = Op3CE->getValue();
3986 uint64_t NewOp3Val = 0;
3987 uint64_t NewOp4Val = 0;
3988 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3990 NewOp3Val = (32 - Op3Val) & 0x1f;
3991 NewOp4Val = 31 - Op3Val;
3993 NewOp3Val = (64 - Op3Val) & 0x3f;
3994 NewOp4Val = 63 - Op3Val;
3997 const MCExpr *NewOp3 =
3998 MCConstantExpr::Create(NewOp3Val, getContext());
3999 const MCExpr *NewOp4 =
4000 MCConstantExpr::Create(NewOp4Val, getContext());
4002 Operands[0] = ARM64Operand::CreateToken(
4003 "ubfm", false, Op->getStartLoc(), getContext());
4004 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4005 Op3->getEndLoc(), getContext());
4006 Operands.push_back(ARM64Operand::CreateImm(
4007 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4013 // FIXME: Horrible hack to handle the optional LSL shift for vector
4015 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4016 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4017 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4018 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4019 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4020 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4021 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4022 IDLoc, getContext()));
4023 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4024 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4025 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4026 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4027 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4028 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4029 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4030 // Canonicalize on lower-case for ease of comparison.
4031 std::string CanonicalSuffix = Suffix.lower();
4032 if (Tok != "movi" ||
4033 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4034 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4035 Operands.push_back(ARM64Operand::CreateShifter(
4036 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4039 } else if (NumOperands == 5) {
4040 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4041 // UBFIZ -> UBFM aliases.
4042 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4043 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4044 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4045 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4047 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4048 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4049 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4051 if (Op3CE && Op4CE) {
4052 uint64_t Op3Val = Op3CE->getValue();
4053 uint64_t Op4Val = Op4CE->getValue();
4055 uint64_t NewOp3Val = 0;
4056 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
4058 NewOp3Val = (32 - Op3Val) & 0x1f;
4060 NewOp3Val = (64 - Op3Val) & 0x3f;
4062 uint64_t NewOp4Val = Op4Val - 1;
4064 const MCExpr *NewOp3 =
4065 MCConstantExpr::Create(NewOp3Val, getContext());
4066 const MCExpr *NewOp4 =
4067 MCConstantExpr::Create(NewOp4Val, getContext());
4068 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4069 Op3->getEndLoc(), getContext());
4070 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4071 Op4->getEndLoc(), getContext());
4073 Operands[0] = ARM64Operand::CreateToken(
4074 "bfm", false, Op->getStartLoc(), getContext());
4075 else if (Tok == "sbfiz")
4076 Operands[0] = ARM64Operand::CreateToken(
4077 "sbfm", false, Op->getStartLoc(), getContext());
4078 else if (Tok == "ubfiz")
4079 Operands[0] = ARM64Operand::CreateToken(
4080 "ubfm", false, Op->getStartLoc(), getContext());
4082 llvm_unreachable("No valid mnemonic for alias?");
4090 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4091 // UBFX -> UBFM aliases.
4092 } else if (NumOperands == 5 &&
4093 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4094 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4095 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4096 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4098 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4099 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4100 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4102 if (Op3CE && Op4CE) {
4103 uint64_t Op3Val = Op3CE->getValue();
4104 uint64_t Op4Val = Op4CE->getValue();
4105 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4107 if (NewOp4Val >= Op3Val) {
4108 const MCExpr *NewOp4 =
4109 MCConstantExpr::Create(NewOp4Val, getContext());
4110 Operands[4] = ARM64Operand::CreateImm(
4111 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4113 Operands[0] = ARM64Operand::CreateToken(
4114 "bfm", false, Op->getStartLoc(), getContext());
4115 else if (Tok == "sbfx")
4116 Operands[0] = ARM64Operand::CreateToken(
4117 "sbfm", false, Op->getStartLoc(), getContext());
4118 else if (Tok == "ubfx")
4119 Operands[0] = ARM64Operand::CreateToken(
4120 "ubfm", false, Op->getStartLoc(), getContext());
4122 llvm_unreachable("No valid mnemonic for alias?");
4131 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4132 // InstAlias can't quite handle this since the reg classes aren't
4134 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4135 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4137 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4138 if (OpCE->getValue() < 32) {
4139 // The source register can be Wn here, but the matcher expects a
4140 // GPR64. Twiddle it here if necessary.
4141 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4143 unsigned Reg = getXRegFromWReg(Op->getReg());
4144 Operands[1] = ARM64Operand::CreateReg(
4145 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4152 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4153 // InstAlias can't quite handle this since the reg classes aren't
4155 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4156 // The source register can be Wn here, but the matcher expects a
4157 // GPR64. Twiddle it here if necessary.
4158 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4160 unsigned Reg = getXRegFromWReg(Op->getReg());
4161 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4162 Op->getEndLoc(), getContext());
4166 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4167 else if (NumOperands == 3 &&
4168 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4169 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4171 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
4173 // The source register can be Wn here, but the matcher expects a
4174 // GPR64. Twiddle it here if necessary.
4175 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4177 unsigned Reg = getXRegFromWReg(Op->getReg());
4178 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4179 Op->getEndLoc(), getContext());
4185 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4186 if (NumOperands == 3 && Tok == "fmov") {
4187 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4188 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
4189 if (RegOp->isReg() && ImmOp->isFPImm() &&
4190 ImmOp->getFPImm() == (unsigned)-1) {
4191 unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
4195 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4196 Op->getEndLoc(), getContext());
4201 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4202 // FMOV instructions. The index isn't an actual instruction operand
4203 // but rather syntactic sugar. It really should be part of the mnemonic,
4204 // not the operand, but whatever.
4205 if ((NumOperands == 5) && Tok == "fmov") {
4206 // If the last operand is a vectorindex of '1', then replace it with
4207 // a '[' '1' ']' token sequence, which is what the matcher
4208 // (annoyingly) expects for a literal vector index operand.
4209 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4210 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4211 SMLoc Loc = Op->getStartLoc();
4212 Operands.pop_back();
4215 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4217 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4219 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4220 } else if (Op->isReg()) {
4221 // Similarly, check the destination operand for the GPR->High-lane
4223 unsigned OpNo = NumOperands - 2;
4224 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4225 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4226 SMLoc Loc = Op->getStartLoc();
4228 ARM64Operand::CreateToken("[", false, Loc, getContext());
4230 Operands.begin() + OpNo + 1,
4231 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4233 Operands.begin() + OpNo + 2,
4234 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4241 // First try to match against the secondary set of tables containing the
4242 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4243 unsigned MatchResult =
4244 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4246 // If that fails, try against the alternate table containing long-form NEON:
4247 // "fadd v0.2s, v1.2s, v2.2s"
4248 if (MatchResult != Match_Success)
4250 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4252 switch (MatchResult) {
4253 case Match_Success: {
4254 // Perform range checking and other semantic validations
4255 SmallVector<SMLoc, 8> OperandLocs;
4256 NumOperands = Operands.size();
4257 for (unsigned i = 1; i < NumOperands; ++i)
4258 OperandLocs.push_back(Operands[i]->getStartLoc());
4259 if (validateInstruction(Inst, OperandLocs))
4263 Out.EmitInstruction(Inst, STI);
4266 case Match_MissingFeature: {
4267 assert(ErrorInfo && "Unknown missing feature!");
4268 // Special case the error message for the very common case where only
4269 // a single subtarget feature is missing (neon, e.g.).
4270 std::string Msg = "instruction requires:";
4272 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4273 if (ErrorInfo & Mask) {
4275 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4279 return Error(IDLoc, Msg);
4281 case Match_MnemonicFail:
4282 return showMatchError(IDLoc, MatchResult);
4283 case Match_InvalidOperand: {
4284 SMLoc ErrorLoc = IDLoc;
4285 if (ErrorInfo != ~0U) {
4286 if (ErrorInfo >= Operands.size())
4287 return Error(IDLoc, "too few operands for instruction");
4289 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4290 if (ErrorLoc == SMLoc())
4293 // If the match failed on a suffix token operand, tweak the diagnostic
4295 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4296 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4297 MatchResult = Match_InvalidSuffix;
4299 return showMatchError(ErrorLoc, MatchResult);
4301 case Match_InvalidMemoryIndexedSImm9: {
4302 // If there is not a '!' after the memory operand that failed, we really
4303 // want the diagnostic for the non-pre-indexed instruction variant instead.
4304 // Be careful to check for the post-indexed variant as well, which also
4305 // uses this match diagnostic. Also exclude the explicitly unscaled
4306 // mnemonics, as they want the unscaled diagnostic as well.
4307 if (Operands.size() == ErrorInfo + 1 &&
4308 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4309 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4310 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4311 // the register class of the previous operand. Default to 64 in case
4312 // we see something unexpected.
4313 MatchResult = Match_InvalidMemoryIndexed64;
4315 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4316 if (PrevOp->isReg() &&
4317 ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
4319 MatchResult = Match_InvalidMemoryIndexed32;
4322 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4323 if (ErrorLoc == SMLoc())
4325 return showMatchError(ErrorLoc, MatchResult);
4327 case Match_InvalidMemoryIndexed32:
4328 case Match_InvalidMemoryIndexed64:
4329 case Match_InvalidMemoryIndexed128:
4330 // If there is a '!' after the memory operand that failed, we really
4331 // want the diagnostic for the pre-indexed instruction variant instead.
4332 if (Operands.size() > ErrorInfo + 1 &&
4333 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4334 MatchResult = Match_InvalidMemoryIndexedSImm9;
4336 case Match_InvalidMemoryIndexed8:
4337 case Match_InvalidMemoryIndexed16:
4338 case Match_InvalidMemoryIndexed32SImm7:
4339 case Match_InvalidMemoryIndexed64SImm7:
4340 case Match_InvalidMemoryIndexed128SImm7:
4341 case Match_InvalidImm1_8:
4342 case Match_InvalidImm1_16:
4343 case Match_InvalidImm1_32:
4344 case Match_InvalidImm1_64:
4345 case Match_InvalidLabel: {
4346 // Any time we get here, there's nothing fancy to do. Just get the
4347 // operand SMLoc and display the diagnostic.
4348 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4349 // If it's a memory operand, the error is with the offset immediate,
4350 // so get that location instead.
4351 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4352 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4353 if (ErrorLoc == SMLoc())
4355 return showMatchError(ErrorLoc, MatchResult);
4359 llvm_unreachable("Implement any new match types added!");
4363 /// ParseDirective parses the arm specific directives
4364 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4365 StringRef IDVal = DirectiveID.getIdentifier();
4366 SMLoc Loc = DirectiveID.getLoc();
4367 if (IDVal == ".hword")
4368 return parseDirectiveWord(2, Loc);
4369 if (IDVal == ".word")
4370 return parseDirectiveWord(4, Loc);
4371 if (IDVal == ".xword")
4372 return parseDirectiveWord(8, Loc);
4373 if (IDVal == ".tlsdesccall")
4374 return parseDirectiveTLSDescCall(Loc);
4376 return parseDirectiveLOH(IDVal, Loc);
4379 /// parseDirectiveWord
4380 /// ::= .word [ expression (, expression)* ]
4381 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4382 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4384 const MCExpr *Value;
4385 if (getParser().parseExpression(Value))
4388 getParser().getStreamer().EmitValue(Value, Size);
4390 if (getLexer().is(AsmToken::EndOfStatement))
4393 // FIXME: Improve diagnostic.
4394 if (getLexer().isNot(AsmToken::Comma))
4395 return Error(L, "unexpected token in directive");
4404 // parseDirectiveTLSDescCall:
4405 // ::= .tlsdesccall symbol
4406 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4408 if (getParser().parseIdentifier(Name))
4409 return Error(L, "expected symbol after directive");
4411 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4412 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4413 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4416 Inst.setOpcode(ARM64::TLSDESCCALL);
4417 Inst.addOperand(MCOperand::CreateExpr(Expr));
4419 getParser().getStreamer().EmitInstruction(Inst, STI);
4423 /// ::= .loh <lohName | lohId> label1, ..., labelN
4424 /// The number of arguments depends on the loh identifier.
4425 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4426 if (IDVal != MCLOHDirectiveName())
4429 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4430 if (getParser().getTok().isNot(AsmToken::Integer))
4431 return TokError("expected an identifier or a number in directive");
4432 // We successfully get a numeric value for the identifier.
4433 // Check if it is valid.
4434 int64_t Id = getParser().getTok().getIntVal();
4435 Kind = (MCLOHType)Id;
4436 // Check that Id does not overflow MCLOHType.
4437 if (!isValidMCLOHType(Kind) || Id != Kind)
4438 return TokError("invalid numeric identifier in directive");
4440 StringRef Name = getTok().getIdentifier();
4441 // We successfully parse an identifier.
4442 // Check if it is a recognized one.
4443 int Id = MCLOHNameToId(Name);
4446 return TokError("invalid identifier in directive");
4447 Kind = (MCLOHType)Id;
4449 // Consume the identifier.
4451 // Get the number of arguments of this LOH.
4452 int NbArgs = MCLOHIdToNbArgs(Kind);
4454 assert(NbArgs != -1 && "Invalid number of arguments");
4456 SmallVector<MCSymbol *, 3> Args;
4457 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4459 if (getParser().parseIdentifier(Name))
4460 return TokError("expected identifier in directive");
4461 Args.push_back(getContext().GetOrCreateSymbol(Name));
4463 if (Idx + 1 == NbArgs)
4465 if (getLexer().isNot(AsmToken::Comma))
4466 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4469 if (getLexer().isNot(AsmToken::EndOfStatement))
4470 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4472 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4477 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4478 ARM64MCExpr::VariantKind &ELFRefKind,
4479 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4481 ELFRefKind = ARM64MCExpr::VK_INVALID;
4482 DarwinRefKind = MCSymbolRefExpr::VK_None;
4485 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4486 ELFRefKind = AE->getKind();
4487 Expr = AE->getSubExpr();
4490 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4492 // It's a simple symbol reference with no addend.
4493 DarwinRefKind = SE->getKind();
4497 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4501 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4504 DarwinRefKind = SE->getKind();
4506 if (BE->getOpcode() != MCBinaryExpr::Add &&
4507 BE->getOpcode() != MCBinaryExpr::Sub)
4510 // See if the addend is is a constant, otherwise there's more going
4511 // on here than we can deal with.
4512 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4516 Addend = AddendExpr->getValue();
4517 if (BE->getOpcode() == MCBinaryExpr::Sub)
4520 // It's some symbol reference + a constant addend, but really
4521 // shouldn't use both Darwin and ELF syntax.
4522 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4523 DarwinRefKind == MCSymbolRefExpr::VK_None;
4526 /// Force static initialization.
4527 extern "C" void LLVMInitializeARM64AsmParser() {
4528 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
4529 RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
4532 #define GET_REGISTER_MATCHER
4533 #define GET_SUBTARGET_FEATURE_NAME
4534 #define GET_MATCHER_IMPLEMENTATION
4535 #include "ARM64GenAsmMatcher.inc"
4537 // Define this matcher function after the auto-generated include so we
4538 // have the match class enum definitions.
4539 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4541 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4542 // If the kind is a token for a literal immediate, check if our asm
4543 // operand matches. This is for InstAliases which have a fixed-value
4544 // immediate in the syntax.
4545 int64_t ExpectedVal;
4548 return Match_InvalidOperand;
4590 return Match_InvalidOperand;
4591 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4593 return Match_InvalidOperand;
4594 if (CE->getValue() == ExpectedVal)
4595 return Match_Success;
4596 return Match_InvalidOperand;