1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
// ARM64AsmParser - target hook implementation that parses ARM64 assembly text
// into MCInst instructions for the MC layer.
// NOTE(review): this chunk appears truncated (interior lines and closing
// braces are missing); code is preserved byte-for-byte, comments only added.
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
// Thin convenience accessors over the generic parser/lexer.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
// Hand-written parsers for individual operand/instruction forms; each appends
// parsed operands to the Operands vector.
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 unsigned parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind, bool expected);
59 bool parseOptionalShift(OperandVector &Operands);
60 bool parseOptionalExtend(OperandVector &Operands);
61 bool parseRegister(OperandVector &Operands);
62 bool parseMemory(OperandVector &Operands);
63 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64 bool parseVectorList(OperandVector &Operands);
65 bool parseOperand(OperandVector &Operands, bool isCondCode,
// Diagnostic helpers forwarding to the generic MCAsmParser.
68 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70 bool showMatchError(SMLoc Loc, unsigned ErrCode);
// Target-specific assembler directives (.word, .tlsdesccall, .loh ...).
72 bool parseDirectiveWord(unsigned Size, SMLoc L);
73 bool parseDirectiveTLSDescCall(SMLoc L);
75 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
81 bool MatchingInlineAsm) override;
82 /// @name Auto-generated Match Functions
85 #define GET_ASSEMBLER_HEADER
86 #include "ARM64GenAsmMatcher.inc"
// Custom operand parsers invoked by the auto-generated matcher.
90 OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
91 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
92 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
93 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
94 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
95 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
98 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
99 bool tryParseVectorRegister(OperandVector &Operands);
// Target-specific match result codes, extending the generated ones.
102 enum ARM64MatchResultTy {
103 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
104 #define GET_OPERAND_DIAGNOSTIC_TYPES
105 #include "ARM64GenAsmMatcher.inc"
107 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
108 const MCInstrInfo &MII,
109 const MCTargetOptions &Options)
110 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
111 MCAsmParserExtension::Initialize(_Parser);
113 // Initialize the set of available features.
114 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
// MCTargetAsmParser interface overrides.
117 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
118 SMLoc NameLoc, OperandVector &Operands) override;
119 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
120 bool ParseDirective(AsmToken DirectiveID) override;
121 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
122 unsigned Kind) override;
// Classifies a symbolic expression into ELF/Darwin relocation-modifier kinds;
// also used by ARM64Operand predicates below.
124 static bool classifySymbolRef(const MCExpr *Expr,
125 ARM64MCExpr::VariantKind &ELFRefKind,
126 MCSymbolRefExpr::VariantKind &DarwinRefKind,
129 } // end anonymous namespace
133 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
// instruction operand. The Kind tag selects which member of the union below
// is active; accessors assert on Kind before reading.
// NOTE(review): chunk appears truncated — several struct/union members and
// the KindTy enumerators are not visible here.
135 class ARM64Operand : public MCParsedAsmOperand {
// Memory addressing sub-modes for k_Memory operands.
138 ImmediateOffset, // pre-indexed, no writeback
139 RegisterOffset // register offset, with optional extend
// Source locations for diagnostics; OffsetLoc points at a memory offset.
159 SMLoc StartLoc, EndLoc, OffsetLoc;
164 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
// A vector register list, e.g. { v0.8b, v1.8b } — consecutive registers.
172 struct VectorListOp {
175 unsigned NumElements;
176 unsigned ElementKind;
// A lane index applied to a vector register, e.g. v0.s[2].
179 struct VectorIndexOp {
188 unsigned Val; // Encoded 8-bit representation.
192 unsigned Val; // Not the enum since not all values have names.
// System register operand: spelled name plus subtarget feature context.
198 uint64_t FeatureBits; // We need to pass through information about which
199 // core we are compiling for so that the SysReg
200 // Mappers can appropriately conditionalize.
219 // This is for all forms of ARM64 address expressions
221 unsigned BaseRegNum, OffsetRegNum;
222 ARM64_AM::ExtendType ExtType;
225 const MCExpr *OffsetImm;
// Anonymous-union members, one per operand Kind.
232 struct VectorListOp VectorList;
233 struct VectorIndexOp VectorIndex;
235 struct FPImmOp FPImm;
236 struct BarrierOp Barrier;
237 struct SysRegOp SysReg;
238 struct SysCRImmOp SysCRImm;
239 struct PrefetchOp Prefetch;
240 struct ShifterOp Shifter;
241 struct ExtendOp Extend;
245 // Keep the MCContext around as the MCExprs may need manipulated during
246 // the add<>Operands() calls.
249 ARM64Operand(KindTy K, MCContext &_Ctx)
250 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
// Copy constructor: copies only the union member matching o's Kind.
253 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
255 StartLoc = o.StartLoc;
274 VectorList = o.VectorList;
277 VectorIndex = o.VectorIndex;
283 SysCRImm = o.SysCRImm;
286 Prefetch = o.Prefetch;
// Kind-checked accessors. Each asserts the operand is of the matching Kind
// before reading the corresponding union member.
300 /// getStartLoc - Get the location of the first token of this operand.
301 SMLoc getStartLoc() const override { return StartLoc; }
302 /// getEndLoc - Get the location of the last token of this operand.
303 SMLoc getEndLoc() const override { return EndLoc; }
304 /// getOffsetLoc - Get the location of the offset of this memory operand.
305 SMLoc getOffsetLoc() const { return OffsetLoc; }
307 StringRef getToken() const {
308 assert(Kind == k_Token && "Invalid access!");
309 return StringRef(Tok.Data, Tok.Length);
312 bool isTokenSuffix() const {
313 assert(Kind == k_Token && "Invalid access!");
317 const MCExpr *getImm() const {
318 assert(Kind == k_Immediate && "Invalid access!");
322 unsigned getFPImm() const {
323 assert(Kind == k_FPImm && "Invalid access!");
327 unsigned getBarrier() const {
328 assert(Kind == k_Barrier && "Invalid access!");
332 unsigned getReg() const override {
333 assert(Kind == k_Register && "Invalid access!");
337 unsigned getVectorListStart() const {
338 assert(Kind == k_VectorList && "Invalid access!");
339 return VectorList.RegNum;
342 unsigned getVectorListCount() const {
343 assert(Kind == k_VectorList && "Invalid access!");
344 return VectorList.Count;
347 unsigned getVectorIndex() const {
348 assert(Kind == k_VectorIndex && "Invalid access!");
349 return VectorIndex.Val;
352 StringRef getSysReg() const {
353 assert(Kind == k_SysReg && "Invalid access!");
354 return StringRef(SysReg.Data, SysReg.Length);
357 uint64_t getSysRegFeatureBits() const {
358 assert(Kind == k_SysReg && "Invalid access!");
359 return SysReg.FeatureBits;
362 unsigned getSysCR() const {
363 assert(Kind == k_SysCR && "Invalid access!");
367 unsigned getPrefetch() const {
368 assert(Kind == k_Prefetch && "Invalid access!");
372 unsigned getShifter() const {
373 assert(Kind == k_Shifter && "Invalid access!");
377 unsigned getExtend() const {
378 assert(Kind == k_Extend && "Invalid access!");
// Immediate-class predicates, called by the auto-generated matcher to decide
// whether this operand fits a given instruction operand class. Each checks
// for a constant expression and range-checks its value.
// NOTE(review): the `if (!isImm()) return false;` / `if (!MCE) return false;`
// guard lines present in the upstream source are missing from this chunk.
382 bool isImm() const override { return Kind == k_Immediate; }
// Signed 9-bit immediate, [-256, 255] (unscaled load/store offsets).
383 bool isSImm9() const {
386 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
389 int64_t Val = MCE->getValue();
390 return (Val >= -256 && Val < 256);
// Signed 7-bit immediates scaled by 4/8/16 (load/store pair offsets).
392 bool isSImm7s4() const {
395 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
398 int64_t Val = MCE->getValue();
399 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
401 bool isSImm7s8() const {
404 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
407 int64_t Val = MCE->getValue();
408 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
410 bool isSImm7s16() const {
413 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
416 int64_t Val = MCE->getValue();
417 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
// Unsigned / one-based immediate ranges (isImmA_B accepts [A, B]).
419 bool isImm0_7() const {
422 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
425 int64_t Val = MCE->getValue();
426 return (Val >= 0 && Val < 8);
428 bool isImm1_8() const {
431 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
434 int64_t Val = MCE->getValue();
435 return (Val > 0 && Val < 9);
437 bool isImm0_15() const {
440 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
443 int64_t Val = MCE->getValue();
444 return (Val >= 0 && Val < 16);
446 bool isImm1_16() const {
449 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
452 int64_t Val = MCE->getValue();
453 return (Val > 0 && Val < 17);
455 bool isImm0_31() const {
458 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
461 int64_t Val = MCE->getValue();
462 return (Val >= 0 && Val < 32);
464 bool isImm1_31() const {
467 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
470 int64_t Val = MCE->getValue();
471 return (Val >= 1 && Val < 32);
473 bool isImm1_32() const {
476 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
479 int64_t Val = MCE->getValue();
480 return (Val >= 1 && Val < 33);
482 bool isImm0_63() const {
485 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
488 int64_t Val = MCE->getValue();
489 return (Val >= 0 && Val < 64);
491 bool isImm1_63() const {
494 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
497 int64_t Val = MCE->getValue();
498 return (Val >= 1 && Val < 64);
500 bool isImm1_64() const {
503 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
506 int64_t Val = MCE->getValue();
507 return (Val >= 1 && Val < 65);
509 bool isImm0_127() const {
512 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
515 int64_t Val = MCE->getValue();
516 return (Val >= 0 && Val < 128);
518 bool isImm0_255() const {
521 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
524 int64_t Val = MCE->getValue();
525 return (Val >= 0 && Val < 256);
527 bool isImm0_65535() const {
530 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
533 int64_t Val = MCE->getValue();
534 return (Val >= 0 && Val < 65536);
// Bitmask-immediate encodability checks (AND/ORR/EOR logical immediates).
536 bool isLogicalImm32() const {
539 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
542 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
544 bool isLogicalImm64() const {
547 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
550 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
552 bool isSIMDImmType10() const {
555 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
558 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
// PC-relative branch targets: byte offsets within the signed N-bit
// word-scaled encodable range (26/19/14 bits, scaled by 4).
560 bool isBranchTarget26() const {
563 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
566 int64_t Val = MCE->getValue();
569 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
571 bool isPCRelLabel19() const {
574 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
577 int64_t Val = MCE->getValue();
580 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
582 bool isBranchTarget14() const {
585 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
588 int64_t Val = MCE->getValue();
591 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
// MOVZ/MOVK symbolic-operand predicates. A MOVW-class symbol operand is a
// symbol reference whose ELF modifier (:abs_gN:, :tprel_gN:, ...) is in the
// allowed set for the particular instruction/half-word position; Darwin
// modifiers are rejected entirely (DarwinRefKind must be VK_None).
594 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
598 ARM64MCExpr::VariantKind ELFRefKind;
599 MCSymbolRefExpr::VariantKind DarwinRefKind;
601 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
605 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
// Linear scan is fine: modifier lists are at most a handful of entries.
608 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
609 if (ELFRefKind == AllowedModifiers[i])
// Per-half-word variants: G3..G0 select which 16-bit chunk of the symbol
// value the MOVZ/MOVK materializes; _NC variants are the no-overflow-check
// forms used by MOVK.
616 bool isMovZSymbolG3() const {
617 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
618 return isMovWSymbol(Variants);
621 bool isMovZSymbolG2() const {
622 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
623 ARM64MCExpr::VK_ABS_G2_S,
624 ARM64MCExpr::VK_TPREL_G2,
625 ARM64MCExpr::VK_DTPREL_G2 };
626 return isMovWSymbol(Variants);
629 bool isMovZSymbolG1() const {
630 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
631 ARM64MCExpr::VK_ABS_G1_S,
632 ARM64MCExpr::VK_GOTTPREL_G1,
633 ARM64MCExpr::VK_TPREL_G1,
634 ARM64MCExpr::VK_DTPREL_G1, };
635 return isMovWSymbol(Variants);
638 bool isMovZSymbolG0() const {
639 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
640 ARM64MCExpr::VK_ABS_G0_S,
641 ARM64MCExpr::VK_TPREL_G0,
642 ARM64MCExpr::VK_DTPREL_G0 };
643 return isMovWSymbol(Variants);
646 bool isMovKSymbolG3() const {
647 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
648 return isMovWSymbol(Variants);
651 bool isMovKSymbolG2() const {
652 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
653 return isMovWSymbol(Variants);
656 bool isMovKSymbolG1() const {
657 static ARM64MCExpr::VariantKind Variants[] = {
658 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
659 ARM64MCExpr::VK_DTPREL_G1_NC
661 return isMovWSymbol(Variants);
664 bool isMovKSymbolG0() const {
665 static ARM64MCExpr::VariantKind Variants[] = {
666 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
667 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
669 return isMovWSymbol(Variants);
// Simple kind predicates plus system-register and vector-operand checks.
672 bool isFPImm() const { return Kind == k_FPImm; }
673 bool isBarrier() const { return Kind == k_Barrier; }
674 bool isSysReg() const { return Kind == k_SysReg; }
// MRS/MSR system registers differ (some are read-only/write-only), so each
// direction uses its own mapper; the mapper validates the spelled name
// against the current subtarget's feature bits.
675 bool isMRSSystemRegister() const {
676 if (!isSysReg()) return false;
678 bool IsKnownRegister;
679 auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
680 Mapper.fromString(getSysReg(), IsKnownRegister);
682 return IsKnownRegister;
684 bool isMSRSystemRegister() const {
685 if (!isSysReg()) return false;
687 bool IsKnownRegister;
688 auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
689 Mapper.fromString(getSysReg(), IsKnownRegister);
691 return IsKnownRegister;
693 bool isSystemPStateField() const {
694 if (!isSysReg()) return false;
696 bool IsKnownRegister;
697 ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
699 return IsKnownRegister;
// Scalar vs. vector registers share k_Register; Reg.isVector disambiguates.
701 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
702 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
// "Lo" = restricted to the lower half of the FP/SIMD register file
// (FPR128_lo register class).
703 bool isVectorRegLo() const {
704 return Kind == k_Register && Reg.isVector &&
705 ARM64MCRegisterClasses[ARM64::FPR128_loRegClassID].contains(Reg.RegNum);
708 /// Is this a vector list with the type implicit (presumably attached to the
709 /// instruction itself)?
710 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
711 return Kind == k_VectorList && VectorList.Count == NumRegs &&
712 !VectorList.ElementKind;
// Vector list with an explicit type, e.g. { v0.8b, v1.8b }: register count,
// element count, and element-kind letter must all match.
715 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
716 bool isTypedVectorList() const {
717 if (Kind != k_VectorList)
719 if (VectorList.Count != NumRegs)
721 if (VectorList.ElementKind != ElementKind)
723 return VectorList.NumElements == NumElements;
// Lane indices: valid range depends on element size (B=16, H=8, S=4, D=2).
726 bool isVectorIndexB() const {
727 return Kind == k_VectorIndex && VectorIndex.Val < 16;
729 bool isVectorIndexH() const {
730 return Kind == k_VectorIndex && VectorIndex.Val < 8;
732 bool isVectorIndexS() const {
733 return Kind == k_VectorIndex && VectorIndex.Val < 4;
735 bool isVectorIndexD() const {
736 return Kind == k_VectorIndex && VectorIndex.Val < 2;
738 bool isToken() const override { return Kind == k_Token; }
739 bool isTokenEqual(StringRef Str) const {
740 return Kind == k_Token && getToken() == Str;
742 bool isMem() const override { return Kind == k_Memory; }
743 bool isSysCR() const { return Kind == k_SysCR; }
744 bool isPrefetch() const { return Kind == k_Prefetch; }
745 bool isShifter() const { return Kind == k_Shifter; }
746 bool isExtend() const {
747 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
749 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
750 return ST == ARM64_AM::LSL;
752 return Kind == k_Extend;
754 bool isExtend64() const {
755 if (Kind != k_Extend)
757 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
758 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
759 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
761 bool isExtendLSL64() const {
762 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
764 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
765 return ST == ARM64_AM::LSL;
767 if (Kind != k_Extend)
769 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
770 return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
// Shifter-operand predicates. Each validates the packed shift-type/amount in
// Shifter.Val against the forms an instruction class accepts.
// NOTE(review): the comments on isMovImm32Shifter/isMovImm64Shifter were
// swapped relative to the code (32-bit MOVi allows LSL #0/#16; 64-bit allows
// #0/#16/#32/#48); corrected below to match the checks.
773 bool isArithmeticShifter() const {
777 // An arithmetic shifter is LSL, LSR, or ASR.
778 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
779 return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
782 bool isMovImm32Shifter() const {
786 // A 32-bit MOVi shifter is LSL of 0 or 16.
787 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
788 if (ST != ARM64_AM::LSL)
790 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
791 return (Val == 0 || Val == 16);
794 bool isMovImm64Shifter() const {
798 // A 64-bit MOVi shifter is LSL of 0, 16, 32, or 48.
799 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
800 if (ST != ARM64_AM::LSL)
802 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
803 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
806 bool isAddSubShifter() const {
810 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
811 unsigned Val = Shifter.Val;
812 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
813 (ARM64_AM::getShiftValue(Val) == 0 ||
814 ARM64_AM::getShiftValue(Val) == 12);
817 bool isLogicalVecShifter() const {
821 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
822 unsigned Val = Shifter.Val;
823 unsigned Shift = ARM64_AM::getShiftValue(Val);
824 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
825 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
828 bool isLogicalVecHalfWordShifter() const {
829 if (!isLogicalVecShifter())
832 // A logical vector half-word shifter is a left shift by 0 or 8.
833 unsigned Val = Shifter.Val;
834 unsigned Shift = ARM64_AM::getShiftValue(Val);
835 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
836 (Shift == 0 || Shift == 8);
839 bool isMoveVecShifter() const {
843 // A move vector shifter is an MSL shift of 8 or 16.
844 unsigned Val = Shifter.Val;
845 unsigned Shift = ARM64_AM::getShiftValue(Val);
846 return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
847 (Shift == 8 || Shift == 16);
// Memory-operand predicates. RegisterOffset forms check the extend/shift
// amount against the access size; ImmediateOffset forms range-check the
// offset expression.
// NOTE(review): several guard lines (isMem() checks, `return false;`/`}`)
// from the upstream source are missing from this chunk.
850 bool isMemoryRegisterOffset8() const {
851 return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
854 bool isMemoryRegisterOffset16() const {
855 return isMem() && Mem.Mode == RegisterOffset &&
856 (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
859 bool isMemoryRegisterOffset32() const {
860 return isMem() && Mem.Mode == RegisterOffset &&
861 (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
864 bool isMemoryRegisterOffset64() const {
865 return isMem() && Mem.Mode == RegisterOffset &&
866 (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
869 bool isMemoryRegisterOffset128() const {
870 return isMem() && Mem.Mode == RegisterOffset &&
871 (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
874 bool isMemoryUnscaled() const {
877 if (Mem.Mode != ImmediateOffset)
881 // Make sure the immediate value is valid.
882 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
885 // The offset must fit in a signed 9-bit unscaled immediate.
886 int64_t Value = CE->getValue();
887 return (Value >= -256 && Value < 256);
889 // Fallback unscaled operands are for aliases of LDR/STR that fall back
890 // to LDUR/STUR when the offset is not legal for the former but is for
891 // the latter. As such, in addition to checking for being a legal unscaled
892 // address, also check that it is not a legal scaled address. This avoids
893 // ambiguity in the matcher.
894 bool isMemoryUnscaledFB8() const {
895 return isMemoryUnscaled() && !isMemoryIndexed8();
897 bool isMemoryUnscaledFB16() const {
898 return isMemoryUnscaled() && !isMemoryIndexed16();
900 bool isMemoryUnscaledFB32() const {
901 return isMemoryUnscaled() && !isMemoryIndexed32();
903 bool isMemoryUnscaledFB64() const {
904 return isMemoryUnscaled() && !isMemoryIndexed64();
906 bool isMemoryUnscaledFB128() const {
907 return isMemoryUnscaled() && !isMemoryIndexed128();
// Scaled 12-bit unsigned offset check, shared by the size-specific wrappers
// below. Also accepts page-offset symbol expressions (e.g. :lo12:sym) whose
// relocations the fixup machinery will resolve.
909 bool isMemoryIndexed(unsigned Scale) const {
912 if (Mem.Mode != ImmediateOffset)
916 // Make sure the immediate value is valid.
917 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
920 // The offset must be a positive multiple of the scale and in range of
921 // encoding with a 12-bit immediate.
922 int64_t Value = CE->getValue();
923 return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
926 // If it's not a constant, check for some expressions we know.
927 const MCExpr *Expr = Mem.OffsetImm;
928 ARM64MCExpr::VariantKind ELFRefKind;
929 MCSymbolRefExpr::VariantKind DarwinRefKind;
931 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
933 // If we don't understand the expression, assume the best and
934 // let the fixup and relocation code deal with it.
938 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
939 ELFRefKind == ARM64MCExpr::VK_LO12 ||
940 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
941 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
942 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
943 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
944 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
945 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
946 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
947 // Note that we don't range-check the addend. It's adjusted modulo page
948 // size when converted, so there is no "out of range" condition when using
950 return Addend >= 0 && (Addend % Scale) == 0;
951 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
952 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
953 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
// Size-specific wrappers: scale = access size in bytes.
959 bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
960 bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
961 bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
962 bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
963 bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
964 bool isMemoryNoIndex() const {
967 if (Mem.Mode != ImmediateOffset)
972 // Make sure the immediate value is valid. Only zero is allowed.
973 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
974 if (!CE || CE->getValue() != 0)
978 bool isMemorySIMDNoIndex() const {
981 if (Mem.Mode != ImmediateOffset)
983 return Mem.OffsetImm == nullptr;
// Pre/post-indexed forms: signed 9-bit, or signed 7-bit scaled by 4/8/16.
// The offset was range-checked at parse time, hence the asserts here.
985 bool isMemoryIndexedSImm9() const {
986 if (!isMem() || Mem.Mode != ImmediateOffset)
990 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
991 assert(CE && "Non-constant pre-indexed offset!");
992 int64_t Value = CE->getValue();
993 return Value >= -256 && Value <= 255;
995 bool isMemoryIndexed32SImm7() const {
996 if (!isMem() || Mem.Mode != ImmediateOffset)
1000 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1001 assert(CE && "Non-constant pre-indexed offset!");
1002 int64_t Value = CE->getValue();
1003 return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
1005 bool isMemoryIndexed64SImm7() const {
1006 if (!isMem() || Mem.Mode != ImmediateOffset)
1010 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1011 assert(CE && "Non-constant pre-indexed offset!");
1012 int64_t Value = CE->getValue();
1013 return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
1015 bool isMemoryIndexed128SImm7() const {
1016 if (!isMem() || Mem.Mode != ImmediateOffset)
1020 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1021 assert(CE && "Non-constant pre-indexed offset!");
1022 int64_t Value = CE->getValue();
1023 return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
// ADRP: +/-1MiB-page range (21-bit signed page count, 4KiB pages).
1026 bool isAdrpLabel() const {
1027 // Validation was handled during parsing, so we just sanity check that
1028 // something didn't go haywire.
1032 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1033 int64_t Val = CE->getValue();
1034 int64_t Min = - (4096 * (1LL << (21 - 1)));
1035 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1036 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
// ADR: 21-bit signed byte offset.
1042 bool isAdrLabel() const {
1043 // Validation was handled during parsing, so we just sanity check that
1044 // something didn't go haywire.
1048 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1049 int64_t Val = CE->getValue();
1050 int64_t Min = - (1LL << (21 - 1));
1051 int64_t Max = ((1LL << (21 - 1)) - 1);
1052 return Val >= Min && Val <= Max;
// MCInst emission helpers: after the matcher accepts an operand class, the
// corresponding add<Class>Operands method appends the encoded MCOperand(s).
// Each asserts N == 1 (one MCInst operand produced).
1058 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1059 // Add as immediates when possible. Null MCExpr = 0.
1061 Inst.addOperand(MCOperand::CreateImm(0));
1062 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1063 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1065 Inst.addOperand(MCOperand::CreateExpr(Expr));
1068 void addRegOperands(MCInst &Inst, unsigned N) const {
1069 assert(N == 1 && "Invalid number of operands!");
1070 Inst.addOperand(MCOperand::CreateReg(getReg()));
1073 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1074 assert(N == 1 && "Invalid number of operands!");
1075 Inst.addOperand(MCOperand::CreateReg(getReg()));
1078 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1079 assert(N == 1 && "Invalid number of operands!");
1080 Inst.addOperand(MCOperand::CreateReg(getReg()));
// Vector lists are encoded as a single super-register (D0_D1, Q0_Q1_Q2, ...)
// computed from the list's first register; parsing produced a Q-register
// base, hence the `- ARM64::Q0` rebase.
1083 template <unsigned NumRegs>
1084 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1085 assert(N == 1 && "Invalid number of operands!");
1086 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1087 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1088 unsigned FirstReg = FirstRegs[NumRegs - 1];
1091 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1094 template <unsigned NumRegs>
1095 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1096 assert(N == 1 && "Invalid number of operands!");
1097 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1098 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1099 unsigned FirstReg = FirstRegs[NumRegs - 1];
1102 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1105 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1106 assert(N == 1 && "Invalid number of operands!");
1107 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1110 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1111 assert(N == 1 && "Invalid number of operands!");
1112 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1115 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1116 assert(N == 1 && "Invalid number of operands!");
1117 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1120 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1121 assert(N == 1 && "Invalid number of operands!");
1122 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1125 void addImmOperands(MCInst &Inst, unsigned N) const {
1126 assert(N == 1 && "Invalid number of operands!");
1127 // If this is a pageoff symrefexpr with an addend, adjust the addend
1128 // to be only the page-offset portion. Otherwise, just add the expr
1130 addExpr(Inst, getImm());
// ADRP operands encode a page count: constant byte offsets are shifted
// right by 12; symbolic offsets are left for the fixup to resolve.
1133 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1134 assert(N == 1 && "Invalid number of operands!");
1135 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1137 addExpr(Inst, getImm());
1139 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1142 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1143 addImmOperands(Inst, N);
// Scaled immediates are divided down to the encoded field value here; the
// is<Class>() predicate already verified divisibility and range.
1146 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1147 assert(N == 1 && "Invalid number of operands!");
1148 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1149 assert(MCE && "Invalid constant immediate operand!");
1150 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1153 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1154 assert(N == 1 && "Invalid number of operands!");
1155 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1156 assert(MCE && "Invalid constant immediate operand!");
1157 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1160 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1161 assert(N == 1 && "Invalid number of operands!");
1162 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1163 assert(MCE && "Invalid constant immediate operand!");
1164 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1167 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1168 assert(N == 1 && "Invalid number of operands!");
1169 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1170 assert(MCE && "Invalid constant immediate operand!");
1171 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// Plain-range immediates: emitted verbatim (range already validated by the
// matching is<Class>() predicate above).
1174 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1175 assert(N == 1 && "Invalid number of operands!");
1176 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1177 assert(MCE && "Invalid constant immediate operand!");
1178 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1181 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1182 assert(N == 1 && "Invalid number of operands!");
1183 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1184 assert(MCE && "Invalid constant immediate operand!");
1185 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1188 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1191 assert(MCE && "Invalid constant immediate operand!");
1192 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1195 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1196 assert(N == 1 && "Invalid number of operands!");
1197 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1198 assert(MCE && "Invalid constant immediate operand!");
1199 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1202 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1203 assert(N == 1 && "Invalid number of operands!");
1204 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1205 assert(MCE && "Invalid constant immediate operand!");
1206 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1209 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1210 assert(N == 1 && "Invalid number of operands!");
1211 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1212 assert(MCE && "Invalid constant immediate operand!");
1213 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1216 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 assert(MCE && "Invalid constant immediate operand!");
1220 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1223 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1224 assert(N == 1 && "Invalid number of operands!");
1225 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1226 assert(MCE && "Invalid constant immediate operand!");
1227 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1230 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1231 assert(N == 1 && "Invalid number of operands!");
1232 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1233 assert(MCE && "Invalid constant immediate operand!");
1234 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1237 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1238 assert(N == 1 && "Invalid number of operands!");
1239 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1240 assert(MCE && "Invalid constant immediate operand!");
1241 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1244 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1245 assert(N == 1 && "Invalid number of operands!");
1246 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1247 assert(MCE && "Invalid constant immediate operand!");
1248 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1251 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1252 assert(N == 1 && "Invalid number of operands!");
1253 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1254 assert(MCE && "Invalid constant immediate operand!");
1255 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1258 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1259 assert(N == 1 && "Invalid number of operands!");
1260 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1261 assert(MCE && "Invalid constant immediate operand!");
1262 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Adders for bitmask ("logical") immediates. The source value is re-encoded
// into the AArch64 N:immr:imms bitmask form for the given register width
// before being attached to the instruction.
1265 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1266 assert(N == 1 && "Invalid number of operands!");
1267 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1268 assert(MCE && "Invalid logical immediate operand!");
1269 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1270 Inst.addOperand(MCOperand::CreateImm(encoding));
1273 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1274 assert(N == 1 && "Invalid number of operands!");
1275 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1276 assert(MCE && "Invalid logical immediate operand!");
1277 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1278 Inst.addOperand(MCOperand::CreateImm(encoding));
// Adder for an AdvSIMD modified-immediate (type 10): the constant is
// re-encoded via encodeAdvSIMDModImmType10 before being emitted.
1281 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1282 assert(N == 1 && "Invalid number of operands!");
1283 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1284 assert(MCE && "Invalid immediate operand!");
1285 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1286 Inst.addOperand(MCOperand::CreateImm(encoding));
// Adders for PC-relative branch/label targets. A constant offset is emitted
// with its two always-zero low bits shifted off (instruction encoding form);
// a symbolic target is forwarded as-is via addExpr so the fixup machinery
// can resolve it later. NOTE(review): the early-out for the non-constant
// (MCE == null) case is elided in this excerpt -- the addExpr line below is
// presumably guarded by it; verify against the full file.
1289 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1290 // Branch operands don't encode the low bits, so shift them off
1291 // here. If it's a label, however, just put it on directly as there's
1292 // not enough information now to do anything.
1293 assert(N == 1 && "Invalid number of operands!");
1294 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1296 addExpr(Inst, getImm());
1299 assert(MCE && "Invalid constant immediate operand!");
1300 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1303 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
1304 // Branch operands don't encode the low bits, so shift them off
1305 // here. If it's a label, however, just put it on directly as there's
1306 // not enough information now to do anything.
1307 assert(N == 1 && "Invalid number of operands!");
1308 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1310 addExpr(Inst, getImm());
1313 assert(MCE && "Invalid constant immediate operand!");
1314 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1317 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1318 // Branch operands don't encode the low bits, so shift them off
1319 // here. If it's a label, however, just put it on directly as there's
1320 // not enough information now to do anything.
1321 assert(N == 1 && "Invalid number of operands!");
1322 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1324 addExpr(Inst, getImm());
1327 assert(MCE && "Invalid constant immediate operand!");
1328 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Adders for operand kinds whose value is already stored in encoded form on
// this ARM64Operand: FP immediates, barrier options, system registers (MRS /
// MSR / PState are looked up by name through the appropriate mapper),
// system-instruction CR names, and prefetch hints.
1331 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1336 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1337 assert(N == 1 && "Invalid number of operands!");
1338 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
1341 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1342 assert(N == 1 && "Invalid number of operands!");
// Translate the textual sysreg name to its encoding; the declaration of the
// Valid out-flag (and its check) is elided in this excerpt.
1345 auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
1346 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1348 Inst.addOperand(MCOperand::CreateImm(Bits));
1351 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1355 auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
1356 uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
1358 Inst.addOperand(MCOperand::CreateImm(Bits));
1361 void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
1362 assert(N == 1 && "Invalid number of operands!");
1365 uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
1367 Inst.addOperand(MCOperand::CreateImm(Bits));
1370 void addSysCROperands(MCInst &Inst, unsigned N) const {
1371 assert(N == 1 && "Invalid number of operands!");
1372 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1375 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1376 assert(N == 1 && "Invalid number of operands!");
1377 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Adders for the various shifter operand classes. They all emit the same
// packed shifter value (type + amount, built by getShifterImm at creation
// time); the per-class restrictions (e.g. which shift types / amounts are
// legal for arithmetic vs. logical vs. mov-immediate forms) live in the
// corresponding matcher predicates, not here.
1380 void addShifterOperands(MCInst &Inst, unsigned N) const {
1381 assert(N == 1 && "Invalid number of operands!");
1382 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1385 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1386 assert(N == 1 && "Invalid number of operands!");
1387 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1390 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1391 assert(N == 1 && "Invalid number of operands!");
1392 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1395 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1396 assert(N == 1 && "Invalid number of operands!");
1397 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1400 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1401 assert(N == 1 && "Invalid number of operands!");
1402 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1405 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1406 assert(N == 1 && "Invalid number of operands!");
1407 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1410 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1411 assert(N == 1 && "Invalid number of operands!");
1412 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1415 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1416 assert(N == 1 && "Invalid number of operands!");
1417 Inst.addOperand(MCOperand::CreateImm(getShifter()));
// Adders for extend operands. "lsl" parses as a k_Shifter, so when the
// operand is actually a shifter it is converted to the equivalent UXTW
// (32-bit) / UXTX (64-bit) arithmetic-extend encoding here. NOTE(review):
// the isShifter()/else branch structure around the two code paths is elided
// in this excerpt; verify against the full file.
1420 void addExtendOperands(MCInst &Inst, unsigned N) const {
1421 assert(N == 1 && "Invalid number of operands!");
1422 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
1424 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1425 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1426 ARM64_AM::getShiftValue(getShifter()));
1427 Inst.addOperand(MCOperand::CreateImm(imm));
1429 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1432 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1433 assert(N == 1 && "Invalid number of operands!");
1434 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1437 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1438 assert(N == 1 && "Invalid number of operands!");
1439 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1441 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1442 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1443 ARM64_AM::getShiftValue(getShifter()));
1444 Inst.addOperand(MCOperand::CreateImm(imm));
1446 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// Adders for register-offset addressing modes: base reg, offset reg (always
// widened to the X form), then the packed extend/shift immediate. DoShift
// says whether the offset is scaled; the per-access-size wrappers below
// derive it from the parsed shift amount (log2 of the access size), except
// the 8-bit form where any explicit shift (necessarily #0) enables it.
1449 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1450 assert(N == 3 && "Invalid number of operands!");
1452 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1453 Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
1454 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1455 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
1458 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1459 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1462 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1463 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1466 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1467 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1470 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1471 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1474 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1475 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
// Common adder for scaled immediate-offset addressing. Emits the base
// register, then the offset divided by Scale: a missing offset becomes 0, a
// constant is divided directly (it must already be a multiple of Scale), and
// a symbolic offset is either left for the linker (@pageoff + addend) or
// wrapped in a divide expression for the fixup evaluator. NOTE(review):
// several control-flow lines (returns / condition pieces around 1509-1511)
// are elided in this excerpt.
1478 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1479 unsigned Scale) const {
1480 // Add the base register operand.
1481 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1483 if (!Mem.OffsetImm) {
1484 // There isn't an offset.
1485 Inst.addOperand(MCOperand::CreateImm(0));
1489 // Add the offset operand.
1490 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1491 assert(CE->getValue() % Scale == 0 &&
1492 "Offset operand must be multiple of the scale!");
1494 // The MCInst offset operand doesn't include the low bits (like the
1495 // instruction encoding).
1496 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1499 // If this is a pageoff symrefexpr with an addend, the linker will
1500 // do the scaling of the addend.
1502 // Otherwise we don't know what this is, so just add the scaling divide to
1503 // the expression and let the MC fixup evaluation code deal with it.
1504 const MCExpr *Expr = Mem.OffsetImm;
1505 ARM64MCExpr::VariantKind ELFRefKind;
1506 MCSymbolRefExpr::VariantKind DarwinRefKind;
1509 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1511 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1512 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1516 Inst.addOperand(MCOperand::CreateExpr(Expr));
// Adder for unscaled immediate-offset addressing (e.g. ldur/stur): base
// register plus the raw (unscaled) constant offset, defaulting to 0 when no
// offset was written. Only constant offsets are accepted (cast<> asserts).
1519 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1520 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1521 // Add the base register operand.
1522 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1524 // Add the offset operand.
1526 Inst.addOperand(MCOperand::CreateImm(0));
1528 // Only constant offsets supported.
1529 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1530 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// Per-access-size wrappers over addMemoryIndexedOperands; the scale equals
// the access size in bytes (16/8/4/2/1).
1534 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1535 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1536 addMemoryIndexedOperands(Inst, N, 16);
1539 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1540 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1541 addMemoryIndexedOperands(Inst, N, 8);
1544 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1545 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1546 addMemoryIndexedOperands(Inst, N, 4);
1549 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1550 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1551 addMemoryIndexedOperands(Inst, N, 2);
1554 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1555 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1556 addMemoryIndexedOperands(Inst, N, 1);
// Adders for no-offset addressing forms: only the base register is emitted.
1559 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1560 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1561 // Add the base register operand (the offset is always zero, so ignore it).
1562 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1565 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1566 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1567 // Add the base register operand (the offset is always zero, so ignore it).
1568 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
// Common adder for pre/post-indexed (writeback) addressing: base register
// plus a constant offset that must be a multiple of Scale. The Offset local
// is declared on an elided line; a missing OffsetImm leaves it at its
// initial value. NOTE(review): whether the emitted Offset is divided by
// Scale happens on elided lines -- verify against the full file.
1571 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1572 unsigned Scale) const {
1573 assert(N == 2 && "Invalid number of operands!");
1575 // Add the base register operand.
1576 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1578 // Add the offset operand.
1580 if (Mem.OffsetImm) {
1581 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1582 assert(CE && "Non-constant indexed offset operand!");
1583 Offset = CE->getValue();
1587 assert(Offset % Scale == 0 &&
1588 "Offset operand must be a multiple of the scale!");
1592 Inst.addOperand(MCOperand::CreateImm(Offset));
// Wrappers selecting the scale for each writeback form: simm9 is byte
// granular; the simm7 pair forms scale by the access size (4/8/16).
1595 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1596 addMemoryWritebackIndexedOperands(Inst, N, 1);
1599 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1600 addMemoryWritebackIndexedOperands(Inst, N, 4);
1603 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1604 addMemoryWritebackIndexedOperands(Inst, N, 8);
1607 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1608 addMemoryWritebackIndexedOperands(Inst, N, 16);
// Debug dump; defined out-of-line below the class.
1611 void print(raw_ostream &OS) const override;
// Static factory functions, one per operand kind. Each allocates an
// ARM64Operand, fills in the kind-specific payload, and returns it (the
// StartLoc/EndLoc assignments and return statements sit on lines elided
// from this excerpt). Callers own the returned pointer.
1613 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1615 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1616 Op->Tok.Data = Str.data();
1617 Op->Tok.Length = Str.size();
1618 Op->Tok.IsSuffix = IsSuffix;
1624 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1625 SMLoc E, MCContext &Ctx) {
1626 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1627 Op->Reg.RegNum = RegNum;
1628 Op->Reg.isVector = isVector;
1634 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1635 unsigned NumElements, char ElementKind,
1636 SMLoc S, SMLoc E, MCContext &Ctx) {
1637 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1638 Op->VectorList.RegNum = RegNum;
1639 Op->VectorList.Count = Count;
1640 Op->VectorList.NumElements = NumElements;
1641 Op->VectorList.ElementKind = ElementKind;
1647 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1649 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1650 Op->VectorIndex.Val = Idx;
1656 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1658 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1665 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1666 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1667 Op->FPImm.Val = Val;
1673 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1674 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1675 Op->Barrier.Val = Val;
1681 static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S,
1682 uint64_t FeatureBits, MCContext &Ctx) {
1683 ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
1684 Op->SysReg.Data = Str.data();
1685 Op->SysReg.Length = Str.size();
1686 Op->SysReg.FeatureBits = FeatureBits;
// Immediate-offset memory operand; extend/shift fields get neutral defaults.
1692 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1693 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1695 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1696 Op->Mem.BaseRegNum = BaseRegNum;
1697 Op->Mem.OffsetRegNum = 0;
1698 Op->Mem.OffsetImm = Off;
1699 Op->Mem.ExtType = ARM64_AM::UXTX;
1700 Op->Mem.ShiftVal = 0;
1701 Op->Mem.ExplicitShift = false;
1702 Op->Mem.Mode = ImmediateOffset;
1703 Op->OffsetLoc = OffsetLoc;
// Register-offset memory operand; there is no immediate in this mode.
1709 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1710 ARM64_AM::ExtendType ExtType,
1711 unsigned ShiftVal, bool ExplicitShift,
1712 SMLoc S, SMLoc E, MCContext &Ctx) {
1713 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1714 Op->Mem.BaseRegNum = BaseReg;
1715 Op->Mem.OffsetRegNum = OffsetReg;
1716 Op->Mem.OffsetImm = nullptr;
1717 Op->Mem.ExtType = ExtType;
1718 Op->Mem.ShiftVal = ShiftVal;
1719 Op->Mem.ExplicitShift = ExplicitShift;
1720 Op->Mem.Mode = RegisterOffset;
1726 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1728 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1729 Op->SysCRImm.Val = Val;
1735 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1736 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1737 Op->Prefetch.Val = Val;
// Shifter/extend operands store the pre-packed (op, amount) immediate.
1743 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1744 SMLoc S, SMLoc E, MCContext &Ctx) {
1745 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1746 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1752 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1753 SMLoc S, SMLoc E, MCContext &Ctx) {
1754 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1755 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1762 } // end anonymous namespace.
// Debug dump of an operand to OS, one format per operand kind. The
// switch(Kind) scaffolding and several case labels are elided in this
// excerpt; each visible run of statements is the body of one case.
1764 void ARM64Operand::print(raw_ostream &OS) const {
1767 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
// Barrier names come from the DBarrierMapper; unknown values are printed
// numerically as "invalid".
1772 StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
1774 OS << "<barrier " << Name << ">";
1776 OS << "<barrier invalid #" << getBarrier() << ">";
1780 getImm()->print(OS);
1786 OS << "<register " << getReg() << ">";
1788 case k_VectorList: {
1789 OS << "<vectorlist ";
1790 unsigned Reg = getVectorListStart();
1791 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1792 OS << Reg + i << " ";
1797 OS << "<vectorindex " << getVectorIndex() << ">";
1800 OS << "<sysreg: " << getSysReg() << '>';
1803 OS << "'" << getToken() << "'";
1806 OS << "c" << getSysCR();
1810 StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1812 OS << "<prfop " << Name << ">";
1814 OS << "<prfop invalid #" << getPrefetch() << ">";
1818 unsigned Val = getShifter();
1819 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1820 << ARM64_AM::getShiftValue(Val) << ">";
1824 unsigned Val = getExtend();
1825 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1826 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1832 /// @name Auto-generated Match Functions
1835 static unsigned MatchRegisterName(StringRef Name);
// Map a lower-cased vector register name ("v0".."v31") to the corresponding
// Q-register enum value (the .Default for unmatched names is on an elided
// line). Vector registers are modeled on their full 128-bit Q form.
1839 static unsigned matchVectorRegName(StringRef Name) {
1840 return StringSwitch<unsigned>(Name)
1841 .Case("v0", ARM64::Q0)
1842 .Case("v1", ARM64::Q1)
1843 .Case("v2", ARM64::Q2)
1844 .Case("v3", ARM64::Q3)
1845 .Case("v4", ARM64::Q4)
1846 .Case("v5", ARM64::Q5)
1847 .Case("v6", ARM64::Q6)
1848 .Case("v7", ARM64::Q7)
1849 .Case("v8", ARM64::Q8)
1850 .Case("v9", ARM64::Q9)
1851 .Case("v10", ARM64::Q10)
1852 .Case("v11", ARM64::Q11)
1853 .Case("v12", ARM64::Q12)
1854 .Case("v13", ARM64::Q13)
1855 .Case("v14", ARM64::Q14)
1856 .Case("v15", ARM64::Q15)
1857 .Case("v16", ARM64::Q16)
1858 .Case("v17", ARM64::Q17)
1859 .Case("v18", ARM64::Q18)
1860 .Case("v19", ARM64::Q19)
1861 .Case("v20", ARM64::Q20)
1862 .Case("v21", ARM64::Q21)
1863 .Case("v22", ARM64::Q22)
1864 .Case("v23", ARM64::Q23)
1865 .Case("v24", ARM64::Q24)
1866 .Case("v25", ARM64::Q25)
1867 .Case("v26", ARM64::Q26)
1868 .Case("v27", ARM64::Q27)
1869 .Case("v28", ARM64::Q28)
1870 .Case("v29", ARM64::Q29)
1871 .Case("v30", ARM64::Q30)
1872 .Case("v31", ARM64::Q31)
// Return true if Name (case-insensitively) is a recognized vector layout
// suffix such as ".8b" or ".4s"; the individual .Case entries are on lines
// elided from this excerpt.
1876 static bool isValidVectorKind(StringRef Name) {
1877 return StringSwitch<bool>(Name.lower())
1887 // Accept the width neutral ones, too, for verbose syntax. If those
1888 // aren't used in the right places, the token operand won't match so
1889 // all will work out.
// Decode an already-validated vector kind string (e.g. ".8b") into its lane
// count and element-kind character. A two-character kind like ".b" has no
// lane count and returns early (on an elided line).
1897 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1898 char &ElementKind) {
1899 assert(isValidVectorKind(Name));
// The last character is the element kind ('b', 'h', 's', 'd', ...).
1901 ElementKind = Name.lower()[Name.size() - 1];
1904 if (Name.size() == 2)
1907 // Parse the lane count
1908 Name = Name.drop_front();
1909 while (isdigit(Name.front())) {
1910 NumElements = 10 * NumElements + (Name.front() - '0');
1911 Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register at the current token, reporting
// its source range. Returns true (failure) when tryParseRegister yields -1.
1915 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1917 StartLoc = getLoc();
1918 RegNo = tryParseRegister();
1919 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1920 return (RegNo == (unsigned)-1);
1923 /// tryParseRegister - Try to parse a register name. The token must be an
1924 /// Identifier when called, and if it is a register name the token is eaten and
1925 /// the register is added to the operand list.
1926 int ARM64AsmParser::tryParseRegister() {
1927 const AsmToken &Tok = Parser.getTok();
1928 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are matched case-insensitively by lowering first.
1930 std::string lowerCase = Tok.getString().lower();
1931 unsigned RegNum = MatchRegisterName(lowerCase);
1932 // Also handle a few aliases of registers.
// fp/lr plus the "register 31" spellings of the zero registers; the
// .Default and failure return are on elided lines.
1934 RegNum = StringSwitch<unsigned>(lowerCase)
1935 .Case("fp", ARM64::FP)
1936 .Case("lr", ARM64::LR)
1937 .Case("x31", ARM64::XZR)
1938 .Case("w31", ARM64::WZR)
1944 Parser.Lex(); // Eat identifier token.
1948 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1949 /// kind specifier. If it is a register specifier, eat the token and return it.
1950 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1951 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1952 TokError("vector register expected");
1956 StringRef Name = Parser.getTok().getString();
1957 // If there is a kind specifier, it's separated from the register name by
// a '.' -- split the identifier there ("v0.8b" -> "v0" + ".8b").
1959 size_t Start = 0, Next = Name.find('.');
1960 StringRef Head = Name.slice(Start, Next);
1961 unsigned RegNum = matchVectorRegName(Head);
1963 if (Next != StringRef::npos) {
// Kind keeps its leading '.' and is validated before being returned.
1964 Kind = Name.slice(Next, StringRef::npos);
1965 if (!isValidVectorKind(Kind)) {
1966 TokError("invalid vector kind qualifier");
1970 Parser.Lex(); // Eat the register token.
// Reached only when the name did not match a vector register.
1975 TokError("vector register expected");
// Map a system-control-register operand name ("c0".."c15", case-insensitive
// on the leading letter) to its number; the bulk of the size-dispatched
// switch bodies are on lines elided from this excerpt.
1979 static int MatchSysCRName(StringRef Name) {
1980 // Use the same layout as the tablegen'erated register name matcher. Ugly,
1982 switch (Name.size()) {
// Two-character names: "cN".
1986 if (Name[0] != 'c' && Name[0] != 'C')
// Three-character names: "c1N".
2014 if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
2035 llvm_unreachable("Unhandled SysCR operand string!");
2039 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
2040 ARM64AsmParser::OperandMatchResultTy
2041 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
// S (the operand's start location) is captured on an elided line.
2043 const AsmToken &Tok = Parser.getTok();
2044 if (Tok.isNot(AsmToken::Identifier))
2045 return MatchOperand_NoMatch;
2047 int Num = MatchSysCRName(Tok.getString());
// A negative result means the identifier is not a CR name.
2049 return MatchOperand_NoMatch;
2051 Parser.Lex(); // Eat identifier token.
2052 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
2053 return MatchOperand_Success;
2056 /// tryParsePrefetch - Try to parse a prefetch operand.
2057 ARM64AsmParser::OperandMatchResultTy
2058 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
// S (the operand's start location) is captured on an elided line.
2060 const AsmToken &Tok = Parser.getTok();
2061 // Either an identifier for named values or a 5-bit immediate.
2062 bool Hash = Tok.is(AsmToken::Hash);
2063 if (Hash || Tok.is(AsmToken::Integer)) {
2065 Parser.Lex(); // Eat hash token.
2066 const MCExpr *ImmVal;
2067 if (getParser().parseExpression(ImmVal))
2068 return MatchOperand_ParseFail;
// The expression must fold to a constant prfop encoding.
2070 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2072 TokError("immediate value expected for prefetch operand");
2073 return MatchOperand_ParseFail;
// The range check (prfop > 31) is on an elided line.
2075 unsigned prfop = MCE->getValue();
2077 TokError("prefetch operand out of range, [0,31] expected");
2078 return MatchOperand_ParseFail;
2081 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2082 return MatchOperand_Success;
// Named form: look the hint up in the PRFM mapper.
2085 if (Tok.isNot(AsmToken::Identifier)) {
2086 TokError("pre-fetch hint expected");
2087 return MatchOperand_ParseFail;
2091 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2093 TokError("pre-fetch hint expected");
2094 return MatchOperand_ParseFail;
2097 Parser.Lex(); // Eat identifier token.
2098 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2099 return MatchOperand_Success;
2102 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// instruction: the target must be an @page / @gotpage-style page-aligned
// reference (or a bare ELF symbol, treated as an implicit ABS page ref).
2104 ARM64AsmParser::OperandMatchResultTy
2105 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
// An optional '#' prefix is permitted before the expression.
2109 if (Parser.getTok().is(AsmToken::Hash)) {
2110 Parser.Lex(); // Eat hash token.
2113 if (parseSymbolicImmVal(Expr))
2114 return MatchOperand_ParseFail;
2116 ARM64MCExpr::VariantKind ELFRefKind;
2117 MCSymbolRefExpr::VariantKind DarwinRefKind;
// Addend is declared on an elided line and filled by classifySymbolRef.
2119 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2120 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2121 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2122 // No modifier was specified at all; this is the syntax for an ELF basic
2123 // ADRP relocation (unfortunately).
2124 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
2125 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2126 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
// (the Addend != 0 conjunct of this condition is on an elided line)
2128 Error(S, "gotpage label reference not allowed an addend");
2129 return MatchOperand_ParseFail;
2130 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2131 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2132 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2133 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2134 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2135 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE {
2136 // The operand must be an @page or @gotpage qualified symbolref.
2137 Error(S, "page or gotpage label reference expected")
2138 return MatchOperand_ParseFail;
2142 // We have either a label reference possibly with addend or an immediate. The
2143 // addend is a raw value here. The linker will adjust it to only reference the
// page portion of the address.
2145 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2146 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2148 return MatchOperand_Success;
2151 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// instruction. Unlike ADRP, no page-reference validation is needed, so any
// expression is accepted as the target.
2153 ARM64AsmParser::OperandMatchResultTy
2154 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
// An optional '#' prefix is permitted before the expression.
2158 if (Parser.getTok().is(AsmToken::Hash)) {
2159 Parser.Lex(); // Eat hash token.
2162 if (getParser().parseExpression(Expr))
2163 return MatchOperand_ParseFail;
2165 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2166 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2168 return MatchOperand_Success;
2171 /// tryParseFPImm - A floating point immediate expression operand.
// Accepts a real literal (encoded to the 8-bit FP immediate form; zero is
// let through for later zero-register rewriting) or an integer literal.
2172 ARM64AsmParser::OperandMatchResultTy
2173 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
// An optional '#' prefix is permitted before the value.
2177 if (Parser.getTok().is(AsmToken::Hash)) {
2178 Parser.Lex(); // Eat '#'
2182 // Handle negation, as that still comes through as a separate token.
2183 bool isNegative = false;
2184 if (Parser.getTok().is(AsmToken::Minus)) {
2188 const AsmToken &Tok = Parser.getTok();
2189 if (Tok.is(AsmToken::Real)) {
2190 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2191 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2192 // If we had a '-' in front, toggle the sign bit.
2193 IntVal ^= (uint64_t)isNegative << 63;
// getFP64Imm returns -1 when the value is not representable as an 8-bit
// floating point immediate.
2194 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2195 Parser.Lex(); // Eat the token.
2196 // Check for out of range values. As an exception, we let Zero through,
2197 // as we handle that special case in post-processing before matching in
2198 // order to use the zero register for it.
2199 if (Val == -1 && !RealVal.isZero()) {
2200 TokError("floating point value out of range");
2201 return MatchOperand_ParseFail;
2203 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2204 return MatchOperand_Success;
2206 if (Tok.is(AsmToken::Integer)) {
// Hex integers are taken as the raw 8-bit encoding; other integers are
// re-parsed as a double and encoded like the Real case above.
2208 if (!isNegative && Tok.getString().startswith("0x")) {
2209 Val = Tok.getIntVal();
2210 if (Val > 255 || Val < 0) {
2211 TokError("encoded floating point value out of range");
2212 return MatchOperand_ParseFail;
2215 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2216 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2217 // If we had a '-' in front, toggle the sign bit.
2218 IntVal ^= (uint64_t)isNegative << 63;
2219 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2221 Parser.Lex(); // Eat the token.
2222 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2223 return MatchOperand_Success;
// Without a '#' prefix a non-FP token is simply not this operand kind.
2227 return MatchOperand_NoMatch;
2229 TokError("invalid floating point immediate");
2230 return MatchOperand_ParseFail;
2233 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive; "cs"/"hs" and "cc"/"lo" are alias pairs mapping to the
// same encodings. Returns ARM64CC::Invalid for unrecognized strings.
2234 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2235 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2236 .Case("eq", ARM64CC::EQ)
2237 .Case("ne", ARM64CC::NE)
2238 .Case("cs", ARM64CC::HS)
2239 .Case("hs", ARM64CC::HS)
2240 .Case("cc", ARM64CC::LO)
2241 .Case("lo", ARM64CC::LO)
2242 .Case("mi", ARM64CC::MI)
2243 .Case("pl", ARM64CC::PL)
2244 .Case("vs", ARM64CC::VS)
2245 .Case("vc", ARM64CC::VC)
2246 .Case("hi", ARM64CC::HI)
2247 .Case("ls", ARM64CC::LS)
2248 .Case("ge", ARM64CC::GE)
2249 .Case("lt", ARM64CC::LT)
2250 .Case("gt", ARM64CC::GT)
2251 .Case("le", ARM64CC::LE)
2252 .Case("al", ARM64CC::AL)
2253 .Case("nv", ARM64CC::NV)
2254 .Default(ARM64CC::Invalid);
2258 /// parseCondCode - Parse a Condition Code operand.
// On success the condition code (optionally inverted, for aliases that need
// the opposite sense) is pushed as a constant immediate operand.
2259 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2260 bool invertCondCode) {
2262 const AsmToken &Tok = Parser.getTok();
2263 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2265 StringRef Cond = Tok.getString();
2266 unsigned CC = parseCondCodeString(Cond);
2267 if (CC == ARM64CC::Invalid)
2268 return TokError("invalid condition code");
2269 Parser.Lex(); // Eat identifier token.
// (guarded by invertCondCode on an elided line)
2272 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2274 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2276 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2280 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2281 /// them if present.
// Recognizes lsl/lsr/asr/ror/msl (either case) followed by an immediate
// shift amount in [0, 63]; pushes a k_Shifter operand on success.
2282 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2283 const AsmToken &Tok = Parser.getTok();
2284 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2285 .Case("lsl", ARM64_AM::LSL)
2286 .Case("lsr", ARM64_AM::LSR)
2287 .Case("asr", ARM64_AM::ASR)
2288 .Case("ror", ARM64_AM::ROR)
2289 .Case("msl", ARM64_AM::MSL)
2290 .Case("LSL", ARM64_AM::LSL)
2291 .Case("LSR", ARM64_AM::LSR)
2292 .Case("ASR", ARM64_AM::ASR)
2293 .Case("ROR", ARM64_AM::ROR)
2294 .Case("MSL", ARM64_AM::MSL)
2295 .Default(ARM64_AM::InvalidShift);
// Not a shift mnemonic at all; the early return is on an elided line.
2296 if (ShOp == ARM64_AM::InvalidShift)
2299 SMLoc S = Tok.getLoc();
2302 // We expect a number here.
2303 bool Hash = getLexer().is(AsmToken::Hash);
2304 if (!Hash && getLexer().isNot(AsmToken::Integer))
2305 return TokError("immediate value expected for shifter operand");
2308 Parser.Lex(); // Eat the '#'.
2310 SMLoc ExprLoc = getLoc();
2311 const MCExpr *ImmVal;
2312 if (getParser().parseExpression(ImmVal))
2315 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2317 return TokError("immediate value expected for shifter operand");
// Shift amounts are 6 bits wide; anything with higher bits set is rejected.
2319 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2320 return Error(ExprLoc, "immediate value too large for shifter operand");
2322 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2324 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2328 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2329 /// them if present.
///
/// Recognizes the [us]xt[bhwx] extend names (upper or lower case, with "lsl"
/// accepted as an alias for uxtx) and an optional '#'-prefixed immediate.
/// When no immediate follows, an extend operand with amount 0 is pushed.
2330 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2331 const AsmToken &Tok = Parser.getTok();
2332 ARM64_AM::ExtendType ExtOp =
2333 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2334 .Case("uxtb", ARM64_AM::UXTB)
2335 .Case("uxth", ARM64_AM::UXTH)
2336 .Case("uxtw", ARM64_AM::UXTW)
2337 .Case("uxtx", ARM64_AM::UXTX)
2338 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2339 .Case("sxtb", ARM64_AM::SXTB)
2340 .Case("sxth", ARM64_AM::SXTH)
2341 .Case("sxtw", ARM64_AM::SXTW)
2342 .Case("sxtx", ARM64_AM::SXTX)
2343 .Case("UXTB", ARM64_AM::UXTB)
2344 .Case("UXTH", ARM64_AM::UXTH)
2345 .Case("UXTW", ARM64_AM::UXTW)
2346 .Case("UXTX", ARM64_AM::UXTX)
2347 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2348 .Case("SXTB", ARM64_AM::SXTB)
2349 .Case("SXTH", ARM64_AM::SXTH)
2350 .Case("SXTW", ARM64_AM::SXTW)
2351 .Case("SXTX", ARM64_AM::SXTX)
2352 .Default(ARM64_AM::InvalidExtend);
2353 if (ExtOp == ARM64_AM::InvalidExtend)
// (The not-an-extend early return is on an elided line.)
2356 SMLoc S = Tok.getLoc();
// Extend with no immediate: end-of-statement or a following comma both mean
// "no shift amount"; push the operand with amount 0.
2359 if (getLexer().is(AsmToken::EndOfStatement) ||
2360 getLexer().is(AsmToken::Comma)) {
2361 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2363 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
// No '#' and no integer next: treat as an extend with an omitted amount.
2367 bool Hash = getLexer().is(AsmToken::Hash);
2368 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
2369 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2371 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
// NOTE(review): this Lex appears to be guarded by 'if (Hash)' on an elided
// line -- confirm.
2376 Parser.Lex(); // Eat the '#'.
2378 const MCExpr *ImmVal;
2379 if (getParser().parseExpression(ImmVal))
2382 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2384 return TokError("immediate value expected for extend operand");
2386 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2388 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2392 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2393 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
///
/// Emits a "sys" token operand followed by the four SYS fields
/// (op1, Cn, Cm, op2) for the named cache/translation operation, then parses
/// the optional trailing register operand.  The #op1/Cn/Cm/op2 encodings in
/// the comments below come from the ARMv8 system-instruction tables.
2394 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2395 OperandVector &Operands) {
2396 if (Name.find('.') != StringRef::npos)
2397 return TokError("invalid operand");
2401 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2403 const AsmToken &Tok = Parser.getTok();
2404 StringRef Op = Tok.getString();
2405 SMLoc S = Tok.getLoc();
2407 const MCExpr *Expr = nullptr;
// Helper: push the four SYS operands (op1 imm, Cn, Cm, op2 imm) in order.
2409 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2411 Expr = MCConstantExpr::Create(op1, getContext()); \
2412 Operands.push_back( \
2413 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2414 Operands.push_back( \
2415 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2416 Operands.push_back( \
2417 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2418 Expr = MCConstantExpr::Create(op2, getContext()); \
2419 Operands.push_back( \
2420 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2423 if (Mnemonic == "ic") {
2424 if (!Op.compare_lower("ialluis")) {
2425 // SYS #0, C7, C1, #0
2426 SYS_ALIAS(0, 7, 1, 0);
2427 } else if (!Op.compare_lower("iallu")) {
2428 // SYS #0, C7, C5, #0
2429 SYS_ALIAS(0, 7, 5, 0);
2430 } else if (!Op.compare_lower("ivau")) {
2431 // SYS #3, C7, C5, #1
2432 SYS_ALIAS(3, 7, 5, 1);
2434 return TokError("invalid operand for IC instruction");
2436 } else if (Mnemonic == "dc") {
2437 if (!Op.compare_lower("zva")) {
2438 // SYS #3, C7, C4, #1
2439 SYS_ALIAS(3, 7, 4, 1);
2440 } else if (!Op.compare_lower("ivac")) {
2441 // SYS #0, C7, C6, #1
2442 SYS_ALIAS(0, 7, 6, 1);
2443 } else if (!Op.compare_lower("isw")) {
2444 // SYS #0, C7, C6, #2
2445 SYS_ALIAS(0, 7, 6, 2);
2446 } else if (!Op.compare_lower("cvac")) {
2447 // SYS #3, C7, C10, #1
2448 SYS_ALIAS(3, 7, 10, 1);
2449 } else if (!Op.compare_lower("csw")) {
2450 // SYS #0, C7, C10, #2
2451 SYS_ALIAS(0, 7, 10, 2);
2452 } else if (!Op.compare_lower("cvau")) {
2453 // SYS #3, C7, C11, #1
2454 SYS_ALIAS(3, 7, 11, 1);
2455 } else if (!Op.compare_lower("civac")) {
2456 // SYS #3, C7, C14, #1
2457 SYS_ALIAS(3, 7, 14, 1);
2458 } else if (!Op.compare_lower("cisw")) {
2459 // SYS #0, C7, C14, #2
2460 SYS_ALIAS(0, 7, 14, 2);
2462 return TokError("invalid operand for DC instruction");
2464 } else if (Mnemonic == "at") {
2465 if (!Op.compare_lower("s1e1r")) {
2466 // SYS #0, C7, C8, #0
2467 SYS_ALIAS(0, 7, 8, 0);
2468 } else if (!Op.compare_lower("s1e2r")) {
2469 // SYS #4, C7, C8, #0
2470 SYS_ALIAS(4, 7, 8, 0);
2471 } else if (!Op.compare_lower("s1e3r")) {
2472 // SYS #6, C7, C8, #0
2473 SYS_ALIAS(6, 7, 8, 0);
2474 } else if (!Op.compare_lower("s1e1w")) {
2475 // SYS #0, C7, C8, #1
2476 SYS_ALIAS(0, 7, 8, 1);
2477 } else if (!Op.compare_lower("s1e2w")) {
2478 // SYS #4, C7, C8, #1
2479 SYS_ALIAS(4, 7, 8, 1);
2480 } else if (!Op.compare_lower("s1e3w")) {
2481 // SYS #6, C7, C8, #1
2482 SYS_ALIAS(6, 7, 8, 1);
2483 } else if (!Op.compare_lower("s1e0r")) {
2484 // SYS #0, C7, C8, #2
2485 SYS_ALIAS(0, 7, 8, 2);
2486 } else if (!Op.compare_lower("s1e0w")) {
2487 // SYS #0, C7, C8, #3
2488 SYS_ALIAS(0, 7, 8, 3);
2489 } else if (!Op.compare_lower("s12e1r")) {
2490 // SYS #4, C7, C8, #4
2491 SYS_ALIAS(4, 7, 8, 4);
2492 } else if (!Op.compare_lower("s12e1w")) {
2493 // SYS #4, C7, C8, #5
2494 SYS_ALIAS(4, 7, 8, 5);
2495 } else if (!Op.compare_lower("s12e0r")) {
2496 // SYS #4, C7, C8, #6
2497 SYS_ALIAS(4, 7, 8, 6);
2498 } else if (!Op.compare_lower("s12e0w")) {
2499 // SYS #4, C7, C8, #7
2500 SYS_ALIAS(4, 7, 8, 7);
2502 return TokError("invalid operand for AT instruction");
2504 } else if (Mnemonic == "tlbi") {
2505 if (!Op.compare_lower("vmalle1is")) {
2506 // SYS #0, C8, C3, #0
2507 SYS_ALIAS(0, 8, 3, 0);
2508 } else if (!Op.compare_lower("alle2is")) {
2509 // SYS #4, C8, C3, #0
2510 SYS_ALIAS(4, 8, 3, 0);
2511 } else if (!Op.compare_lower("alle3is")) {
2512 // SYS #6, C8, C3, #0
2513 SYS_ALIAS(6, 8, 3, 0);
2514 } else if (!Op.compare_lower("vae1is")) {
2515 // SYS #0, C8, C3, #1
2516 SYS_ALIAS(0, 8, 3, 1);
2517 } else if (!Op.compare_lower("vae2is")) {
2518 // SYS #4, C8, C3, #1
2519 SYS_ALIAS(4, 8, 3, 1);
2520 } else if (!Op.compare_lower("vae3is")) {
2521 // SYS #6, C8, C3, #1
2522 SYS_ALIAS(6, 8, 3, 1);
2523 } else if (!Op.compare_lower("aside1is")) {
2524 // SYS #0, C8, C3, #2
2525 SYS_ALIAS(0, 8, 3, 2);
2526 } else if (!Op.compare_lower("vaae1is")) {
2527 // SYS #0, C8, C3, #3
2528 SYS_ALIAS(0, 8, 3, 3);
2529 } else if (!Op.compare_lower("alle1is")) {
2530 // SYS #4, C8, C3, #4
2531 SYS_ALIAS(4, 8, 3, 4);
2532 } else if (!Op.compare_lower("vale1is")) {
2533 // SYS #0, C8, C3, #5
2534 SYS_ALIAS(0, 8, 3, 5);
2535 } else if (!Op.compare_lower("vaale1is")) {
2536 // SYS #0, C8, C3, #7
2537 SYS_ALIAS(0, 8, 3, 7);
2538 } else if (!Op.compare_lower("vmalle1")) {
2539 // SYS #0, C8, C7, #0
2540 SYS_ALIAS(0, 8, 7, 0);
2541 } else if (!Op.compare_lower("alle2")) {
2542 // SYS #4, C8, C7, #0
2543 SYS_ALIAS(4, 8, 7, 0);
2544 } else if (!Op.compare_lower("vale2is")) {
2545 // SYS #4, C8, C3, #5
2546 SYS_ALIAS(4, 8, 3, 5);
2547 } else if (!Op.compare_lower("vale3is")) {
2548 // SYS #6, C8, C3, #5
2549 SYS_ALIAS(6, 8, 3, 5);
2550 } else if (!Op.compare_lower("alle3")) {
2551 // SYS #6, C8, C7, #0
2552 SYS_ALIAS(6, 8, 7, 0);
2553 } else if (!Op.compare_lower("vae1")) {
2554 // SYS #0, C8, C7, #1
2555 SYS_ALIAS(0, 8, 7, 1);
2556 } else if (!Op.compare_lower("vae2")) {
2557 // SYS #4, C8, C7, #1
2558 SYS_ALIAS(4, 8, 7, 1);
2559 } else if (!Op.compare_lower("vae3")) {
2560 // SYS #6, C8, C7, #1
2561 SYS_ALIAS(6, 8, 7, 1);
2562 } else if (!Op.compare_lower("aside1")) {
2563 // SYS #0, C8, C7, #2
2564 SYS_ALIAS(0, 8, 7, 2);
2565 } else if (!Op.compare_lower("vaae1")) {
2566 // SYS #0, C8, C7, #3
2567 SYS_ALIAS(0, 8, 7, 3);
2568 } else if (!Op.compare_lower("alle1")) {
2569 // SYS #4, C8, C7, #4
2570 SYS_ALIAS(4, 8, 7, 4);
2571 } else if (!Op.compare_lower("vale1")) {
2572 // SYS #0, C8, C7, #5
2573 SYS_ALIAS(0, 8, 7, 5);
2574 } else if (!Op.compare_lower("vale2")) {
2575 // SYS #4, C8, C7, #5
2576 SYS_ALIAS(4, 8, 7, 5);
2577 } else if (!Op.compare_lower("vale3")) {
2578 // SYS #6, C8, C7, #5
2579 SYS_ALIAS(6, 8, 7, 5);
2580 } else if (!Op.compare_lower("vaale1")) {
2581 // SYS #0, C8, C7, #7
2582 SYS_ALIAS(0, 8, 7, 7);
2583 } else if (!Op.compare_lower("ipas2e1")) {
2584 // SYS #4, C8, C4, #1
2585 SYS_ALIAS(4, 8, 4, 1);
2586 } else if (!Op.compare_lower("ipas2le1")) {
2587 // SYS #4, C8, C4, #5
2588 SYS_ALIAS(4, 8, 4, 5);
2589 } else if (!Op.compare_lower("ipas2e1is")) {
2590 // SYS #4, C8, C0, #1
2591 SYS_ALIAS(4, 8, 0, 1);
2592 } else if (!Op.compare_lower("ipas2le1is")) {
2593 // SYS #4, C8, C0, #5
2594 SYS_ALIAS(4, 8, 0, 5);
2595 } else if (!Op.compare_lower("vmalls12e1")) {
2596 // SYS #4, C8, C7, #6
2597 SYS_ALIAS(4, 8, 7, 6);
2598 } else if (!Op.compare_lower("vmalls12e1is")) {
2599 // SYS #4, C8, C3, #6
2600 SYS_ALIAS(4, 8, 3, 6);
2602 return TokError("invalid operand for TLBI instruction");
2608 Parser.Lex(); // Eat operand.
// Ops that affect "all" entries (e.g. alle1, vmalle1) take no register; all
// other ops require a virtual-address/register operand.
2610 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2611 bool HasRegister = false;
2613 // Check for the optional register operand.
2614 if (getLexer().is(AsmToken::Comma)) {
2615 Parser.Lex(); // Eat comma.
2617 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2618 return TokError("expected register operand");
// NOTE(review): HasRegister is presumably set to true on an elided line just
// after a successful parseRegister -- confirm.
2623 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2624 Parser.eatToEndOfStatement();
2625 return TokError("unexpected token in argument list");
2628 if (ExpectRegister && !HasRegister) {
2629 return TokError("specified " + Mnemonic + " op requires a register");
2631 else if (!ExpectRegister && HasRegister) {
2632 return TokError("specified " + Mnemonic + " op does not use a register");
2635 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB instruction:
/// either a '#'-prefixed immediate in [0, 15] or a named barrier option.
/// For ISB the only accepted named option is "sy".
2639 ARM64AsmParser::OperandMatchResultTy
2640 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2641 const AsmToken &Tok = Parser.getTok();
2643 // Can be either a #imm style literal or an option name
2644 bool Hash = Tok.is(AsmToken::Hash);
2645 if (Hash || Tok.is(AsmToken::Integer)) {
2646 // Immediate operand.
// NOTE(review): the Lex below appears to be guarded by 'if (Hash)' on an
// elided line -- confirm.
2648 Parser.Lex(); // Eat the '#'
2649 const MCExpr *ImmVal;
2650 SMLoc ExprLoc = getLoc();
2651 if (getParser().parseExpression(ImmVal))
2652 return MatchOperand_ParseFail;
2653 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2655 Error(ExprLoc, "immediate value expected for barrier operand");
2656 return MatchOperand_ParseFail;
// Barrier immediates are a 4-bit field.
2658 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2659 Error(ExprLoc, "barrier operand out of range");
2660 return MatchOperand_ParseFail;
2663 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2664 return MatchOperand_Success;
2667 if (Tok.isNot(AsmToken::Identifier)) {
2668 TokError("invalid operand for instruction");
2669 return MatchOperand_ParseFail;
// Look up the named barrier option (sy, ish, osh, ...).
2673 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2675 TokError("invalid barrier option name");
2676 return MatchOperand_ParseFail;
2679 // The only valid named option for ISB is 'sy'
2680 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2681 TokError("'sy' or #imm operand expected");
2682 return MatchOperand_ParseFail;
2685 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2686 Parser.Lex(); // Consume the option
2688 return MatchOperand_Success;
/// tryParseSysReg - Parse a system register operand (MRS/MSR).  The register
/// name is kept as a string; validation against the feature bits happens in
/// the operand itself.
2691 ARM64AsmParser::OperandMatchResultTy
2692 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2693 const AsmToken &Tok = Parser.getTok();
2695 if (Tok.isNot(AsmToken::Identifier))
2696 return MatchOperand_NoMatch;
2698 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2699 STI.getFeatureBits(), getContext()));
2700 Parser.Lex(); // Eat identifier
2702 return MatchOperand_Success;
2705 /// tryParseVectorRegister - Parse a vector register operand.
///
/// On success pushes a vector register operand (plus a token operand for any
/// explicit ".8b"-style qualifier) and, when followed by "[imm]", a vector
/// index operand.  Returns true when no vector register is present.
2706 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2707 if (Parser.getTok().isNot(AsmToken::Identifier))
2711 // Check for a vector register specifier first.
2713 int64_t Reg = tryMatchVectorRegister(Kind, false);
2717 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2718 // If there was an explicit qualifier, that goes on as a literal text
2721 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2723 // If there is an index specifier following the register, parse that too.
2724 if (Parser.getTok().is(AsmToken::LBrac)) {
2725 SMLoc SIdx = getLoc();
2726 Parser.Lex(); // Eat left bracket token.
2728 const MCExpr *ImmVal;
2729 if (getParser().parseExpression(ImmVal))
// The index must be a compile-time constant.
2731 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2733 TokError("immediate value expected for vector index");
2738 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2739 Error(E, "']' expected");
2743 Parser.Lex(); // Eat right bracket token.
2745 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2752 /// parseRegister - Parse a non-vector register operand.
///
/// Falls back to the vector-register path first, then tries a scalar
/// register.  Also handles the literal "[1]" suffix some instructions
/// (e.g. FMOVXDhighr) carry as part of their asm string.
2753 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2755 // Try for a vector register.
2756 if (!tryParseVectorRegister(Operands))
2759 // Try for a scalar register.
2760 int64_t Reg = tryParseRegister();
2764 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2766 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2767 // as a string token in the instruction itself.
2768 if (getLexer().getKind() == AsmToken::LBrac) {
2769 SMLoc LBracS = getLoc();
2771 const AsmToken &Tok = Parser.getTok();
2772 if (Tok.is(AsmToken::Integer)) {
2773 SMLoc IntS = getLoc();
2774 int64_t Val = Tok.getIntVal();
// NOTE(review): only Val == 1 appears to be accepted (the check is on an
// elided line); the tokens are pushed as the literal strings "[", "1", "]".
2777 if (getLexer().getKind() == AsmToken::RBrac) {
2778 SMLoc RBracS = getLoc();
2781 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2783 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2785 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2795 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2796 /// do not allow base register writeback modes,
2797 /// or those that handle writeback separately from
2798 /// the memory operand (like the AdvSIMD ldX/stX
/// Accepts only the "[Xn]" form: a single base register, no offset.
2800 ARM64AsmParser::OperandMatchResultTy
2801 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
2802 if (Parser.getTok().isNot(AsmToken::LBrac))
2803 return MatchOperand_NoMatch;
2805 Parser.Lex(); // Eat left bracket token.
2807 const AsmToken &BaseRegTok = Parser.getTok();
2808 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2809 Error(BaseRegTok.getLoc(), "register expected");
2810 return MatchOperand_ParseFail;
2813 int64_t Reg = tryParseRegister();
2815 Error(BaseRegTok.getLoc(), "register expected");
2816 return MatchOperand_ParseFail;
2820 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2821 Error(E, "']' expected");
2822 return MatchOperand_ParseFail;
2825 Parser.Lex(); // Eat right bracket token.
// nullptr offset expression => plain register-indirect addressing.
2827 Operands.push_back(ARM64Operand::CreateMem(Reg, nullptr, S, E, E, getContext()));
2828 return MatchOperand_Success;
2831 /// parseMemory - Parse a memory operand for a basic load/store instruction.
///
/// Handles "[Rn]", "[Rn, Rm{, extend {#amt}}]", "[Rn, #imm]" and
/// "[Rn, symbol@modifier]" forms, plus a trailing '!' for pre-indexed
/// writeback.
2832 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2833 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2835 Parser.Lex(); // Eat left bracket token.
2837 const AsmToken &BaseRegTok = Parser.getTok();
2838 if (BaseRegTok.isNot(AsmToken::Identifier))
2839 return Error(BaseRegTok.getLoc(), "register expected");
2841 int64_t Reg = tryParseRegister();
2843 return Error(BaseRegTok.getLoc(), "register expected");
2845 // If there is an offset expression, parse it.
2846 const MCExpr *OffsetExpr = nullptr;
2848 if (Parser.getTok().is(AsmToken::Comma)) {
2849 Parser.Lex(); // Eat the comma.
2850 OffsetLoc = getLoc();
// Register-offset form: "[Rn, Rm{, extend}]".
2853 const AsmToken &OffsetRegTok = Parser.getTok();
2854 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2856 // Default shift is LSL, with an omitted shift. We use the third bit of
2857 // the extend value to indicate presence/omission of the immediate offset.
2858 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
2859 int64_t ShiftVal = 0;
2860 bool ExplicitShift = false;
2862 if (Parser.getTok().is(AsmToken::Comma)) {
2863 // Embedded extend operand.
2864 Parser.Lex(); // Eat the comma
2866 SMLoc ExtLoc = getLoc();
2867 const AsmToken &Tok = Parser.getTok();
2868 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2869 .Case("uxtw", ARM64_AM::UXTW)
2870 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2871 .Case("sxtw", ARM64_AM::SXTW)
2872 .Case("sxtx", ARM64_AM::SXTX)
2873 .Case("UXTW", ARM64_AM::UXTW)
2874 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2875 .Case("SXTW", ARM64_AM::SXTW)
2876 .Case("SXTX", ARM64_AM::SXTX)
2877 .Default(ARM64_AM::InvalidExtend);
2878 if (ExtOp == ARM64_AM::InvalidExtend)
2879 return Error(ExtLoc, "expected valid extend operation");
2881 Parser.Lex(); // Eat the extend op.
2883 // A 32-bit offset register is only valid for [SU]/XTW extend
2885 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
2886 if (ExtOp != ARM64_AM::UXTW &&
2887 ExtOp != ARM64_AM::SXTW)
2888 return Error(ExtLoc, "32-bit general purpose offset register "
2889 "requires sxtw or uxtw extend");
2890 } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
2892 return Error(OffsetLoc,
2893 "64-bit general purpose offset register expected");
2895 bool Hash = getLexer().is(AsmToken::Hash);
2896 if (getLexer().is(AsmToken::RBrac)) {
2897 // No immediate operand.
2898 if (ExtOp == ARM64_AM::UXTX)
2899 return Error(ExtLoc, "LSL extend requires immediate operand");
2900 } else if (Hash || getLexer().is(AsmToken::Integer)) {
2901 // Immediate operand.
// NOTE(review): this Lex appears guarded by 'if (Hash)' on an elided line.
2903 Parser.Lex(); // Eat the '#'
2904 const MCExpr *ImmVal;
2905 SMLoc ExprLoc = getLoc();
2906 if (getParser().parseExpression(ImmVal))
2908 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2910 return TokError("immediate value expected for extend operand");
2912 ExplicitShift = true;
2913 ShiftVal = MCE->getValue();
// Extend amounts for loads/stores are limited to 0..4 (log2 of access size).
2914 if (ShiftVal < 0 || ShiftVal > 4)
2915 return Error(ExprLoc, "immediate operand out of range");
2917 return Error(getLoc(), "expected immediate operand");
2920 if (Parser.getTok().isNot(AsmToken::RBrac))
2921 return Error(getLoc(), "']' expected");
2923 Parser.Lex(); // Eat right bracket token.
2926 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
2927 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
2930 // Immediate expressions.
2931 } else if (Parser.getTok().is(AsmToken::Hash) ||
2932 Parser.getTok().is(AsmToken::Colon) ||
2933 Parser.getTok().is(AsmToken::Integer)) {
2934 if (Parser.getTok().is(AsmToken::Hash))
2935 Parser.Lex(); // Eat hash token.
2937 if (parseSymbolicImmVal(OffsetExpr))
2940 // FIXME: We really should make sure that we're dealing with a LDR/STR
2941 // instruction that can legally have a symbolic expression here.
2942 // Symbol reference.
2943 if (Parser.getTok().isNot(AsmToken::Identifier) &&
2944 Parser.getTok().isNot(AsmToken::String))
2945 return Error(getLoc(), "identifier or immediate expression expected");
2946 if (getParser().parseExpression(OffsetExpr))
2948 // If this is a plain ref, Make sure a legal variant kind was specified.
2949 // Otherwise, it's a more complicated expression and we have to just
2950 // assume it's OK and let the relocation stuff puke if it's not.
2951 ARM64MCExpr::VariantKind ELFRefKind;
2952 MCSymbolRefExpr::VariantKind DarwinRefKind;
2954 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
2956 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
2957 "ELF symbol modifiers not supported here yet");
2959 switch (DarwinRefKind) {
2961 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
2962 case MCSymbolRefExpr::VK_GOTPAGEOFF:
2963 case MCSymbolRefExpr::VK_PAGEOFF:
2964 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
2965 // These are what we're expecting.
2973 if (Parser.getTok().isNot(AsmToken::RBrac))
2974 return Error(E, "']' expected");
2976 Parser.Lex(); // Eat right bracket token.
2978 // Create the memory operand.
2980 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
2982 // Check for a '!', indicating pre-indexed addressing with writeback.
2983 if (Parser.getTok().is(AsmToken::Exclaim)) {
2984 // There needs to have been an immediate or wback doesn't make sense.
2986 return Error(E, "missing offset for pre-indexed addressing");
2987 // Pre-indexed with writeback must have a constant expression for the
2988 // offset. FIXME: Theoretically, we'd like to allow fixups so long
2989 // as they don't require a relocation.
2990 if (!isa<MCConstantExpr>(OffsetExpr))
2991 return Error(OffsetLoc, "constant immediate expression expected");
2993 // Create the Token operand for the '!'.
2994 Operands.push_back(ARM64Operand::CreateToken(
2995 "!", false, Parser.getTok().getLoc(), getContext()));
2996 Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":specifier:" ELF relocation modifier (e.g. :lo12:, :got:).
/// When a modifier is present the result is wrapped in an ARM64MCExpr
/// carrying the corresponding VariantKind.
3002 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
3003 bool HasELFModifier = false;
3004 ARM64MCExpr::VariantKind RefKind;
3006 if (Parser.getTok().is(AsmToken::Colon)) {
3007 Parser.Lex(); // Eat ':'
3008 HasELFModifier = true;
3010 if (Parser.getTok().isNot(AsmToken::Identifier)) {
3011 Error(Parser.getTok().getLoc(),
3012 "expect relocation specifier in operand after ':'");
// Specifier names are matched case-insensitively.
3016 std::string LowerCase = Parser.getTok().getIdentifier().lower();
3017 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
3018 .Case("lo12", ARM64MCExpr::VK_LO12)
3019 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
3020 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
3021 .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
3022 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
3023 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
3024 .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
3025 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
3026 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
3027 .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
3028 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
3029 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
3030 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
3031 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
3032 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
3033 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
3034 .Case("dtprel_hi12", ARM64MCExpr::VK_DTPREL_HI12)
3035 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
3036 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
3037 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
3038 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
3039 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
3040 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
3041 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
3042 .Case("tprel_hi12", ARM64MCExpr::VK_TPREL_HI12)
3043 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
3044 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
3045 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
3046 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
3047 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
3048 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
3049 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
3050 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
3051 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
3052 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
3053 .Default(ARM64MCExpr::VK_INVALID)
3055 if (RefKind == ARM64MCExpr::VK_INVALID) {
3056 Error(Parser.getTok().getLoc(),
3057 "expect relocation specifier in operand after ':'");
3061 Parser.Lex(); // Eat identifier
// The specifier must be closed with a second ':' before the expression.
3063 if (Parser.getTok().isNot(AsmToken::Colon)) {
3064 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
3067 Parser.Lex(); // Eat ':'
3070 if (getParser().parseExpression(ImmVal))
3074 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
3079 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Parses "{ Vn.T - Vm.T }" or "{ Vn.T, Vn+1.T, ... }" (registers must be
/// sequential, wrapping at 31), followed by an optional "[imm]" index.
// NOTE(review): the assert message below says "Left Bracket" but the check
// is for LCurly ('{') -- message is misleading.
3080 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
3081 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
3083 Parser.Lex(); // Eat left bracket token.
3085 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
3088 int64_t PrevReg = FirstReg;
// Range form: "{ v0.8b - v3.8b }".
3091 if (Parser.getTok().is(AsmToken::Minus)) {
3092 Parser.Lex(); // Eat the minus.
3094 SMLoc Loc = getLoc();
3096 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3099 // Any Kind suffixes must match on all regs in the list.
3100 if (Kind != NextKind)
3101 return Error(Loc, "mismatched register size suffix");
// Distance with wraparound at 32 registers; lists hold at most 4 regs.
3103 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3105 if (Space == 0 || Space > 3) {
3106 return Error(Loc, "invalid number of vectors");
// List form: "{ v0.8b, v1.8b, ... }".
3112 while (Parser.getTok().is(AsmToken::Comma)) {
3113 Parser.Lex(); // Eat the comma token.
3115 SMLoc Loc = getLoc();
3117 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3120 // Any Kind suffixes must match on all regs in the list.
3121 if (Kind != NextKind)
3122 return Error(Loc, "mismatched register size suffix");
3124 // Registers must be incremental (with wraparound at 31)
3125 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3126 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3127 return Error(Loc, "registers must be sequential");
3134 if (Parser.getTok().is(AsmToken::EndOfStatement))
3135 Error(getLoc(), "'}' expected");
3136 Parser.Lex(); // Eat the '}' token.
3138 unsigned NumElements = 0;
3139 char ElementKind = 0;
3141 parseValidVectorKind(Kind, NumElements, ElementKind);
3143 Operands.push_back(ARM64Operand::CreateVectorList(
3144 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3146 // If there is an index specifier following the list, parse that too.
3147 if (Parser.getTok().is(AsmToken::LBrac)) {
3148 SMLoc SIdx = getLoc();
3149 Parser.Lex(); // Eat left bracket token.
3151 const MCExpr *ImmVal;
3152 if (getParser().parseExpression(ImmVal))
3154 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3156 TokError("immediate value expected for vector index");
3161 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3162 Error(E, "']' expected");
3166 Parser.Lex(); // Eat right bracket token.
3168 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3174 /// parseOperand - Parse a arm instruction operand. For now this parses the
3175 /// operand regardless of the mnemonic.
///
/// Tries any TableGen-registered custom parser first, then dispatches on the
/// leading token: memory ('['), vector list ('{'), identifier (condition
/// code / register / shift / extend / label), or immediate ('#', integer,
/// float literal, ':'-modified symbol).
3176 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3177 bool invertCondCode) {
3178 // Check if the current operand has a custom associated parser, if so, try to
3179 // custom parse the operand, or fallback to the general approach.
3180 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3181 if (ResTy == MatchOperand_Success)
3183 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3184 // there was a match, but an error occurred, in which case, just return that
3185 // the operand parsing failed.
3186 if (ResTy == MatchOperand_ParseFail)
3189 // Nothing custom, so do general case parsing.
3191 switch (getLexer().getKind()) {
// (This case label is on an elided line; the body parses a ':'-modified
// symbolic immediate.)
3195 if (parseSymbolicImmVal(Expr))
3196 return Error(S, "invalid operand");
3198 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3199 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3202 case AsmToken::LBrac:
3203 return parseMemory(Operands);
3204 case AsmToken::LCurly:
3205 return parseVectorList(Operands);
3206 case AsmToken::Identifier: {
3207 // If we're expecting a Condition Code operand, then just parse that.
3209 return parseCondCode(Operands, invertCondCode);
3211 // If it's a register name, parse it.
3212 if (!parseRegister(Operands))
3215 // This could be an optional "shift" operand.
3216 if (!parseOptionalShift(Operands))
3219 // Or maybe it could be an optional "extend" operand.
3220 if (!parseOptionalExtend(Operands))
3223 // This was not a register so parse other operands that start with an
3224 // identifier (like labels) as expressions and create them as immediates.
3225 const MCExpr *IdVal;
3227 if (getParser().parseExpression(IdVal))
3230 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3231 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3234 case AsmToken::Integer:
3235 case AsmToken::Real:
3236 case AsmToken::Hash: {
3237 // #42 -> immediate.
3239 if (getLexer().is(AsmToken::Hash))
3242 // The only Real that should come through here is a literal #0.0 for
3243 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3244 // so convert the value.
3245 const AsmToken &Tok = Parser.getTok();
3246 if (Tok.is(AsmToken::Real)) {
3247 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3248 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
// Reject any float literal other than 0.0, and 0.0 on any mnemonic that is
// not one of the fcmp/fcm* comparisons (condition partly elided).
3250 (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
3251 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
3252 Mnemonic != "fcmlt"))
3253 return TokError("unexpected floating point literal");
3254 Parser.Lex(); // Eat the token.
// Push "#0" and ".0" as raw tokens, matching the instruction definitions.
3257 ARM64Operand::CreateToken("#0", false, S, getContext()));
3259 ARM64Operand::CreateToken(".0", false, S, getContext()));
3263 const MCExpr *ImmVal;
3264 if (parseSymbolicImmVal(ImmVal))
3267 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3268 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3274 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
/// operands.  Canonicalizes "bcc"-style branch mnemonics to "b.cc", splits
/// the mnemonic on '.' into token/suffix operands, routes IC/DC/AT/TLBI to
/// parseSysAlias, and parses the comma-separated operand list (flagging
/// which operand position is a condition code for the cond-select aliases).
3276 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3277 StringRef Name, SMLoc NameLoc,
3278 OperandVector &Operands) {
// Canonicalize shorthand conditional branches ("beq") to the dotted form.
3279 Name = StringSwitch<StringRef>(Name.lower())
3280 .Case("beq", "b.eq")
3281 .Case("bne", "b.ne")
3282 .Case("bhs", "b.hs")
3283 .Case("bcs", "b.cs")
3284 .Case("blo", "b.lo")
3285 .Case("bcc", "b.cc")
3286 .Case("bmi", "b.mi")
3287 .Case("bpl", "b.pl")
3288 .Case("bvs", "b.vs")
3289 .Case("bvc", "b.vc")
3290 .Case("bhi", "b.hi")
3291 .Case("bls", "b.ls")
3292 .Case("bge", "b.ge")
3293 .Case("blt", "b.lt")
3294 .Case("bgt", "b.gt")
3295 .Case("ble", "b.le")
3296 .Case("bal", "b.al")
3297 .Case("bnv", "b.nv")
3300 // Create the leading tokens for the mnemonic, split by '.' characters.
3301 size_t Start = 0, Next = Name.find('.');
3302 StringRef Head = Name.slice(Start, Next);
3304 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3305 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3306 return parseSysAlias(Head, NameLoc, Operands);
3309 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3312 // Handle condition codes for a branch mnemonic
3313 if (Head == "b" && Next != StringRef::npos) {
3315 Next = Name.find('.', Start + 1);
3316 Head = Name.slice(Start + 1, Next);
3318 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3319 (Head.data() - Name.data()));
3320 unsigned CC = parseCondCodeString(Head);
3321 if (CC == ARM64CC::Invalid)
3322 return Error(SuffixLoc, "invalid condition code");
3323 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3325 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3328 // Add the remaining tokens in the mnemonic.
3329 while (Next != StringRef::npos) {
3331 Next = Name.find('.', Start + 1);
3332 Head = Name.slice(Start, Next);
3333 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3334 (Head.data() - Name.data()) + 1);
3336 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3339 // Conditional compare instructions have a Condition Code operand, which needs
3340 // to be parsed and an immediate operand created.
3341 bool condCodeFourthOperand =
3342 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3343 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3344 Head == "csinc" || Head == "csinv" || Head == "csneg");
3346 // These instructions are aliases to some of the conditional select
3347 // instructions. However, the condition code is inverted in the aliased
// instruction.
3350 // FIXME: Is this the correct way to handle these? Or should the parser
3351 // generate the aliased instructions directly?
3352 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3353 bool condCodeThirdOperand =
3354 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3356 // Read the remaining operands.
3357 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3358 // Read the first operand.
3359 if (parseOperand(Operands, false, false)) {
3360 Parser.eatToEndOfStatement();
// 'N' tracks the 1-based operand position; its declaration/increment are on
// elided lines.
3365 while (getLexer().is(AsmToken::Comma)) {
3366 Parser.Lex(); // Eat the comma.
3368 // Parse and remember the operand.
3369 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3370 (N == 3 && condCodeThirdOperand) ||
3371 (N == 2 && condCodeSecondOperand),
3372 condCodeSecondOperand || condCodeThirdOperand)) {
3373 Parser.eatToEndOfStatement();
3381 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3382 SMLoc Loc = Parser.getTok().getLoc();
3383 Parser.eatToEndOfStatement();
3384 return Error(Loc, "unexpected token in argument list");
3387 Parser.Lex(); // Consume the EndOfStatement
3391 // FIXME: This entire function is a giant hack to provide us with decent
3392 // operand range validation/diagnostics until TableGen/MC can be extended
3393 // to support autogeneration of this kind of validation.
// Post-match semantic validation of a fully-encoded MCInst. |Loc| carries
// per-operand source locations for diagnostics. Returns true (via Error)
// when the instruction is rejected.
//
// NOTE(review): this listing is elided — the embedded line numbers skip
// values, so several guards (e.g. the `if (Rt == Rt2)` before the
// "Rt2==Rt" error, `break`s, and closing braces) are not visible here.
// Comments describe only the visible code.
3394 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3395 SmallVectorImpl<SMLoc> &Loc) {
3396 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3397 // Check for indexed addressing modes w/ the base register being the
3398 // same as a destination/source register or pair load where
3399 // the Rt == Rt2. All of those are undefined behaviour.
3400 switch (Inst.getOpcode()) {
3401 case ARM64::LDPSWpre:
3402 case ARM64::LDPWpost:
3403 case ARM64::LDPWpre:
3404 case ARM64::LDPXpost:
3405 case ARM64::LDPXpre: {
// Writeback pair loads: the base (Rn) must not overlap either destination.
// isSubRegisterEq also catches W/X sub-register aliasing.
3406 unsigned Rt = Inst.getOperand(0).getReg();
3407 unsigned Rt2 = Inst.getOperand(1).getReg();
3408 unsigned Rn = Inst.getOperand(2).getReg();
3409 if (RI->isSubRegisterEq(Rn, Rt))
3410 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3411 "is also a destination")
3412 if (RI->isSubRegisterEq(Rn, Rt2))
3413 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3414 "is also a destination");
3417 case ARM64::LDPDpost:
3418 case ARM64::LDPDpre:
3419 case ARM64::LDPQpost:
3420 case ARM64::LDPQpre:
3421 case ARM64::LDPSpost:
3422 case ARM64::LDPSpre:
3423 case ARM64::LDPSWpost:
3429 case ARM64::LDPXi: {
// Pair loads: Rt and Rt2 must differ. The comparison `if` is elided from
// this listing (original line 3432) — the visible error is its body.
3430 unsigned Rt = Inst.getOperand(0).getReg();
3431 unsigned Rt2 = Inst.getOperand(1).getReg();
3433 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3436 case ARM64::STPDpost:
3437 case ARM64::STPDpre:
3438 case ARM64::STPQpost:
3439 case ARM64::STPQpre:
3440 case ARM64::STPSpost:
3441 case ARM64::STPSpre:
3442 case ARM64::STPWpost:
3443 case ARM64::STPWpre:
3444 case ARM64::STPXpost:
3445 case ARM64::STPXpre: {
// Writeback pair stores: base must not overlap either source register.
3446 unsigned Rt = Inst.getOperand(0).getReg();
3447 unsigned Rt2 = Inst.getOperand(1).getReg();
3448 unsigned Rn = Inst.getOperand(2).getReg();
3449 if (RI->isSubRegisterEq(Rn, Rt))
3450 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3451 "is also a source");
3452 if (RI->isSubRegisterEq(Rn, Rt2))
3453 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3454 "is also a source");
3457 case ARM64::LDRBBpre:
3458 case ARM64::LDRBpre:
3459 case ARM64::LDRHHpre:
3460 case ARM64::LDRHpre:
3461 case ARM64::LDRSBWpre:
3462 case ARM64::LDRSBXpre:
3463 case ARM64::LDRSHWpre:
3464 case ARM64::LDRSHXpre:
3465 case ARM64::LDRSWpre:
3466 case ARM64::LDRWpre:
3467 case ARM64::LDRXpre:
3468 case ARM64::LDRBBpost:
3469 case ARM64::LDRBpost:
3470 case ARM64::LDRHHpost:
3471 case ARM64::LDRHpost:
3472 case ARM64::LDRSBWpost:
3473 case ARM64::LDRSBXpost:
3474 case ARM64::LDRSHWpost:
3475 case ARM64::LDRSHXpost:
3476 case ARM64::LDRSWpost:
3477 case ARM64::LDRWpost:
3478 case ARM64::LDRXpost: {
// Writeback single-register loads: base must not overlap the destination.
3479 unsigned Rt = Inst.getOperand(0).getReg();
3480 unsigned Rn = Inst.getOperand(1).getReg();
3481 if (RI->isSubRegisterEq(Rn, Rt))
3482 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3483 "is also a source");
3486 case ARM64::STRBBpost:
3487 case ARM64::STRBpost:
3488 case ARM64::STRHHpost:
3489 case ARM64::STRHpost:
3490 case ARM64::STRWpost:
3491 case ARM64::STRXpost:
3492 case ARM64::STRBBpre:
3493 case ARM64::STRBpre:
3494 case ARM64::STRHHpre:
3495 case ARM64::STRHpre:
3496 case ARM64::STRWpre:
3497 case ARM64::STRXpre: {
// Writeback single-register stores: base must not overlap the source.
3498 unsigned Rt = Inst.getOperand(0).getReg();
3499 unsigned Rn = Inst.getOperand(1).getReg();
3500 if (RI->isSubRegisterEq(Rn, Rt))
3501 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3502 "is also a source");
3507 // Now check immediate ranges. Separate from the above as there is overlap
3508 // in the instructions being checked and this keeps the nested conditionals
3510 switch (Inst.getOpcode()) {
3512 case ARM64::ANDSWrs:
3514 case ARM64::ORRWrs: {
// Shifted-register logical (32-bit forms): an LSL shift amount must fit
// in a W register's 0-31 range.
3515 if (!Inst.getOperand(3).isImm())
3516 return Error(Loc[3], "immediate value expected");
3517 int64_t shifter = Inst.getOperand(3).getImm();
3518 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3519 if (ST == ARM64_AM::LSL && shifter > 31)
3520 return Error(Loc[3], "shift value out of range");
3523 case ARM64::ADDSWri:
3524 case ARM64::ADDSXri:
3527 case ARM64::SUBSWri:
3528 case ARM64::SUBSXri:
3530 case ARM64::SUBXri: {
// ADD/SUB (immediate): only shift amounts 0 and 12 are encodable.
3531 if (!Inst.getOperand(3).isImm())
3532 return Error(Loc[3], "immediate value expected");
3533 int64_t shifter = Inst.getOperand(3).getImm();
3534 if (shifter != 0 && shifter != 12)
3535 return Error(Loc[3], "shift value out of range");
3536 // The imm12 operand can be an expression. Validate that it's legit.
3537 // FIXME: We really, really want to allow arbitrary expressions here
3538 // and resolve the value and validate the result at fixup time, but
3539 // that's hard as we have long since lost any source information we
3540 // need to generate good diagnostics by that point.
3541 if ((Inst.getOpcode() == ARM64::ADDXri ||
3542 Inst.getOpcode() == ARM64::ADDWri) &&
3543 Inst.getOperand(2).isExpr()) {
3544 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3545 ARM64MCExpr::VariantKind ELFRefKind;
3546 MCSymbolRefExpr::VariantKind DarwinRefKind;
// classifySymbolRef splits the expression into relocation kind + addend.
// The Addend declaration is elided from this listing (original line 3547).
3548 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3549 return Error(Loc[2], "invalid immediate expression");
3552 // Note that we don't range-check the addend. It's adjusted modulo page
3553 // size when converted, so there is no "out of range" condition when using
3554 // @pageoff. Any validity checking for the value was done in the is*()
3555 // predicate function.
3556 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3557 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3558 Inst.getOpcode() == ARM64::ADDXri)
// ELF lo12-style relocations are all acceptable here; the accepted-path
// statement bodies (returns/breaks) are elided from this listing.
3560 if (ELFRefKind == ARM64MCExpr::VK_LO12 ||
3561 ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12 ||
3562 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3563 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3564 ELFRefKind == ARM64MCExpr::VK_TPREL_HI12 ||
3565 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3566 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3567 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3569 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3570 // @gotpageoff can only be used directly, not with an addend.
3574 // Otherwise, we're not sure, so don't allow it for now.
3575 return Error(Loc[2], "invalid immediate expression");
3578 // If it's anything but an immediate, it's not legit.
3579 if (!Inst.getOperand(2).isImm())
3580 return Error(Loc[2], "invalid immediate expression");
// imm12 range check: unsigned 12-bit value.
3581 int64_t imm = Inst.getOperand(2).getImm();
3582 if (imm > 4095 || imm < 0)
3583 return Error(Loc[2], "immediate value out of range");
3586 case ARM64::LDRBpre:
3587 case ARM64::LDRHpre:
3588 case ARM64::LDRSBWpre:
3589 case ARM64::LDRSBXpre:
3590 case ARM64::LDRSHWpre:
3591 case ARM64::LDRSHXpre:
3592 case ARM64::LDRWpre:
3593 case ARM64::LDRXpre:
3594 case ARM64::LDRSpre:
3595 case ARM64::LDRDpre:
3596 case ARM64::LDRQpre:
3597 case ARM64::STRBpre:
3598 case ARM64::STRHpre:
3599 case ARM64::STRWpre:
3600 case ARM64::STRXpre:
3601 case ARM64::STRSpre:
3602 case ARM64::STRDpre:
3603 case ARM64::STRQpre:
3604 case ARM64::LDRBpost:
3605 case ARM64::LDRHpost:
3606 case ARM64::LDRSBWpost:
3607 case ARM64::LDRSBXpost:
3608 case ARM64::LDRSHWpost:
3609 case ARM64::LDRSHXpost:
3610 case ARM64::LDRWpost:
3611 case ARM64::LDRXpost:
3612 case ARM64::LDRSpost:
3613 case ARM64::LDRDpost:
3614 case ARM64::LDRQpost:
3615 case ARM64::STRBpost:
3616 case ARM64::STRHpost:
3617 case ARM64::STRWpost:
3618 case ARM64::STRXpost:
3619 case ARM64::STRSpost:
3620 case ARM64::STRDpost:
3621 case ARM64::STRQpost:
3626 case ARM64::LDTRSHWi:
3627 case ARM64::LDTRSHXi:
3628 case ARM64::LDTRSBWi:
3629 case ARM64::LDTRSBXi:
3630 case ARM64::LDTRSWi:
3642 case ARM64::LDURSHWi:
3643 case ARM64::LDURSHXi:
3644 case ARM64::LDURSBWi:
3645 case ARM64::LDURSBXi:
3646 case ARM64::LDURSWi:
3654 case ARM64::STURBi: {
3655 // FIXME: Should accept expressions and error in fixup evaluation
// Pre/post-indexed and unscaled loads/stores share a signed 9-bit offset.
3657 if (!Inst.getOperand(2).isImm())
3658 return Error(Loc[1], "immediate value expected");
3659 int64_t offset = Inst.getOperand(2).getImm();
3660 if (offset > 255 || offset < -256)
3661 return Error(Loc[1], "offset value out of range");
3666 case ARM64::LDRSWro:
3668 case ARM64::STRSro: {
3669 // FIXME: Should accept expressions and error in fixup evaluation
// Register-offset addressing: only the four extend kinds below are valid.
3671 if (!Inst.getOperand(3).isImm())
3672 return Error(Loc[1], "immediate value expected");
3673 int64_t shift = Inst.getOperand(3).getImm();
3674 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3675 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3676 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3677 return Error(Loc[1], "shift type invalid");
3686 case ARM64::STRQro: {
3687 // FIXME: Should accept expressions and error in fixup evaluation
3689 if (!Inst.getOperand(3).isImm())
3690 return Error(Loc[1], "immediate value expected");
3691 int64_t shift = Inst.getOperand(3).getImm();
3692 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3693 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3694 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3695 return Error(Loc[1], "shift type invalid");
3699 case ARM64::LDRHHro:
3700 case ARM64::LDRSHWro:
3701 case ARM64::LDRSHXro:
3703 case ARM64::STRHHro: {
3704 // FIXME: Should accept expressions and error in fixup evaluation
3706 if (!Inst.getOperand(3).isImm())
3707 return Error(Loc[1], "immediate value expected");
3708 int64_t shift = Inst.getOperand(3).getImm();
3709 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3710 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3711 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3712 return Error(Loc[1], "shift type invalid");
3716 case ARM64::LDRBBro:
3717 case ARM64::LDRSBWro:
3718 case ARM64::LDRSBXro:
3720 case ARM64::STRBBro: {
3721 // FIXME: Should accept expressions and error in fixup evaluation
3723 if (!Inst.getOperand(3).isImm())
3724 return Error(Loc[1], "immediate value expected");
3725 int64_t shift = Inst.getOperand(3).getImm();
3726 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3727 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3728 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3729 return Error(Loc[1], "shift type invalid");
3743 case ARM64::LDPWpre:
3744 case ARM64::LDPXpre:
3745 case ARM64::LDPSpre:
3746 case ARM64::LDPDpre:
3747 case ARM64::LDPQpre:
3748 case ARM64::LDPSWpre:
3749 case ARM64::STPWpre:
3750 case ARM64::STPXpre:
3751 case ARM64::STPSpre:
3752 case ARM64::STPDpre:
3753 case ARM64::STPQpre:
3754 case ARM64::LDPWpost:
3755 case ARM64::LDPXpost:
3756 case ARM64::LDPSpost:
3757 case ARM64::LDPDpost:
3758 case ARM64::LDPQpost:
3759 case ARM64::LDPSWpost:
3760 case ARM64::STPWpost:
3761 case ARM64::STPXpost:
3762 case ARM64::STPSpost:
3763 case ARM64::STPDpost:
3764 case ARM64::STPQpost:
3774 case ARM64::STNPQi: {
3775 // FIXME: Should accept expressions and error in fixup evaluation
// Pair instructions: the scaled offset is a signed 7-bit field.
3777 if (!Inst.getOperand(3).isImm())
3778 return Error(Loc[2], "immediate value expected");
3779 int64_t offset = Inst.getOperand(3).getImm();
3780 if (offset > 63 || offset < -64)
3781 return Error(Loc[2], "offset value out of range");
// Rewrite a parsed "mov Rd, #imm" into "<mnemonic> Rd, #(imm >> shift),
// lsl #shift" (mnemonic is "movz" or "movn" at the call sites in this
// file). Mutates |Operands| in place: replaces the immediate operand and
// appends the shifter operand.
//
// NOTE(review): the listing is elided — the `Operands[0] =` assignment
// wrapping the CreateToken call (original line 3794) is not visible here.
3789 static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
3790 StringRef mnemonic, uint64_t imm, unsigned shift,
3791 MCContext &Context) {
3792 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3793 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
// Replace the "mov" token with the real mnemonic.
3795 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
// Shift the immediate down so it fits the 16-bit movz/movn field.
3797 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3798 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3799 Op2->getEndLoc(), Context);
// Append the matching "lsl #shift" operand.
3801 Operands.push_back(ARM64Operand::CreateShifter(
3802 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
// Rewrite a register-to-register "mov" involving SP/WSP into the canonical
// "add Rd, Rn, #0, lsl #0" form (plain ORR cannot encode the stack
// pointer). Mutates |Operands| in place.
//
// NOTE(review): the listing is elided — the `Operands[0] =` assignment
// wrapping the CreateToken call (original line 3811) is not visible here.
3807 static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
3808 MCContext &Context) {
3809 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3810 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
// Replace the "mov" token with "add".
3812 ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
// Append the "#0" immediate and "lsl #0" shifter operands.
3814 const MCExpr *Imm = MCConstantExpr::Create(0, Context);
3815 Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
3816 Op2->getEndLoc(), Context));
3817 Operands.push_back(ARM64Operand::CreateShifter(
3818 ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
// Rewrite a register-to-register "mov Rd, Rn" into "orr Rd, zr, Rn": the
// source register is pushed back as the third operand and the second
// operand becomes WZR/XZR, chosen to match the source's register width.
//
// NOTE(review): the listing is elided — the `Operands[0] =` assignment
// (original line 3827) and the `Operands[2] =` assignment wrapping the
// CreateReg call (original line 3838) are not visible here.
3823 static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
3824 MCContext &Context) {
3825 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3826 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
// Replace the "mov" token with "orr".
3828 ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
3830 // Operands[2] becomes Operands[3].
3831 Operands.push_back(Operands[2]);
3832 // And Operands[2] becomes ZR.
// Default to XZR; use WZR when the source is a 32-bit register.
3833 unsigned ZeroReg = ARM64::XZR;
3834 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3835 Operands[2]->getReg()))
3836 ZeroReg = ARM64::WZR;
3839 ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
3840 Op2->getEndLoc(), Context);
// Emit a human-readable diagnostic at |Loc| for a matcher failure code and
// return true (the MC convention for "error reported").
//
// NOTE(review): the listing is elided — the `switch (ErrCode) {` header
// (original line 3846) and several `case` labels (e.g. for the system-
// register messages below) are not visible here.
3845 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3847 case Match_MissingFeature:
3849 "instruction requires a CPU feature not currently enabled");
3850 case Match_InvalidOperand:
3851 return Error(Loc, "invalid operand for instruction");
3852 case Match_InvalidSuffix:
3853 return Error(Loc, "invalid type suffix for instruction");
// Memory-index diagnostics: signed 9-bit, then scaled signed 7-bit forms.
3854 case Match_InvalidMemoryIndexedSImm9:
3855 return Error(Loc, "index must be an integer in range [-256, 255].");
3856 case Match_InvalidMemoryIndexed32SImm7:
3857 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
3858 case Match_InvalidMemoryIndexed64SImm7:
3859 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
3860 case Match_InvalidMemoryIndexed128SImm7:
3861 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
// Unsigned scaled 12-bit offset forms, per access size.
3862 case Match_InvalidMemoryIndexed8:
3863 return Error(Loc, "index must be an integer in range [0, 4095].");
3864 case Match_InvalidMemoryIndexed16:
3865 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
3866 case Match_InvalidMemoryIndexed32:
3867 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
3868 case Match_InvalidMemoryIndexed64:
3869 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
3870 case Match_InvalidMemoryIndexed128:
3871 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
// Immediate-range diagnostics.
3872 case Match_InvalidImm0_7:
3873 return Error(Loc, "immediate must be an integer in range [0, 7].");
3874 case Match_InvalidImm0_15:
3875 return Error(Loc, "immediate must be an integer in range [0, 15].");
3876 case Match_InvalidImm0_31:
3877 return Error(Loc, "immediate must be an integer in range [0, 31].");
3878 case Match_InvalidImm0_63:
3879 return Error(Loc, "immediate must be an integer in range [0, 63].");
3880 case Match_InvalidImm1_8:
3881 return Error(Loc, "immediate must be an integer in range [1, 8].");
3882 case Match_InvalidImm1_16:
3883 return Error(Loc, "immediate must be an integer in range [1, 16].");
3884 case Match_InvalidImm1_32:
3885 return Error(Loc, "immediate must be an integer in range [1, 32].");
3886 case Match_InvalidImm1_64:
3887 return Error(Loc, "immediate must be an integer in range [1, 64].");
// Vector lane indices, per element size (B/H/S/D).
3888 case Match_InvalidIndexB:
3889 return Error(Loc, "vector lane must be an integer in range [0, 15].");
3890 case Match_InvalidIndexH:
3891 return Error(Loc, "vector lane must be an integer in range [0, 7].");
3892 case Match_InvalidIndexS:
3893 return Error(Loc, "vector lane must be an integer in range [0, 3].");
3894 case Match_InvalidIndexD:
3895 return Error(Loc, "vector lane must be an integer in range [0, 1].");
3896 case Match_InvalidLabel:
3897 return Error(Loc, "expected label or encodable integer pc offset");
// System-register diagnostics; their case labels are elided above each.
3899 return Error(Loc, "expected readable system register");
3901 return Error(Loc, "expected writable system register or pstate");
3902 case Match_MnemonicFail:
3903 return Error(Loc, "unrecognized instruction mnemonic");
// Default path: an unknown error code is a programmer error.
3905 assert(0 && "unexpected error code!");
3906 return Error(Loc, "invalid instruction format");
3910 static const char *getSubtargetFeatureName(unsigned Val);
3912 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3913 OperandVector &Operands,
3915 unsigned &ErrorInfo,
3916 bool MatchingInlineAsm) {
3917 assert(!Operands.empty() && "Unexpect empty operand list!");
3918 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3919 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3921 StringRef Tok = Op->getToken();
3922 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3923 // This needs to be done before the special handling of ADD/SUB immediates.
3924 if (Tok == "cmp" || Tok == "cmn") {
3925 // Replace the opcode with either ADDS or SUBS.
3926 const char *Repl = StringSwitch<const char *>(Tok)
3927 .Case("cmp", "subs")
3928 .Case("cmn", "adds")
3930 assert(Repl && "Unknown compare instruction");
3932 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3934 // Insert WZR or XZR as destination operand.
3935 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
3937 if (RegOp->isReg() &&
3938 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3940 ZeroReg = ARM64::WZR;
3942 ZeroReg = ARM64::XZR;
3944 Operands.begin() + 1,
3945 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3946 // Update since we modified it above.
3947 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3948 Tok = Op->getToken();
3951 unsigned NumOperands = Operands.size();
3953 if (Tok == "mov" && NumOperands == 3) {
3954 // The MOV mnemomic is aliased to movn/movz, depending on the value of
3955 // the immediate being instantiated.
3956 // FIXME: Catching this here is a total hack, and we should use tblgen
3957 // support to implement this instead as soon as it is available.
3959 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3960 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3962 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3963 uint64_t Val = CE->getValue();
3964 uint64_t NVal = ~Val;
3966 // If this is a 32-bit register and the value has none of the upper
3967 // set, clear the complemented upper 32-bits so the logic below works
3968 // for 32-bit registers too.
3969 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3971 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
3973 (Val & 0xFFFFFFFFULL) == Val)
3974 NVal &= 0x00000000FFFFFFFFULL;
3976 // MOVK Rd, imm << 0
3977 if ((Val & 0xFFFF) == Val)
3978 rewriteMOVI(Operands, "movz", Val, 0, getContext());
3980 // MOVK Rd, imm << 16
3981 else if ((Val & 0xFFFF0000ULL) == Val)
3982 rewriteMOVI(Operands, "movz", Val, 16, getContext());
3984 // MOVK Rd, imm << 32
3985 else if ((Val & 0xFFFF00000000ULL) == Val)
3986 rewriteMOVI(Operands, "movz", Val, 32, getContext());
3988 // MOVK Rd, imm << 48
3989 else if ((Val & 0xFFFF000000000000ULL) == Val)
3990 rewriteMOVI(Operands, "movz", Val, 48, getContext());
3992 // MOVN Rd, (~imm << 0)
3993 else if ((NVal & 0xFFFFULL) == NVal)
3994 rewriteMOVI(Operands, "movn", NVal, 0, getContext());
3996 // MOVN Rd, ~(imm << 16)
3997 else if ((NVal & 0xFFFF0000ULL) == NVal)
3998 rewriteMOVI(Operands, "movn", NVal, 16, getContext());
4000 // MOVN Rd, ~(imm << 32)
4001 else if ((NVal & 0xFFFF00000000ULL) == NVal)
4002 rewriteMOVI(Operands, "movn", NVal, 32, getContext());
4004 // MOVN Rd, ~(imm << 48)
4005 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
4006 rewriteMOVI(Operands, "movn", NVal, 48, getContext());
4008 } else if (Op1->isReg() && Op2->isReg()) {
4010 unsigned Reg1 = Op1->getReg();
4011 unsigned Reg2 = Op2->getReg();
4012 if ((Reg1 == ARM64::SP &&
4013 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
4014 (Reg2 == ARM64::SP &&
4015 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
4016 (Reg1 == ARM64::WSP &&
4017 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
4018 (Reg2 == ARM64::WSP &&
4019 ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
4020 rewriteMOVRSP(Operands, getContext());
4022 rewriteMOVR(Operands, getContext());
4024 } else if (NumOperands == 4) {
4025 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
4026 // Handle the uimm24 immediate form, where the shift is not specified.
4027 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4029 if (const MCConstantExpr *CE =
4030 dyn_cast<MCConstantExpr>(Op3->getImm())) {
4031 uint64_t Val = CE->getValue();
4032 if (Val >= (1 << 24)) {
4033 Error(IDLoc, "immediate value is too large");
4036 if (Val < (1 << 12)) {
4037 Operands.push_back(ARM64Operand::CreateShifter(
4038 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4039 } else if ((Val & 0xfff) == 0) {
4041 CE = MCConstantExpr::Create(Val >> 12, getContext());
4043 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
4044 Operands.push_back(ARM64Operand::CreateShifter(
4045 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
4047 Error(IDLoc, "immediate value is too large");
4051 Operands.push_back(ARM64Operand::CreateShifter(
4052 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4056 // FIXME: Horible hack to handle the LSL -> UBFM alias.
4057 } else if (NumOperands == 4 && Tok == "lsl") {
4058 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4059 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4060 if (Op2->isReg() && Op3->isImm()) {
4061 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4063 uint64_t Op3Val = Op3CE->getValue();
4064 uint64_t NewOp3Val = 0;
4065 uint64_t NewOp4Val = 0;
4066 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
4068 NewOp3Val = (32 - Op3Val) & 0x1f;
4069 NewOp4Val = 31 - Op3Val;
4071 NewOp3Val = (64 - Op3Val) & 0x3f;
4072 NewOp4Val = 63 - Op3Val;
4075 const MCExpr *NewOp3 =
4076 MCConstantExpr::Create(NewOp3Val, getContext());
4077 const MCExpr *NewOp4 =
4078 MCConstantExpr::Create(NewOp4Val, getContext());
4080 Operands[0] = ARM64Operand::CreateToken(
4081 "ubfm", false, Op->getStartLoc(), getContext());
4082 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4083 Op3->getEndLoc(), getContext());
4084 Operands.push_back(ARM64Operand::CreateImm(
4085 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
4091 // FIXME: Horrible hack to handle the optional LSL shift for vector
4093 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
4094 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4095 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4096 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4097 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4098 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
4099 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
4100 IDLoc, getContext()));
4101 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
4102 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4103 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
4104 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4105 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
4106 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
4107 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
4108 // Canonicalize on lower-case for ease of comparison.
4109 std::string CanonicalSuffix = Suffix.lower();
4110 if (Tok != "movi" ||
4111 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
4112 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
4113 Operands.push_back(ARM64Operand::CreateShifter(
4114 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
4117 } else if (NumOperands == 5) {
4118 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
4119 // UBFIZ -> UBFM aliases.
4120 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
4121 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4122 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4123 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4125 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4126 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4127 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4129 if (Op3CE && Op4CE) {
4130 uint64_t Op3Val = Op3CE->getValue();
4131 uint64_t Op4Val = Op4CE->getValue();
4133 uint64_t NewOp3Val = 0;
4134 if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
4136 NewOp3Val = (32 - Op3Val) & 0x1f;
4138 NewOp3Val = (64 - Op3Val) & 0x3f;
4140 uint64_t NewOp4Val = Op4Val - 1;
4142 const MCExpr *NewOp3 =
4143 MCConstantExpr::Create(NewOp3Val, getContext());
4144 const MCExpr *NewOp4 =
4145 MCConstantExpr::Create(NewOp4Val, getContext());
4146 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
4147 Op3->getEndLoc(), getContext());
4148 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
4149 Op4->getEndLoc(), getContext());
4151 Operands[0] = ARM64Operand::CreateToken(
4152 "bfm", false, Op->getStartLoc(), getContext());
4153 else if (Tok == "sbfiz")
4154 Operands[0] = ARM64Operand::CreateToken(
4155 "sbfm", false, Op->getStartLoc(), getContext());
4156 else if (Tok == "ubfiz")
4157 Operands[0] = ARM64Operand::CreateToken(
4158 "ubfm", false, Op->getStartLoc(), getContext());
4160 llvm_unreachable("No valid mnemonic for alias?");
4168 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4169 // UBFX -> UBFM aliases.
4170 } else if (NumOperands == 5 &&
4171 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4172 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4173 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4174 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4176 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4177 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4178 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4180 if (Op3CE && Op4CE) {
4181 uint64_t Op3Val = Op3CE->getValue();
4182 uint64_t Op4Val = Op4CE->getValue();
4183 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4185 if (NewOp4Val >= Op3Val) {
4186 const MCExpr *NewOp4 =
4187 MCConstantExpr::Create(NewOp4Val, getContext());
4188 Operands[4] = ARM64Operand::CreateImm(
4189 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4191 Operands[0] = ARM64Operand::CreateToken(
4192 "bfm", false, Op->getStartLoc(), getContext());
4193 else if (Tok == "sbfx")
4194 Operands[0] = ARM64Operand::CreateToken(
4195 "sbfm", false, Op->getStartLoc(), getContext());
4196 else if (Tok == "ubfx")
4197 Operands[0] = ARM64Operand::CreateToken(
4198 "ubfm", false, Op->getStartLoc(), getContext());
4200 llvm_unreachable("No valid mnemonic for alias?");
4209 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4210 // InstAlias can't quite handle this since the reg classes aren't
4212 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4213 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4215 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
4216 if (OpCE->getValue() < 32) {
4217 // The source register can be Wn here, but the matcher expects a
4218 // GPR64. Twiddle it here if necessary.
4219 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4221 unsigned Reg = getXRegFromWReg(Op->getReg());
4222 Operands[1] = ARM64Operand::CreateReg(
4223 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4230 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4231 // InstAlias can't quite handle this since the reg classes aren't
4233 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4234 // The source register can be Wn here, but the matcher expects a
4235 // GPR64. Twiddle it here if necessary.
4236 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4238 unsigned Reg = getXRegFromWReg(Op->getReg());
4239 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4240 Op->getEndLoc(), getContext());
4244 // FIXME: Likewise for sxt[bh] with a Xd dst operand
4245 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
4246 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4248 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
4250 // The source register can be Wn here, but the matcher expects a
4251 // GPR64. Twiddle it here if necessary.
4252 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4254 unsigned Reg = getXRegFromWReg(Op->getReg());
4255 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4256 Op->getEndLoc(), getContext());
4261 // FIXME: Likewise for uxt[bh] with a Xd dst operand
4262 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
4263 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4265 ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
4267 // The source register can be Wn here, but the matcher expects a
4268 // GPR32. Twiddle it here if necessary.
4269 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4271 unsigned Reg = getWRegFromXReg(Op->getReg());
4272 Operands[1] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4273 Op->getEndLoc(), getContext());
4279 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4280 if (NumOperands == 3 && Tok == "fmov") {
4281 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4282 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
4283 if (RegOp->isReg() && ImmOp->isFPImm() &&
4284 ImmOp->getFPImm() == (unsigned)-1) {
4285 unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
4289 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4290 Op->getEndLoc(), getContext());
4295 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4296 // FMOV instructions. The index isn't an actual instruction operand
4297 // but rather syntactic sugar. It really should be part of the mnemonic,
4298 // not the operand, but whatever.
4299 if ((NumOperands == 5) && Tok == "fmov") {
4300 // If the last operand is a vectorindex of '1', then replace it with
4301 // a '[' '1' ']' token sequence, which is what the matcher
4302 // (annoyingly) expects for a literal vector index operand.
4303 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4304 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4305 SMLoc Loc = Op->getStartLoc();
4306 Operands.pop_back();
4309 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4311 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4313 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4314 } else if (Op->isReg()) {
4315 // Similarly, check the destination operand for the GPR->High-lane
4317 unsigned OpNo = NumOperands - 2;
4318 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4319 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4320 SMLoc Loc = Op->getStartLoc();
4322 ARM64Operand::CreateToken("[", false, Loc, getContext());
4324 Operands.begin() + OpNo + 1,
4325 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4327 Operands.begin() + OpNo + 2,
4328 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4335 // First try to match against the secondary set of tables containing the
4336 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4337 unsigned MatchResult =
4338 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4340 // If that fails, try against the alternate table containing long-form NEON:
4341 // "fadd v0.2s, v1.2s, v2.2s"
4342 if (MatchResult != Match_Success)
4344 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4346 switch (MatchResult) {
4347 case Match_Success: {
4348 // Perform range checking and other semantic validations
4349 SmallVector<SMLoc, 8> OperandLocs;
4350 NumOperands = Operands.size();
4351 for (unsigned i = 1; i < NumOperands; ++i)
4352 OperandLocs.push_back(Operands[i]->getStartLoc());
4353 if (validateInstruction(Inst, OperandLocs))
4357 Out.EmitInstruction(Inst, STI);
4360 case Match_MissingFeature: {
4361 assert(ErrorInfo && "Unknown missing feature!");
4362 // Special case the error message for the very common case where only
4363 // a single subtarget feature is missing (neon, e.g.).
4364 std::string Msg = "instruction requires:";
4366 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
4367 if (ErrorInfo & Mask) {
4369 Msg += getSubtargetFeatureName(ErrorInfo & Mask);
4373 return Error(IDLoc, Msg);
4375 case Match_MnemonicFail:
4376 return showMatchError(IDLoc, MatchResult);
4377 case Match_InvalidOperand: {
4378 SMLoc ErrorLoc = IDLoc;
4379 if (ErrorInfo != ~0U) {
4380 if (ErrorInfo >= Operands.size())
4381 return Error(IDLoc, "too few operands for instruction");
4383 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4384 if (ErrorLoc == SMLoc())
4387 // If the match failed on a suffix token operand, tweak the diagnostic
4389 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4390 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4391 MatchResult = Match_InvalidSuffix;
4393 return showMatchError(ErrorLoc, MatchResult);
4395 case Match_InvalidMemoryIndexedSImm9: {
4396 // If there is not a '!' after the memory operand that failed, we really
4397 // want the diagnostic for the non-pre-indexed instruction variant instead.
4398 // Be careful to check for the post-indexed variant as well, which also
4399 // uses this match diagnostic. Also exclude the explicitly unscaled
4400 // mnemonics, as they want the unscaled diagnostic as well.
4401 if (Operands.size() == ErrorInfo + 1 &&
4402 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4403 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4404 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4405 // the register class of the previous operand. Default to 64 in case
4406 // we see something unexpected.
4407 MatchResult = Match_InvalidMemoryIndexed64;
4409 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4410 if (PrevOp->isReg() &&
4411 ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
4413 MatchResult = Match_InvalidMemoryIndexed32;
4416 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4417 if (ErrorLoc == SMLoc())
4419 return showMatchError(ErrorLoc, MatchResult);
4421 case Match_InvalidMemoryIndexed32:
4422 case Match_InvalidMemoryIndexed64:
4423 case Match_InvalidMemoryIndexed128:
4424 // If there is a '!' after the memory operand that failed, we really
4425 // want the diagnostic for the pre-indexed instruction variant instead.
4426 if (Operands.size() > ErrorInfo + 1 &&
4427 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4428 MatchResult = Match_InvalidMemoryIndexedSImm9;
4430 case Match_InvalidMemoryIndexed8:
4431 case Match_InvalidMemoryIndexed16:
4432 case Match_InvalidMemoryIndexed32SImm7:
4433 case Match_InvalidMemoryIndexed64SImm7:
4434 case Match_InvalidMemoryIndexed128SImm7:
4435 case Match_InvalidImm0_7:
4436 case Match_InvalidImm0_15:
4437 case Match_InvalidImm0_31:
4438 case Match_InvalidImm0_63:
4439 case Match_InvalidImm1_8:
4440 case Match_InvalidImm1_16:
4441 case Match_InvalidImm1_32:
4442 case Match_InvalidImm1_64:
4443 case Match_InvalidIndexB:
4444 case Match_InvalidIndexH:
4445 case Match_InvalidIndexS:
4446 case Match_InvalidIndexD:
4447 case Match_InvalidLabel:
4450 // Any time we get here, there's nothing fancy to do. Just get the
4451 // operand SMLoc and display the diagnostic.
4452 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4453 // If it's a memory operand, the error is with the offset immediate,
4454 // so get that location instead.
4455 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4456 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4457 if (ErrorLoc == SMLoc())
4459 return showMatchError(ErrorLoc, MatchResult);
4463 llvm_unreachable("Implement any new match types added!");
4467 /// ParseDirective parses the arm specific directives
4468 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4469 StringRef IDVal = DirectiveID.getIdentifier();
4470 SMLoc Loc = DirectiveID.getLoc();
4471 if (IDVal == ".hword")
4472 return parseDirectiveWord(2, Loc);
4473 if (IDVal == ".word")
4474 return parseDirectiveWord(4, Loc);
4475 if (IDVal == ".xword")
4476 return parseDirectiveWord(8, Loc);
4477 if (IDVal == ".tlsdesccall")
4478 return parseDirectiveTLSDescCall(Loc);
4480 return parseDirectiveLOH(IDVal, Loc);
4483 /// parseDirectiveWord
4484 /// ::= .word [ expression (, expression)* ]
4485 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4486 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4488 const MCExpr *Value;
4489 if (getParser().parseExpression(Value))
4492 getParser().getStreamer().EmitValue(Value, Size);
4494 if (getLexer().is(AsmToken::EndOfStatement))
4497 // FIXME: Improve diagnostic.
4498 if (getLexer().isNot(AsmToken::Comma))
4499 return Error(L, "unexpected token in directive");
4508 // parseDirectiveTLSDescCall:
4509 // ::= .tlsdesccall symbol
4510 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4512 if (getParser().parseIdentifier(Name))
4513 return Error(L, "expected symbol after directive");
4515 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4516 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4517 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
4520 Inst.setOpcode(ARM64::TLSDESCCALL);
4521 Inst.addOperand(MCOperand::CreateExpr(Expr));
4523 getParser().getStreamer().EmitInstruction(Inst, STI);
4527 /// ::= .loh <lohName | lohId> label1, ..., labelN
4528 /// The number of arguments depends on the loh identifier.
4529 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4530 if (IDVal != MCLOHDirectiveName())
4533 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4534 if (getParser().getTok().isNot(AsmToken::Integer))
4535 return TokError("expected an identifier or a number in directive");
4536 // We successfully get a numeric value for the identifier.
4537 // Check if it is valid.
4538 int64_t Id = getParser().getTok().getIntVal();
4539 Kind = (MCLOHType)Id;
4540 // Check that Id does not overflow MCLOHType.
4541 if (!isValidMCLOHType(Kind) || Id != Kind)
4542 return TokError("invalid numeric identifier in directive");
4544 StringRef Name = getTok().getIdentifier();
4545 // We successfully parse an identifier.
4546 // Check if it is a recognized one.
4547 int Id = MCLOHNameToId(Name);
4550 return TokError("invalid identifier in directive");
4551 Kind = (MCLOHType)Id;
4553 // Consume the identifier.
4555 // Get the number of arguments of this LOH.
4556 int NbArgs = MCLOHIdToNbArgs(Kind);
4558 assert(NbArgs != -1 && "Invalid number of arguments");
4560 SmallVector<MCSymbol *, 3> Args;
4561 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4563 if (getParser().parseIdentifier(Name))
4564 return TokError("expected identifier in directive");
4565 Args.push_back(getContext().GetOrCreateSymbol(Name));
4567 if (Idx + 1 == NbArgs)
4569 if (getLexer().isNot(AsmToken::Comma))
4570 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4573 if (getLexer().isNot(AsmToken::EndOfStatement))
4574 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4576 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
4581 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4582 ARM64MCExpr::VariantKind &ELFRefKind,
4583 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4585 ELFRefKind = ARM64MCExpr::VK_INVALID;
4586 DarwinRefKind = MCSymbolRefExpr::VK_None;
4589 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4590 ELFRefKind = AE->getKind();
4591 Expr = AE->getSubExpr();
4594 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4596 // It's a simple symbol reference with no addend.
4597 DarwinRefKind = SE->getKind();
4601 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4605 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4608 DarwinRefKind = SE->getKind();
4610 if (BE->getOpcode() != MCBinaryExpr::Add &&
4611 BE->getOpcode() != MCBinaryExpr::Sub)
4614 // See if the addend is is a constant, otherwise there's more going
4615 // on here than we can deal with.
4616 auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
4620 Addend = AddendExpr->getValue();
4621 if (BE->getOpcode() == MCBinaryExpr::Sub)
4624 // It's some symbol reference + a constant addend, but really
4625 // shouldn't use both Darwin and ELF syntax.
4626 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4627 DarwinRefKind == MCSymbolRefExpr::VK_None;
4630 /// Force static initialization.
4631 extern "C" void LLVMInitializeARM64AsmParser() {
4632 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
4633 RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
4636 #define GET_REGISTER_MATCHER
4637 #define GET_SUBTARGET_FEATURE_NAME
4638 #define GET_MATCHER_IMPLEMENTATION
4639 #include "ARM64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
//
// Returns Match_Success when AsmOp is a constant immediate whose value equals
// the literal demanded by the given match class, Match_InvalidOperand
// otherwise.
//
// NOTE(review): the second parameter line (the match-class `Kind`) and the
// switch that maps each generated literal-immediate match class (MCK_*) to
// its ExpectedVal appear to have been dropped from this chunk — restore them
// from upstream before building; the code below is kept byte-identical.
unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
  ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
    // Unhandled match class: not a fixed-literal token we validate here.
    return Match_InvalidOperand;
    // Operand is not an immediate at all.
    return Match_InvalidOperand;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    // Immediate is not a plain constant expression.
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  // Constant immediate present, but with the wrong literal value.
  return Match_InvalidOperand;