//===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 ARM64CC::CondCode parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind, bool expected);
59 bool parseRegister(OperandVector &Operands);
60 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
61 bool parseVectorList(OperandVector &Operands);
62 bool parseOperand(OperandVector &Operands, bool isCondCode,
65 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
66 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
67 bool showMatchError(SMLoc Loc, unsigned ErrCode);
69 bool parseDirectiveWord(unsigned Size, SMLoc L);
70 bool parseDirectiveTLSDescCall(SMLoc L);
72 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
74 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
75 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
76 OperandVector &Operands, MCStreamer &Out,
78 bool MatchingInlineAsm) override;
79 /// @name Auto-generated Match Functions
82 #define GET_ASSEMBLER_HEADER
83 #include "ARM64GenAsmMatcher.inc"
87 OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
88 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
89 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
90 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
91 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
92 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
93 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
94 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
95 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
96 OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
97 OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
98 bool tryParseVectorRegister(OperandVector &Operands);
101 enum ARM64MatchResultTy {
102 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
106 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107 const MCInstrInfo &MII,
108 const MCTargetOptions &Options)
109 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
110 MCAsmParserExtension::Initialize(_Parser);
112 // Initialize the set of available features.
113 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
116 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
117 SMLoc NameLoc, OperandVector &Operands) override;
118 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
119 bool ParseDirective(AsmToken DirectiveID) override;
120 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op,
121 unsigned Kind) override;
123 static bool classifySymbolRef(const MCExpr *Expr,
124 ARM64MCExpr::VariantKind &ELFRefKind,
125 MCSymbolRefExpr::VariantKind &DarwinRefKind,
128 } // end anonymous namespace
132 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
134 class ARM64Operand : public MCParsedAsmOperand {
152 SMLoc StartLoc, EndLoc;
157 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
165 struct VectorListOp {
168 unsigned NumElements;
169 unsigned ElementKind;
172 struct VectorIndexOp {
180 struct ShiftedImmOp {
182 unsigned ShiftAmount;
186 ARM64CC::CondCode Code;
190 unsigned Val; // Encoded 8-bit representation.
194 unsigned Val; // Not the enum since not all values have names.
200 uint64_t FeatureBits; // We need to pass through information about which
201 // core we are compiling for so that the SysReg
202 // Mappers can appropriately conditionalize.
213 struct ShiftExtendOp {
214 ARM64_AM::ShiftExtendType Type;
216 bool HasExplicitAmount;
226 struct VectorListOp VectorList;
227 struct VectorIndexOp VectorIndex;
229 struct ShiftedImmOp ShiftedImm;
230 struct CondCodeOp CondCode;
231 struct FPImmOp FPImm;
232 struct BarrierOp Barrier;
233 struct SysRegOp SysReg;
234 struct SysCRImmOp SysCRImm;
235 struct PrefetchOp Prefetch;
236 struct ShiftExtendOp ShiftExtend;
239 // Keep the MCContext around as the MCExprs may need manipulated during
240 // the add<>Operands() calls.
243 ARM64Operand(KindTy K, MCContext &_Ctx)
244 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
247 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
249 StartLoc = o.StartLoc;
259 ShiftedImm = o.ShiftedImm;
262 CondCode = o.CondCode;
274 VectorList = o.VectorList;
277 VectorIndex = o.VectorIndex;
283 SysCRImm = o.SysCRImm;
286 Prefetch = o.Prefetch;
289 ShiftExtend = o.ShiftExtend;
294 /// getStartLoc - Get the location of the first token of this operand.
295 SMLoc getStartLoc() const override { return StartLoc; }
296 /// getEndLoc - Get the location of the last token of this operand.
297 SMLoc getEndLoc() const override { return EndLoc; }
299 StringRef getToken() const {
300 assert(Kind == k_Token && "Invalid access!");
301 return StringRef(Tok.Data, Tok.Length);
304 bool isTokenSuffix() const {
305 assert(Kind == k_Token && "Invalid access!");
309 const MCExpr *getImm() const {
310 assert(Kind == k_Immediate && "Invalid access!");
314 const MCExpr *getShiftedImmVal() const {
315 assert(Kind == k_ShiftedImm && "Invalid access!");
316 return ShiftedImm.Val;
319 unsigned getShiftedImmShift() const {
320 assert(Kind == k_ShiftedImm && "Invalid access!");
321 return ShiftedImm.ShiftAmount;
324 ARM64CC::CondCode getCondCode() const {
325 assert(Kind == k_CondCode && "Invalid access!");
326 return CondCode.Code;
329 unsigned getFPImm() const {
330 assert(Kind == k_FPImm && "Invalid access!");
334 unsigned getBarrier() const {
335 assert(Kind == k_Barrier && "Invalid access!");
339 unsigned getReg() const override {
340 assert(Kind == k_Register && "Invalid access!");
344 unsigned getVectorListStart() const {
345 assert(Kind == k_VectorList && "Invalid access!");
346 return VectorList.RegNum;
349 unsigned getVectorListCount() const {
350 assert(Kind == k_VectorList && "Invalid access!");
351 return VectorList.Count;
354 unsigned getVectorIndex() const {
355 assert(Kind == k_VectorIndex && "Invalid access!");
356 return VectorIndex.Val;
359 StringRef getSysReg() const {
360 assert(Kind == k_SysReg && "Invalid access!");
361 return StringRef(SysReg.Data, SysReg.Length);
364 uint64_t getSysRegFeatureBits() const {
365 assert(Kind == k_SysReg && "Invalid access!");
366 return SysReg.FeatureBits;
369 unsigned getSysCR() const {
370 assert(Kind == k_SysCR && "Invalid access!");
374 unsigned getPrefetch() const {
375 assert(Kind == k_Prefetch && "Invalid access!");
379 ARM64_AM::ShiftExtendType getShiftExtendType() const {
380 assert(Kind == k_ShiftExtend && "Invalid access!");
381 return ShiftExtend.Type;
384 unsigned getShiftExtendAmount() const {
385 assert(Kind == k_ShiftExtend && "Invalid access!");
386 return ShiftExtend.Amount;
389 bool hasShiftExtendAmount() const {
390 assert(Kind == k_ShiftExtend && "Invalid access!");
391 return ShiftExtend.HasExplicitAmount;
394 bool isImm() const override { return Kind == k_Immediate; }
395 bool isMem() const override { return false; }
396 bool isSImm9() const {
399 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
402 int64_t Val = MCE->getValue();
403 return (Val >= -256 && Val < 256);
405 bool isSImm7s4() const {
408 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
411 int64_t Val = MCE->getValue();
412 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
414 bool isSImm7s8() const {
417 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
420 int64_t Val = MCE->getValue();
421 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
423 bool isSImm7s16() const {
426 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
429 int64_t Val = MCE->getValue();
430 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
433 bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
434 ARM64MCExpr::VariantKind ELFRefKind;
435 MCSymbolRefExpr::VariantKind DarwinRefKind;
437 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
439 // If we don't understand the expression, assume the best and
440 // let the fixup and relocation code deal with it.
444 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
445 ELFRefKind == ARM64MCExpr::VK_LO12 ||
446 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
447 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
448 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
449 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
450 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
451 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
452 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
453 // Note that we don't range-check the addend. It's adjusted modulo page
454 // size when converted, so there is no "out of range" condition when using
456 return Addend >= 0 && (Addend % Scale) == 0;
457 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
458 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
459 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
466 template <int Scale> bool isUImm12Offset() const {
470 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
472 return isSymbolicUImm12Offset(getImm(), Scale);
474 int64_t Val = MCE->getValue();
475 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
478 bool isImm0_7() const {
481 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
484 int64_t Val = MCE->getValue();
485 return (Val >= 0 && Val < 8);
487 bool isImm1_8() const {
490 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
493 int64_t Val = MCE->getValue();
494 return (Val > 0 && Val < 9);
496 bool isImm0_15() const {
499 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
502 int64_t Val = MCE->getValue();
503 return (Val >= 0 && Val < 16);
505 bool isImm1_16() const {
508 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
511 int64_t Val = MCE->getValue();
512 return (Val > 0 && Val < 17);
514 bool isImm0_31() const {
517 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
520 int64_t Val = MCE->getValue();
521 return (Val >= 0 && Val < 32);
523 bool isImm1_31() const {
526 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
529 int64_t Val = MCE->getValue();
530 return (Val >= 1 && Val < 32);
532 bool isImm1_32() const {
535 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
538 int64_t Val = MCE->getValue();
539 return (Val >= 1 && Val < 33);
541 bool isImm0_63() const {
544 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
547 int64_t Val = MCE->getValue();
548 return (Val >= 0 && Val < 64);
550 bool isImm1_63() const {
553 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
556 int64_t Val = MCE->getValue();
557 return (Val >= 1 && Val < 64);
559 bool isImm1_64() const {
562 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
565 int64_t Val = MCE->getValue();
566 return (Val >= 1 && Val < 65);
568 bool isImm0_127() const {
571 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 int64_t Val = MCE->getValue();
575 return (Val >= 0 && Val < 128);
577 bool isImm0_255() const {
580 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
583 int64_t Val = MCE->getValue();
584 return (Val >= 0 && Val < 256);
586 bool isImm0_65535() const {
589 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
592 int64_t Val = MCE->getValue();
593 return (Val >= 0 && Val < 65536);
595 bool isImm32_63() const {
598 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
601 int64_t Val = MCE->getValue();
602 return (Val >= 32 && Val < 64);
604 bool isLogicalImm32() const {
607 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
610 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
612 bool isLogicalImm64() const {
615 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
618 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
620 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
621 bool isAddSubImm() const {
622 if (!isShiftedImm() && !isImm())
627 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
628 if (isShiftedImm()) {
629 unsigned Shift = ShiftedImm.ShiftAmount;
630 Expr = ShiftedImm.Val;
631 if (Shift != 0 && Shift != 12)
637 ARM64MCExpr::VariantKind ELFRefKind;
638 MCSymbolRefExpr::VariantKind DarwinRefKind;
640 if (ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind,
641 DarwinRefKind, Addend)) {
642 return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
643 || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
644 || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
645 || ELFRefKind == ARM64MCExpr::VK_LO12
646 || ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12
647 || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12
648 || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC
649 || ELFRefKind == ARM64MCExpr::VK_TPREL_HI12
650 || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12
651 || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC
652 || ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12;
655 // Otherwise it should be a real immediate in range:
656 const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
657 return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
659 bool isCondCode() const { return Kind == k_CondCode; }
660 bool isSIMDImmType10() const {
663 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
666 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
668 bool isBranchTarget26() const {
671 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
674 int64_t Val = MCE->getValue();
677 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
679 bool isPCRelLabel19() const {
682 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
685 int64_t Val = MCE->getValue();
688 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
690 bool isBranchTarget14() const {
693 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
696 int64_t Val = MCE->getValue();
699 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
702 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
706 ARM64MCExpr::VariantKind ELFRefKind;
707 MCSymbolRefExpr::VariantKind DarwinRefKind;
709 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
713 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
716 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
717 if (ELFRefKind == AllowedModifiers[i])
724 bool isMovZSymbolG3() const {
725 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
726 return isMovWSymbol(Variants);
729 bool isMovZSymbolG2() const {
730 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
731 ARM64MCExpr::VK_ABS_G2_S,
732 ARM64MCExpr::VK_TPREL_G2,
733 ARM64MCExpr::VK_DTPREL_G2 };
734 return isMovWSymbol(Variants);
737 bool isMovZSymbolG1() const {
738 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
739 ARM64MCExpr::VK_ABS_G1_S,
740 ARM64MCExpr::VK_GOTTPREL_G1,
741 ARM64MCExpr::VK_TPREL_G1,
742 ARM64MCExpr::VK_DTPREL_G1, };
743 return isMovWSymbol(Variants);
746 bool isMovZSymbolG0() const {
747 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
748 ARM64MCExpr::VK_ABS_G0_S,
749 ARM64MCExpr::VK_TPREL_G0,
750 ARM64MCExpr::VK_DTPREL_G0 };
751 return isMovWSymbol(Variants);
754 bool isMovKSymbolG3() const {
755 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
756 return isMovWSymbol(Variants);
759 bool isMovKSymbolG2() const {
760 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
761 return isMovWSymbol(Variants);
764 bool isMovKSymbolG1() const {
765 static ARM64MCExpr::VariantKind Variants[] = {
766 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
767 ARM64MCExpr::VK_DTPREL_G1_NC
769 return isMovWSymbol(Variants);
772 bool isMovKSymbolG0() const {
773 static ARM64MCExpr::VariantKind Variants[] = {
774 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
775 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
777 return isMovWSymbol(Variants);
780 template<int RegWidth, int Shift>
781 bool isMOVZMovAlias() const {
782 if (!isImm()) return false;
784 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785 if (!CE) return false;
786 uint64_t Value = CE->getValue();
789 Value &= 0xffffffffULL;
791 // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
792 if (Value == 0 && Shift != 0)
795 return (Value & ~(0xffffULL << Shift)) == 0;
798 template<int RegWidth, int Shift>
799 bool isMOVNMovAlias() const {
800 if (!isImm()) return false;
802 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
803 if (!CE) return false;
804 uint64_t Value = CE->getValue();
806 // MOVZ takes precedence over MOVN.
807 for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
808 if ((Value & ~(0xffffULL << MOVZShift)) == 0)
813 Value &= 0xffffffffULL;
815 return (Value & ~(0xffffULL << Shift)) == 0;
818 bool isFPImm() const { return Kind == k_FPImm; }
819 bool isBarrier() const { return Kind == k_Barrier; }
820 bool isSysReg() const { return Kind == k_SysReg; }
821 bool isMRSSystemRegister() const {
822 if (!isSysReg()) return false;
824 bool IsKnownRegister;
825 auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
826 Mapper.fromString(getSysReg(), IsKnownRegister);
828 return IsKnownRegister;
830 bool isMSRSystemRegister() const {
831 if (!isSysReg()) return false;
833 bool IsKnownRegister;
834 auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
835 Mapper.fromString(getSysReg(), IsKnownRegister);
837 return IsKnownRegister;
839 bool isSystemPStateField() const {
840 if (!isSysReg()) return false;
842 bool IsKnownRegister;
843 ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
845 return IsKnownRegister;
847 bool isReg() const override { return Kind == k_Register && !Reg.isVector; }
848 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
849 bool isVectorRegLo() const {
850 return Kind == k_Register && Reg.isVector &&
851 ARM64MCRegisterClasses[ARM64::FPR128_loRegClassID].contains(Reg.RegNum);
853 bool isGPR32as64() const {
854 return Kind == k_Register && !Reg.isVector &&
855 ARM64MCRegisterClasses[ARM64::GPR64RegClassID].contains(Reg.RegNum);
858 bool isGPR64sp0() const {
859 return Kind == k_Register && !Reg.isVector &&
860 ARM64MCRegisterClasses[ARM64::GPR64spRegClassID].contains(Reg.RegNum);
863 /// Is this a vector list with the type implicit (presumably attached to the
864 /// instruction itself)?
865 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
866 return Kind == k_VectorList && VectorList.Count == NumRegs &&
867 !VectorList.ElementKind;
870 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
871 bool isTypedVectorList() const {
872 if (Kind != k_VectorList)
874 if (VectorList.Count != NumRegs)
876 if (VectorList.ElementKind != ElementKind)
878 return VectorList.NumElements == NumElements;
881 bool isVectorIndex1() const {
882 return Kind == k_VectorIndex && VectorIndex.Val == 1;
884 bool isVectorIndexB() const {
885 return Kind == k_VectorIndex && VectorIndex.Val < 16;
887 bool isVectorIndexH() const {
888 return Kind == k_VectorIndex && VectorIndex.Val < 8;
890 bool isVectorIndexS() const {
891 return Kind == k_VectorIndex && VectorIndex.Val < 4;
893 bool isVectorIndexD() const {
894 return Kind == k_VectorIndex && VectorIndex.Val < 2;
896 bool isToken() const override { return Kind == k_Token; }
897 bool isTokenEqual(StringRef Str) const {
898 return Kind == k_Token && getToken() == Str;
900 bool isSysCR() const { return Kind == k_SysCR; }
901 bool isPrefetch() const { return Kind == k_Prefetch; }
902 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
903 bool isShifter() const {
904 if (!isShiftExtend())
907 ARM64_AM::ShiftExtendType ST = getShiftExtendType();
908 return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
909 ST == ARM64_AM::ROR || ST == ARM64_AM::MSL);
911 bool isExtend() const {
912 if (!isShiftExtend())
915 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
916 return (ET == ARM64_AM::UXTB || ET == ARM64_AM::SXTB ||
917 ET == ARM64_AM::UXTH || ET == ARM64_AM::SXTH ||
918 ET == ARM64_AM::UXTW || ET == ARM64_AM::SXTW ||
919 ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX ||
920 ET == ARM64_AM::LSL) &&
921 getShiftExtendAmount() <= 4;
924 bool isExtend64() const {
927 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
928 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
929 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
931 bool isExtendLSL64() const {
934 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
935 return (ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX || ET == ARM64_AM::LSL) &&
936 getShiftExtendAmount() <= 4;
939 template<int Width> bool isMemXExtend() const {
942 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
943 return (ET == ARM64_AM::LSL || ET == ARM64_AM::SXTX) &&
944 (getShiftExtendAmount() == Log2_32(Width / 8) ||
945 getShiftExtendAmount() == 0);
948 template<int Width> bool isMemWExtend() const {
951 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
952 return (ET == ARM64_AM::UXTW || ET == ARM64_AM::SXTW) &&
953 (getShiftExtendAmount() == Log2_32(Width / 8) ||
954 getShiftExtendAmount() == 0);
957 template <unsigned width>
958 bool isArithmeticShifter() const {
962 // An arithmetic shifter is LSL, LSR, or ASR.
963 ARM64_AM::ShiftExtendType ST = getShiftExtendType();
964 return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR ||
965 ST == ARM64_AM::ASR) && getShiftExtendAmount() < width;
968 template <unsigned width>
969 bool isLogicalShifter() const {
973 // A logical shifter is LSL, LSR, ASR or ROR.
974 ARM64_AM::ShiftExtendType ST = getShiftExtendType();
975 return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
976 ST == ARM64_AM::ROR) &&
977 getShiftExtendAmount() < width;
980 bool isMovImm32Shifter() const {
984 // A MOVi shifter is LSL of 0, 16, 32, or 48.
985 ARM64_AM::ShiftExtendType ST = getShiftExtendType();
986 if (ST != ARM64_AM::LSL)
988 uint64_t Val = getShiftExtendAmount();
989 return (Val == 0 || Val == 16);
992 bool isMovImm64Shifter() const {
996 // A MOVi shifter is LSL of 0 or 16.
997 ARM64_AM::ShiftExtendType ST = getShiftExtendType();
998 if (ST != ARM64_AM::LSL)
1000 uint64_t Val = getShiftExtendAmount();
1001 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1004 bool isLogicalVecShifter() const {
1008 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1009 unsigned Shift = getShiftExtendAmount();
1010 return getShiftExtendType() == ARM64_AM::LSL &&
1011 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1014 bool isLogicalVecHalfWordShifter() const {
1015 if (!isLogicalVecShifter())
1018 // A logical vector shifter is a left shift by 0 or 8.
1019 unsigned Shift = getShiftExtendAmount();
1020 return getShiftExtendType() == ARM64_AM::LSL && (Shift == 0 || Shift == 8);
1023 bool isMoveVecShifter() const {
1024 if (!isShiftExtend())
1027 // A logical vector shifter is a left shift by 8 or 16.
1028 unsigned Shift = getShiftExtendAmount();
1029 return getShiftExtendType() == ARM64_AM::MSL && (Shift == 8 || Shift == 16);
1032 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1033 // to LDUR/STUR when the offset is not legal for the former but is for
1034 // the latter. As such, in addition to checking for being a legal unscaled
1035 // address, also check that it is not a legal scaled address. This avoids
1036 // ambiguity in the matcher.
1038 bool isSImm9OffsetFB() const {
1039 return isSImm9() && !isUImm12Offset<Width / 8>();
1042 bool isAdrpLabel() const {
1043 // Validation was handled during parsing, so we just sanity check that
1044 // something didn't go haywire.
1048 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1049 int64_t Val = CE->getValue();
1050 int64_t Min = - (4096 * (1LL << (21 - 1)));
1051 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1052 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1058 bool isAdrLabel() const {
1059 // Validation was handled during parsing, so we just sanity check that
1060 // something didn't go haywire.
1064 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1065 int64_t Val = CE->getValue();
1066 int64_t Min = - (1LL << (21 - 1));
1067 int64_t Max = ((1LL << (21 - 1)) - 1);
1068 return Val >= Min && Val <= Max;
1074 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1075 // Add as immediates when possible. Null MCExpr = 0.
1077 Inst.addOperand(MCOperand::CreateImm(0));
1078 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1079 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1081 Inst.addOperand(MCOperand::CreateExpr(Expr));
1084 void addRegOperands(MCInst &Inst, unsigned N) const {
1085 assert(N == 1 && "Invalid number of operands!");
1086 Inst.addOperand(MCOperand::CreateReg(getReg()));
1089 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1090 assert(N == 1 && "Invalid number of operands!");
1091 assert(ARM64MCRegisterClasses[ARM64::GPR64RegClassID].contains(getReg()));
1093 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1094 uint32_t Reg = RI->getRegClass(ARM64::GPR32RegClassID).getRegister(
1095 RI->getEncodingValue(getReg()));
1097 Inst.addOperand(MCOperand::CreateReg(Reg));
1100 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1101 assert(N == 1 && "Invalid number of operands!");
1102 assert(ARM64MCRegisterClasses[ARM64::FPR128RegClassID].contains(getReg()));
1103 Inst.addOperand(MCOperand::CreateReg(ARM64::D0 + getReg() - ARM64::Q0));
1106 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1107 assert(N == 1 && "Invalid number of operands!");
1108 assert(ARM64MCRegisterClasses[ARM64::FPR128RegClassID].contains(getReg()));
1109 Inst.addOperand(MCOperand::CreateReg(getReg()));
1112 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1113 assert(N == 1 && "Invalid number of operands!");
1114 Inst.addOperand(MCOperand::CreateReg(getReg()));
1117 template <unsigned NumRegs>
1118 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1119 assert(N == 1 && "Invalid number of operands!");
1120 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1121 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1122 unsigned FirstReg = FirstRegs[NumRegs - 1];
1125 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1128 template <unsigned NumRegs>
1129 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1130 assert(N == 1 && "Invalid number of operands!");
1131 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1132 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1133 unsigned FirstReg = FirstRegs[NumRegs - 1];
1136 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1139 void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
1140 assert(N == 1 && "Invalid number of operands!");
1141 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1144 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1145 assert(N == 1 && "Invalid number of operands!");
1146 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1149 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1150 assert(N == 1 && "Invalid number of operands!");
1151 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1154 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1155 assert(N == 1 && "Invalid number of operands!");
1156 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1159 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1164 void addImmOperands(MCInst &Inst, unsigned N) const {
1165 assert(N == 1 && "Invalid number of operands!");
1166 // If this is a pageoff symrefexpr with an addend, adjust the addend
1167 // to be only the page-offset portion. Otherwise, just add the expr
1169 addExpr(Inst, getImm());
1172 void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
1173 assert(N == 2 && "Invalid number of operands!");
1174 if (isShiftedImm()) {
1175 addExpr(Inst, getShiftedImmVal());
1176 Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
1178 addExpr(Inst, getImm());
1179 Inst.addOperand(MCOperand::CreateImm(0));
1183 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1184 assert(N == 1 && "Invalid number of operands!");
1185 Inst.addOperand(MCOperand::CreateImm(getCondCode()));
1188 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1189 assert(N == 1 && "Invalid number of operands!");
1190 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1192 addExpr(Inst, getImm());
1194 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
1197 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1198 addImmOperands(Inst, N);
1202 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1203 assert(N == 1 && "Invalid number of operands!");
1204 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1207 Inst.addOperand(MCOperand::CreateExpr(getImm()));
1210 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
1213 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1214 assert(N == 1 && "Invalid number of operands!");
1215 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1216 assert(MCE && "Invalid constant immediate operand!");
1217 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// The addSImm7s{4,8,16}Operands accessors handle the signed 7-bit,
// scaled immediates used by load/store-pair instructions; the encoded
// value is the byte offset divided by the access size.
void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
  // Scale 4: 32-bit register pairs.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));

void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
  // Scale 8: 64-bit register pairs.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));

void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
  // Scale 16: 128-bit (Q) register pairs.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
// The addImm<lo>_<hi>Operands accessors below all emit a constant
// immediate verbatim; range checking is done by the corresponding
// isImm<lo>_<hi> predicates (not visible here), so by the time these run
// the value is presumed in range — TODO(review) confirm in predicates.
void addImm0_7Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_8Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_15Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_16Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_31Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_31Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_32Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_63Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_63Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm1_64Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_127Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_255Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));

void addImm32_63Operands(MCInst &Inst, unsigned N) const {
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1339 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1340 assert(N == 1 && "Invalid number of operands!");
1341 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1342 assert(MCE && "Invalid logical immediate operand!");
1343 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1344 Inst.addOperand(MCOperand::CreateImm(encoding));
1347 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1348 assert(N == 1 && "Invalid number of operands!");
1349 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1350 assert(MCE && "Invalid logical immediate operand!");
1351 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1352 Inst.addOperand(MCOperand::CreateImm(encoding));
1355 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1356 assert(N == 1 && "Invalid number of operands!");
1357 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1358 assert(MCE && "Invalid immediate operand!");
1359 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1360 Inst.addOperand(MCOperand::CreateImm(encoding));
void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // Label/expression path: leave resolution to the fixup.
  addExpr(Inst, getImm());
  // Constant path: encode as a word (4-byte) offset.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // Label/expression path: leave resolution to the fixup.
  addExpr(Inst, getImm());
  // Constant path: encode as a word (4-byte) offset.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
  // Branch operands don't encode the low bits, so shift them off
  // here. If it's a label, however, just put it on directly as there's
  // not enough information now to do anything.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
  // Label/expression path: leave resolution to the fixup.
  addExpr(Inst, getImm());
  // Constant path: encode as a word (4-byte) offset.
  assert(MCE && "Invalid constant immediate operand!");
  Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
void addFPImmOperands(MCInst &Inst, unsigned N) const {
  // 8-bit encoded floating-point immediate (see ARM64_AM::getFP64Imm).
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::CreateImm(getFPImm()));

void addBarrierOperands(MCInst &Inst, unsigned N) const {
  // Barrier option value for DMB/DSB/ISB.
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::CreateImm(getBarrier()));
void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
  // Map the system-register name to its MRS encoding, taking the
  // subtarget feature bits into account.
  assert(N == 1 && "Invalid number of operands!");
  auto Mapper = ARM64SysReg::MRSMapper(getSysRegFeatureBits());
  uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
  Inst.addOperand(MCOperand::CreateImm(Bits));

void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
  // Map the system-register name to its MSR encoding, taking the
  // subtarget feature bits into account.
  assert(N == 1 && "Invalid number of operands!");
  auto Mapper = ARM64SysReg::MSRMapper(getSysRegFeatureBits());
  uint32_t Bits = Mapper.fromString(getSysReg(), Valid);
  Inst.addOperand(MCOperand::CreateImm(Bits));

void addSystemPStateFieldOperands(MCInst &Inst, unsigned N) const {
  // PSTATE field name (e.g. for MSR-immediate forms) mapped to its
  // encoding.
  assert(N == 1 && "Invalid number of operands!");
  uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
  Inst.addOperand(MCOperand::CreateImm(Bits));
void addSysCROperands(MCInst &Inst, unsigned N) const {
  // System-instruction CRn/CRm operand (0..15), validated at parse time.
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::CreateImm(getSysCR()));

void addPrefetchOperands(MCInst &Inst, unsigned N) const {
  // 5-bit PRFM prefetch-operation value.
  assert(N == 1 && "Invalid number of operands!");
  Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
void addShifterOperands(MCInst &Inst, unsigned N) const {
  // Pack shift type + amount into a single shifter-immediate value
  // (bound to `Imm` on the declaration preceding this continuation).
  assert(N == 1 && "Invalid number of operands!");
  ARM64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
  Inst.addOperand(MCOperand::CreateImm(Imm));
1461 void addExtendOperands(MCInst &Inst, unsigned N) const {
1462 assert(N == 1 && "Invalid number of operands!");
1463 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
1464 if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTW;
1465 unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1466 Inst.addOperand(MCOperand::CreateImm(Imm));
1469 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1470 assert(N == 1 && "Invalid number of operands!");
1471 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
1472 if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTX;
1473 unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
1474 Inst.addOperand(MCOperand::CreateImm(Imm));
1477 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
1478 assert(N == 2 && "Invalid number of operands!");
1479 ARM64_AM::ShiftExtendType ET = getShiftExtendType();
1480 bool IsSigned = ET == ARM64_AM::SXTW || ET == ARM64_AM::SXTX;
1481 Inst.addOperand(MCOperand::CreateImm(IsSigned));
1482 Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
// For 8-bit load/store instructions with a register offset, both the
// "DoShift" and "NoShift" variants have a shift of 0. Because of this,
// they're disambiguated by whether the shift was explicit or implicit rather
// than by its value.
void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
  assert(N == 2 && "Invalid number of operands!");
  ARM64_AM::ShiftExtendType ET = getShiftExtendType();
  bool IsSigned = ET == ARM64_AM::SXTW || ET == ARM64_AM::SXTX;
  Inst.addOperand(MCOperand::CreateImm(IsSigned));
  // Note: explicitness of the amount, not its value, selects the variant.
  Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
  // MOV alias of MOVZ: emit the 16-bit chunk of the value selected by
  // `Shift` (established on the elided line just above).
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
  uint64_t Value = CE->getValue();
  Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
  // MOV alias of MOVN: MOVN materializes the bitwise NOT, so emit the
  // selected 16-bit chunk of the inverted value.
  assert(N == 1 && "Invalid number of operands!");
  const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
  uint64_t Value = CE->getValue();
  Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));

// Debug dump of this operand; definition follows the class.
void print(raw_ostream &OS) const override;
// Factory: a bare token operand (mnemonic pieces, suffixes, punctuation).
static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
  ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
  // Store the token as a pointer+length pair; `Str` must outlive Op.
  Op->Tok.Data = Str.data();
  Op->Tok.Length = Str.size();
  Op->Tok.IsSuffix = IsSuffix;
// Factory: a register operand; `isVector` marks V-register syntax.
static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
                               SMLoc E, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
  Op->Reg.RegNum = RegNum;
  Op->Reg.isVector = isVector;
// Factory: a vector register list, e.g. "{ v0.8b, v1.8b }": first
// register, register count, lanes per register and element-kind letter.
static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      unsigned NumElements, char ElementKind,
                                      SMLoc S, SMLoc E, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
  Op->VectorList.RegNum = RegNum;
  Op->VectorList.Count = Count;
  Op->VectorList.NumElements = NumElements;
  Op->VectorList.ElementKind = ElementKind;
// Factory: a vector lane index, e.g. the "[2]" in "v0.s[2]".
static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
  ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
  Op->VectorIndex.Val = Idx;
// Factory: a plain immediate/expression operand.
static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
  ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1569 static ARM64Operand *CreateShiftedImm(const MCExpr *Val, unsigned ShiftAmount,
1570 SMLoc S, SMLoc E, MCContext &Ctx) {
1571 ARM64Operand *Op = new ARM64Operand(k_ShiftedImm, Ctx);
1572 Op->ShiftedImm .Val = Val;
1573 Op->ShiftedImm.ShiftAmount = ShiftAmount;
// Factory: a condition-code operand (EQ, NE, ...).
static ARM64Operand *CreateCondCode(ARM64CC::CondCode Code, SMLoc S, SMLoc E,
  ARM64Operand *Op = new ARM64Operand(k_CondCode, Ctx);
  Op->CondCode.Code = Code;

// Factory: an 8-bit encoded floating-point immediate.
static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
  Op->FPImm.Val = Val;

// Factory: a barrier option value.
static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
  Op->Barrier.Val = Val;

// Factory: a system-register name plus the subtarget feature bits used
// to resolve it later; `Str` must outlive Op.
static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S,
                                  uint64_t FeatureBits, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
  Op->SysReg.Data = Str.data();
  Op->SysReg.Length = Str.size();
  Op->SysReg.FeatureBits = FeatureBits;

// Factory: a system-instruction CRn/CRm operand.
static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
  ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
  Op->SysCRImm.Val = Val;

// Factory: a PRFM prefetch-operation operand.
static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
  Op->Prefetch.Val = Val;

// Factory: a shift/extend modifier; HasExplicitAmount records whether the
// amount was written by the user (needed to pick 8-bit mem variants).
static ARM64Operand *CreateShiftExtend(ARM64_AM::ShiftExtendType ShOp,
                                       unsigned Val, bool HasExplicitAmount,
                                       SMLoc S, SMLoc E, MCContext &Ctx) {
  ARM64Operand *Op = new ARM64Operand(k_ShiftExtend, Ctx);
  Op->ShiftExtend.Type = ShOp;
  Op->ShiftExtend.Amount = Val;
  Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1645 } // end anonymous namespace.
// Debug dump of one parsed operand; one branch per operand Kind (the
// switch skeleton is elided in this view).
void ARM64Operand::print(raw_ostream &OS) const {
  // k_FPImm: show both the 8-bit encoding and its decoded value.
  OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
  // k_Barrier: symbolic name when recognized, raw value otherwise.
  StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
  OS << "<barrier " << Name << ">";
  OS << "<barrier invalid #" << getBarrier() << ">";
  // k_Immediate:
  getImm()->print(OS);
case k_ShiftedImm: {
  unsigned Shift = getShiftedImmShift();
  OS << "<shiftedimm ";
  getShiftedImmVal()->print(OS);
  OS << ", lsl #" << ARM64_AM::getShiftValue(Shift) << ">";
  OS << "<condcode " << getCondCode() << ">";
  OS << "<register " << getReg() << ">";
case k_VectorList: {
  OS << "<vectorlist ";
  unsigned Reg = getVectorListStart();
  for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
    OS << Reg + i << " ";
  OS << "<vectorindex " << getVectorIndex() << ">";
  OS << "<sysreg: " << getSysReg() << '>';
  OS << "'" << getToken() << "'";
  OS << "c" << getSysCR();
  // k_Prefetch: symbolic name when recognized, raw value otherwise.
  StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
  OS << "<prfop " << Name << ">";
  OS << "<prfop invalid #" << getPrefetch() << ">";
case k_ShiftExtend: {
  OS << "<" << ARM64_AM::getShiftExtendName(getShiftExtendType()) << " #"
     << getShiftExtendAmount();
  if (!hasShiftExtendAmount())
1718 /// @name Auto-generated Match Functions
1721 static unsigned MatchRegisterName(StringRef Name);
// Map a vector register name "v0".."v31" to the corresponding Q-register
// enum value (V names alias the 128-bit Q registers). Kept as an explicit
// StringSwitch: TableGen register enums are sorted by name, so Qn cannot
// be computed as Q0 + n.
static unsigned matchVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name)
      .Case("v0", ARM64::Q0)
      .Case("v1", ARM64::Q1)
      .Case("v2", ARM64::Q2)
      .Case("v3", ARM64::Q3)
      .Case("v4", ARM64::Q4)
      .Case("v5", ARM64::Q5)
      .Case("v6", ARM64::Q6)
      .Case("v7", ARM64::Q7)
      .Case("v8", ARM64::Q8)
      .Case("v9", ARM64::Q9)
      .Case("v10", ARM64::Q10)
      .Case("v11", ARM64::Q11)
      .Case("v12", ARM64::Q12)
      .Case("v13", ARM64::Q13)
      .Case("v14", ARM64::Q14)
      .Case("v15", ARM64::Q15)
      .Case("v16", ARM64::Q16)
      .Case("v17", ARM64::Q17)
      .Case("v18", ARM64::Q18)
      .Case("v19", ARM64::Q19)
      .Case("v20", ARM64::Q20)
      .Case("v21", ARM64::Q21)
      .Case("v22", ARM64::Q22)
      .Case("v23", ARM64::Q23)
      .Case("v24", ARM64::Q24)
      .Case("v25", ARM64::Q25)
      .Case("v26", ARM64::Q26)
      .Case("v27", ARM64::Q27)
      .Case("v28", ARM64::Q28)
      .Case("v29", ARM64::Q29)
      .Case("v30", ARM64::Q30)
      .Case("v31", ARM64::Q31)
// Returns true for a recognized vector-kind suffix such as ".8b" or ".4s"
// (case list elided in this view); matching is case-insensitive.
static bool isValidVectorKind(StringRef Name) {
  return StringSwitch<bool>(Name.lower())
  // Accept the width neutral ones, too, for verbose syntax. If those
  // aren't used in the right places, the token operand won't match so
  // all will work out.

// Decompose a validated vector-kind suffix (leading '.', e.g. ".4s") into
// its lane count and element-kind character.
static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
                                 char &ElementKind) {
  assert(isValidVectorKind(Name));
  // The element kind is the trailing type letter, lower-cased.
  ElementKind = Name.lower()[Name.size() - 1];
  // Width-neutral suffix like ".b": no lane count present.
  if (Name.size() == 2)
  // Parse the lane count
  Name = Name.drop_front();
  while (isdigit(Name.front())) {
    NumElements = 10 * NumElements + (Name.front() - '0');
    Name = Name.drop_front();
// MCTargetAsmParser hook: parse a register, reporting its source range.
// Returns true (failure) when no register could be parsed.
bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
  StartLoc = getLoc();
  RegNo = tryParseRegister();
  // End location is the last character of the register token.
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return (RegNo == (unsigned)-1);
/// tryParseRegister - Try to parse a register name. The token must be an
/// Identifier when called, and if it is a register name the token is eaten and
/// the register is added to the operand list. Returns the register number,
/// or -1 (on elided paths) when the identifier is not a register.
int ARM64AsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Register names are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  // Also handle a few aliases of registers.
  RegNum = StringSwitch<unsigned>(lowerCase)
               .Case("fp", ARM64::FP)
               .Case("lr", ARM64::LR)
               .Case("x31", ARM64::XZR)
               .Case("w31", ARM64::WZR)

  Parser.Lex(); // Eat identifier token.
/// tryMatchVectorRegister - Try to parse a vector register name with optional
/// kind specifier. If it is a register specifier, eat the token and return it.
/// On success `Kind` receives the suffix (including the '.'); `expected`
/// controls whether a diagnostic is mandatory on the elided failure path.
int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    TokError("vector register expected");

  StringRef Name = Parser.getTok().getString();
  // If there is a kind specifier, it's separated from the register name by
  // a '.'.
  size_t Start = 0, Next = Name.find('.');
  StringRef Head = Name.slice(Start, Next);
  unsigned RegNum = matchVectorRegName(Head);
  if (Next != StringRef::npos) {
    // Keep the '.' in the suffix so it can be re-emitted as a token.
    Kind = Name.slice(Next, StringRef::npos);
    if (!isValidVectorKind(Kind)) {
      TokError("invalid vector kind qualifier");

  Parser.Lex(); // Eat the register token.

  TokError("vector register expected");
/// tryParseSysCROperand - Try to parse a system instruction CR operand name
/// of the form "cN" / "CN" with 0 <= N <= 15.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {

  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  StringRef Tok = Parser.getTok().getIdentifier();
  if (Tok[0] != 'c' && Tok[0] != 'C') {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  // Everything after the leading c/C must be a decimal number <= 15.
  bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
  if (BadNum || CRNum > 15) {
    Error(S, "Expected cN operand where 0 <= N <= 15");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARM64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
  return MatchOperand_Success;
/// tryParsePrefetch - Try to parse a prefetch operand: either a named
/// PRFM operation (e.g. "pldl1keep") or a raw 5-bit immediate.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  // Either an identifier for named values or a 5-bit immediate.
  bool Hash = Tok.is(AsmToken::Hash);
  if (Hash || Tok.is(AsmToken::Integer)) {
    Parser.Lex(); // Eat hash token.
    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return MatchOperand_ParseFail;

    // The immediate must fold to a constant.
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
      TokError("immediate value expected for prefetch operand");
      return MatchOperand_ParseFail;
    // prfop is a 5-bit field.
    unsigned prfop = MCE->getValue();
      TokError("prefetch operand out of range, [0,31] expected");
      return MatchOperand_ParseFail;

    Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
    return MatchOperand_Success;

  if (Tok.isNot(AsmToken::Identifier)) {
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;

  // Named form: resolve through the PRFM name table.
  unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
    TokError("pre-fetch hint expected");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
  return MatchOperand_Success;
/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
/// instruction: a page-qualified symbol reference or a raw immediate.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.

  if (parseSymbolicImmVal(Expr))
    return MatchOperand_ParseFail;

  ARM64MCExpr::VariantKind ELFRefKind;
  MCSymbolRefExpr::VariantKind DarwinRefKind;
  if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
    if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
        ELFRefKind == ARM64MCExpr::VK_INVALID) {
      // No modifier was specified at all; this is the syntax for an ELF basic
      // ADRP relocation (unfortunately).
      Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
    } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
                DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
      Error(S, "gotpage label reference not allowed an addend");
      return MatchOperand_ParseFail;
    } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
               DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
               ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
               ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
               ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
      // The operand must be an @page or @gotpage qualified symbolref.
      Error(S, "page or gotpage label reference expected");
      return MatchOperand_ParseFail;

  // We have either a label reference possibly with addend or an immediate. The
  // addend is a raw value here. The linker will adjust it to only reference the
  // page.
  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
/// tryParseAdrLabel - Parse and validate a source label for the ADR
/// instruction. Unlike ADRP, no page qualifier is required.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {

  // Leading '#' on the immediate form is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat hash token.

  if (getParser().parseExpression(Expr))
    return MatchOperand_ParseFail;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));

  return MatchOperand_Success;
/// tryParseFPImm - A floating point immediate expression operand: either a
/// real literal (encoded to the 8-bit FP immediate form) or a raw "0x.."
/// pre-encoded value in [0,255].
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {

  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat '#'

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {

  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 when the value has no 8-bit encoding.
    int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    // Check for out of range values. As an exception, we let Zero through,
    // as we handle that special case in post-processing before matching in
    // order to use the zero register for it.
    if (Val == -1 && !RealVal.isZero()) {
      TokError("expected compatible register or floating-point constant");
      return MatchOperand_ParseFail;

    Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;

  if (Tok.is(AsmToken::Integer)) {
    // Hex form carries the already-encoded 8-bit value directly.
    if (!isNegative && Tok.getString().startswith("0x")) {
      Val = Tok.getIntVal();
      if (Val > 255 || Val < 0) {
        TokError("encoded floating point value out of range");
        return MatchOperand_ParseFail;
    // Decimal integer: treat it as a real literal and encode it.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));

    Parser.Lex(); // Eat the token.
    Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;

  return MatchOperand_NoMatch;

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand: an
/// immediate optionally followed by ", lsl #N".
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseAddSubImm(OperandVector &Operands) {

  if (Parser.getTok().is(AsmToken::Hash))
    Parser.Lex(); // Eat '#'
  else if (Parser.getTok().isNot(AsmToken::Integer))
    // Operand should start from # or should be integer, emit error otherwise.
    return MatchOperand_NoMatch;

  if (parseSymbolicImmVal(Imm))
    return MatchOperand_ParseFail;
  else if (Parser.getTok().isNot(AsmToken::Comma)) {
    // No explicit shift; canonicalize a large page-aligned constant into
    // the "lsl #12" form so it fits the 12-bit immediate field.
    uint64_t ShiftAmount = 0;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
      int64_t Val = MCE->getValue();
      if (Val > 0xfff && (Val & 0xfff) == 0) {
        Imm = MCConstantExpr::Create(Val >> 12, getContext());

    SMLoc E = Parser.getTok().getLoc();
    Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
    return MatchOperand_Success;

  // The optional operand must be "lsl #N" where N is non-negative.
  if (!Parser.getTok().is(AsmToken::Identifier) ||
      !Parser.getTok().getIdentifier().equals_lower("lsl")) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;

  // The '#' before the shift amount is optional.
  if (Parser.getTok().is(AsmToken::Hash)) {

  if (Parser.getTok().isNot(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
    return MatchOperand_ParseFail;

  int64_t ShiftAmount = Parser.getTok().getIntVal();

  if (ShiftAmount < 0) {
    Error(Parser.getTok().getLoc(), "positive shift amount required");
    return MatchOperand_ParseFail;

  Parser.Lex(); // Eat the number

  SMLoc E = Parser.getTok().getLoc();
  Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount,
                                                    S, E, getContext()));
  return MatchOperand_Success;
/// parseCondCodeString - Parse a Condition Code string, returning
/// ARM64CC::Invalid for unrecognized names. Matching is case-insensitive;
/// "cs"/"cc" are accepted as aliases of "hs"/"lo".
ARM64CC::CondCode ARM64AsmParser::parseCondCodeString(StringRef Cond) {
  ARM64CC::CondCode CC = StringSwitch<ARM64CC::CondCode>(Cond.lower())
                             .Case("eq", ARM64CC::EQ)
                             .Case("ne", ARM64CC::NE)
                             .Case("cs", ARM64CC::HS)
                             .Case("hs", ARM64CC::HS)
                             .Case("cc", ARM64CC::LO)
                             .Case("lo", ARM64CC::LO)
                             .Case("mi", ARM64CC::MI)
                             .Case("pl", ARM64CC::PL)
                             .Case("vs", ARM64CC::VS)
                             .Case("vc", ARM64CC::VC)
                             .Case("hi", ARM64CC::HI)
                             .Case("ls", ARM64CC::LS)
                             .Case("ge", ARM64CC::GE)
                             .Case("lt", ARM64CC::LT)
                             .Case("gt", ARM64CC::GT)
                             .Case("le", ARM64CC::LE)
                             .Case("al", ARM64CC::AL)
                             .Case("nv", ARM64CC::NV)
                             .Default(ARM64CC::Invalid)
/// parseCondCode - Parse a Condition Code operand. Returns true on error.
/// `invertCondCode` flips the code for alias forms that encode the inverse.
bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
                                   bool invertCondCode) {
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  StringRef Cond = Tok.getString();
  ARM64CC::CondCode CC = parseCondCodeString(Cond);
  if (CC == ARM64CC::Invalid)
    return TokError("invalid condition code");
  Parser.Lex(); // Eat identifier token.

  // Invert on request (guard condition elided in this view).
    CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));

      ARM64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend
/// modifier ("lsl #2", "uxtw", ...). Parse them if present.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
  const AsmToken &Tok = Parser.getTok();
  std::string LowerID = Tok.getString().lower();
  ARM64_AM::ShiftExtendType ShOp =
      StringSwitch<ARM64_AM::ShiftExtendType>(LowerID)
          .Case("lsl", ARM64_AM::LSL)
          .Case("lsr", ARM64_AM::LSR)
          .Case("asr", ARM64_AM::ASR)
          .Case("ror", ARM64_AM::ROR)
          .Case("msl", ARM64_AM::MSL)
          .Case("uxtb", ARM64_AM::UXTB)
          .Case("uxth", ARM64_AM::UXTH)
          .Case("uxtw", ARM64_AM::UXTW)
          .Case("uxtx", ARM64_AM::UXTX)
          .Case("sxtb", ARM64_AM::SXTB)
          .Case("sxth", ARM64_AM::SXTH)
          .Case("sxtw", ARM64_AM::SXTW)
          .Case("sxtx", ARM64_AM::SXTX)
          .Default(ARM64_AM::InvalidShiftExtend);

  if (ShOp == ARM64_AM::InvalidShiftExtend)
    return MatchOperand_NoMatch;

  SMLoc S = Tok.getLoc();

  bool Hash = getLexer().is(AsmToken::Hash);
  if (!Hash && getLexer().isNot(AsmToken::Integer)) {
    if (ShOp == ARM64_AM::LSL || ShOp == ARM64_AM::LSR ||
        ShOp == ARM64_AM::ASR || ShOp == ARM64_AM::ROR ||
        ShOp == ARM64_AM::MSL) {
      // We expect a number here.
      TokError("expected #imm after shift specifier");
      return MatchOperand_ParseFail;

    // "extend" type operations don't need an immediate, #0 is implicit.
    SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
        ARM64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
    return MatchOperand_Success;

    Parser.Lex(); // Eat the '#'.

  // Make sure we do actually have a number
  if (!Parser.getTok().is(AsmToken::Integer)) {
    Error(Parser.getTok().getLoc(),
          "expected integer shift amount");
    return MatchOperand_ParseFail;

  const MCExpr *ImmVal;
  if (getParser().parseExpression(ImmVal))
    return MatchOperand_ParseFail;

  // The shift amount must fold to a constant.
  const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    TokError("expected #imm after shift specifier");
    return MatchOperand_ParseFail;

  SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  Operands.push_back(ARM64Operand::CreateShiftExtend(ShOp, MCE->getValue(),
                                                     true, S, E, getContext()));
  return MatchOperand_Success;
2254 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2255 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
2256 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2257 OperandVector &Operands) {
2258 if (Name.find('.') != StringRef::npos)
2259 return TokError("invalid operand");
2263 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2265 const AsmToken &Tok = Parser.getTok();
2266 StringRef Op = Tok.getString();
2267 SMLoc S = Tok.getLoc();
2269 const MCExpr *Expr = nullptr;
2271 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2273 Expr = MCConstantExpr::Create(op1, getContext()); \
2274 Operands.push_back( \
2275 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2276 Operands.push_back( \
2277 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2278 Operands.push_back( \
2279 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2280 Expr = MCConstantExpr::Create(op2, getContext()); \
2281 Operands.push_back( \
2282 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2285 if (Mnemonic == "ic") {
2286 if (!Op.compare_lower("ialluis")) {
2287 // SYS #0, C7, C1, #0
2288 SYS_ALIAS(0, 7, 1, 0);
2289 } else if (!Op.compare_lower("iallu")) {
2290 // SYS #0, C7, C5, #0
2291 SYS_ALIAS(0, 7, 5, 0);
2292 } else if (!Op.compare_lower("ivau")) {
2293 // SYS #3, C7, C5, #1
2294 SYS_ALIAS(3, 7, 5, 1);
2296 return TokError("invalid operand for IC instruction");
2298 } else if (Mnemonic == "dc") {
2299 if (!Op.compare_lower("zva")) {
2300 // SYS #3, C7, C4, #1
2301 SYS_ALIAS(3, 7, 4, 1);
2302 } else if (!Op.compare_lower("ivac")) {
2303 // SYS #3, C7, C6, #1
2304 SYS_ALIAS(0, 7, 6, 1);
2305 } else if (!Op.compare_lower("isw")) {
2306 // SYS #0, C7, C6, #2
2307 SYS_ALIAS(0, 7, 6, 2);
2308 } else if (!Op.compare_lower("cvac")) {
2309 // SYS #3, C7, C10, #1
2310 SYS_ALIAS(3, 7, 10, 1);
2311 } else if (!Op.compare_lower("csw")) {
2312 // SYS #0, C7, C10, #2
2313 SYS_ALIAS(0, 7, 10, 2);
2314 } else if (!Op.compare_lower("cvau")) {
2315 // SYS #3, C7, C11, #1
2316 SYS_ALIAS(3, 7, 11, 1);
2317 } else if (!Op.compare_lower("civac")) {
2318 // SYS #3, C7, C14, #1
2319 SYS_ALIAS(3, 7, 14, 1);
2320 } else if (!Op.compare_lower("cisw")) {
2321 // SYS #0, C7, C14, #2
2322 SYS_ALIAS(0, 7, 14, 2);
2324 return TokError("invalid operand for DC instruction");
2326 } else if (Mnemonic == "at") {
2327 if (!Op.compare_lower("s1e1r")) {
2328 // SYS #0, C7, C8, #0
2329 SYS_ALIAS(0, 7, 8, 0);
2330 } else if (!Op.compare_lower("s1e2r")) {
2331 // SYS #4, C7, C8, #0
2332 SYS_ALIAS(4, 7, 8, 0);
2333 } else if (!Op.compare_lower("s1e3r")) {
2334 // SYS #6, C7, C8, #0
2335 SYS_ALIAS(6, 7, 8, 0);
2336 } else if (!Op.compare_lower("s1e1w")) {
2337 // SYS #0, C7, C8, #1
2338 SYS_ALIAS(0, 7, 8, 1);
2339 } else if (!Op.compare_lower("s1e2w")) {
2340 // SYS #4, C7, C8, #1
2341 SYS_ALIAS(4, 7, 8, 1);
2342 } else if (!Op.compare_lower("s1e3w")) {
2343 // SYS #6, C7, C8, #1
2344 SYS_ALIAS(6, 7, 8, 1);
2345 } else if (!Op.compare_lower("s1e0r")) {
2346 // SYS #0, C7, C8, #3
2347 SYS_ALIAS(0, 7, 8, 2);
2348 } else if (!Op.compare_lower("s1e0w")) {
2349 // SYS #0, C7, C8, #3
2350 SYS_ALIAS(0, 7, 8, 3);
2351 } else if (!Op.compare_lower("s12e1r")) {
2352 // SYS #4, C7, C8, #4
2353 SYS_ALIAS(4, 7, 8, 4);
2354 } else if (!Op.compare_lower("s12e1w")) {
2355 // SYS #4, C7, C8, #5
2356 SYS_ALIAS(4, 7, 8, 5);
2357 } else if (!Op.compare_lower("s12e0r")) {
2358 // SYS #4, C7, C8, #6
2359 SYS_ALIAS(4, 7, 8, 6);
2360 } else if (!Op.compare_lower("s12e0w")) {
2361 // SYS #4, C7, C8, #7
2362 SYS_ALIAS(4, 7, 8, 7);
2364 return TokError("invalid operand for AT instruction");
2366 } else if (Mnemonic == "tlbi") {
2367 if (!Op.compare_lower("vmalle1is")) {
2368 // SYS #0, C8, C3, #0
2369 SYS_ALIAS(0, 8, 3, 0);
2370 } else if (!Op.compare_lower("alle2is")) {
2371 // SYS #4, C8, C3, #0
2372 SYS_ALIAS(4, 8, 3, 0);
2373 } else if (!Op.compare_lower("alle3is")) {
2374 // SYS #6, C8, C3, #0
2375 SYS_ALIAS(6, 8, 3, 0);
2376 } else if (!Op.compare_lower("vae1is")) {
2377 // SYS #0, C8, C3, #1
2378 SYS_ALIAS(0, 8, 3, 1);
2379 } else if (!Op.compare_lower("vae2is")) {
2380 // SYS #4, C8, C3, #1
2381 SYS_ALIAS(4, 8, 3, 1);
2382 } else if (!Op.compare_lower("vae3is")) {
2383 // SYS #6, C8, C3, #1
2384 SYS_ALIAS(6, 8, 3, 1);
2385 } else if (!Op.compare_lower("aside1is")) {
2386 // SYS #0, C8, C3, #2
2387 SYS_ALIAS(0, 8, 3, 2);
2388 } else if (!Op.compare_lower("vaae1is")) {
2389 // SYS #0, C8, C3, #3
2390 SYS_ALIAS(0, 8, 3, 3);
2391 } else if (!Op.compare_lower("alle1is")) {
2392 // SYS #4, C8, C3, #4
2393 SYS_ALIAS(4, 8, 3, 4);
2394 } else if (!Op.compare_lower("vale1is")) {
2395 // SYS #0, C8, C3, #5
2396 SYS_ALIAS(0, 8, 3, 5);
2397 } else if (!Op.compare_lower("vaale1is")) {
2398 // SYS #0, C8, C3, #7
2399 SYS_ALIAS(0, 8, 3, 7);
2400 } else if (!Op.compare_lower("vmalle1")) {
2401 // SYS #0, C8, C7, #0
2402 SYS_ALIAS(0, 8, 7, 0);
2403 } else if (!Op.compare_lower("alle2")) {
2404 // SYS #4, C8, C7, #0
2405 SYS_ALIAS(4, 8, 7, 0);
2406 } else if (!Op.compare_lower("vale2is")) {
2407 // SYS #4, C8, C3, #5
2408 SYS_ALIAS(4, 8, 3, 5);
2409 } else if (!Op.compare_lower("vale3is")) {
2410 // SYS #6, C8, C3, #5
2411 SYS_ALIAS(6, 8, 3, 5);
2412 } else if (!Op.compare_lower("alle3")) {
2413 // SYS #6, C8, C7, #0
2414 SYS_ALIAS(6, 8, 7, 0);
2415 } else if (!Op.compare_lower("vae1")) {
2416 // SYS #0, C8, C7, #1
2417 SYS_ALIAS(0, 8, 7, 1);
2418 } else if (!Op.compare_lower("vae2")) {
2419 // SYS #4, C8, C7, #1
2420 SYS_ALIAS(4, 8, 7, 1);
2421 } else if (!Op.compare_lower("vae3")) {
2422 // SYS #6, C8, C7, #1
2423 SYS_ALIAS(6, 8, 7, 1);
2424 } else if (!Op.compare_lower("aside1")) {
2425 // SYS #0, C8, C7, #2
2426 SYS_ALIAS(0, 8, 7, 2);
2427 } else if (!Op.compare_lower("vaae1")) {
2428 // SYS #0, C8, C7, #3
2429 SYS_ALIAS(0, 8, 7, 3);
2430 } else if (!Op.compare_lower("alle1")) {
2431 // SYS #4, C8, C7, #4
2432 SYS_ALIAS(4, 8, 7, 4);
2433 } else if (!Op.compare_lower("vale1")) {
2434 // SYS #0, C8, C7, #5
2435 SYS_ALIAS(0, 8, 7, 5);
2436 } else if (!Op.compare_lower("vale2")) {
2437 // SYS #4, C8, C7, #5
2438 SYS_ALIAS(4, 8, 7, 5);
2439 } else if (!Op.compare_lower("vale3")) {
2440 // SYS #6, C8, C7, #5
2441 SYS_ALIAS(6, 8, 7, 5);
2442 } else if (!Op.compare_lower("vaale1")) {
2443 // SYS #0, C8, C7, #7
2444 SYS_ALIAS(0, 8, 7, 7);
2445 } else if (!Op.compare_lower("ipas2e1")) {
2446 // SYS #4, C8, C4, #1
2447 SYS_ALIAS(4, 8, 4, 1);
2448 } else if (!Op.compare_lower("ipas2le1")) {
2449 // SYS #4, C8, C4, #5
2450 SYS_ALIAS(4, 8, 4, 5);
2451 } else if (!Op.compare_lower("ipas2e1is")) {
2452 // SYS #4, C8, C4, #1
2453 SYS_ALIAS(4, 8, 0, 1);
2454 } else if (!Op.compare_lower("ipas2le1is")) {
2455 // SYS #4, C8, C4, #5
2456 SYS_ALIAS(4, 8, 0, 5);
2457 } else if (!Op.compare_lower("vmalls12e1")) {
2458 // SYS #4, C8, C7, #6
2459 SYS_ALIAS(4, 8, 7, 6);
2460 } else if (!Op.compare_lower("vmalls12e1is")) {
2461 // SYS #4, C8, C3, #6
2462 SYS_ALIAS(4, 8, 3, 6);
2464 return TokError("invalid operand for TLBI instruction");
2470 Parser.Lex(); // Eat operand.
2472 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2473 bool HasRegister = false;
2475 // Check for the optional register operand.
2476 if (getLexer().is(AsmToken::Comma)) {
2477 Parser.Lex(); // Eat comma.
2479 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2480 return TokError("expected register operand");
2485 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2486 Parser.eatToEndOfStatement();
2487 return TokError("unexpected token in argument list");
2490 if (ExpectRegister && !HasRegister) {
2491 return TokError("specified " + Mnemonic + " op requires a register");
2493 else if (!ExpectRegister && HasRegister) {
2494 return TokError("specified " + Mnemonic + " op does not use a register");
2497 Parser.Lex(); // Consume the EndOfStatement
// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
// instruction, which may be either a "#imm" in [0, 15] or a named barrier
// option (e.g. "sy"). Pushes a Barrier operand on success.
2501 ARM64AsmParser::OperandMatchResultTy
2502 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2503 const AsmToken &Tok = Parser.getTok();
2505 // Can be either a #imm style literal or an option name
2506 bool Hash = Tok.is(AsmToken::Hash);
2507 if (Hash || Tok.is(AsmToken::Integer)) {
2508 // Immediate operand.
2510 Parser.Lex(); // Eat the '#'
2511 const MCExpr *ImmVal;
2512 SMLoc ExprLoc = getLoc();
2513 if (getParser().parseExpression(ImmVal))
2514 return MatchOperand_ParseFail;
2515 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2517 Error(ExprLoc, "immediate value expected for barrier operand");
2518 return MatchOperand_ParseFail;
// The barrier option field is 4 bits wide.
2520 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2521 Error(ExprLoc, "barrier operand out of range");
2522 return MatchOperand_ParseFail;
2525 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2526 return MatchOperand_Success;
2529 if (Tok.isNot(AsmToken::Identifier)) {
2530 TokError("invalid operand for instruction");
2531 return MatchOperand_ParseFail;
// Look the identifier up in the named barrier-option table.
2535 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2537 TokError("invalid barrier option name");
2538 return MatchOperand_ParseFail;
2541 // The only valid named option for ISB is 'sy'
2542 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2543 TokError("'sy' or #imm operand expected");
2544 return MatchOperand_ParseFail;
2547 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2548 Parser.Lex(); // Consume the option
2550 return MatchOperand_Success;
// tryParseSysReg - Parse a system-register name operand (for MRS/MSR).
// Validation of the name against the feature set happens later; here any
// identifier is accepted and wrapped in a SysReg operand.
2553 ARM64AsmParser::OperandMatchResultTy
2554 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2555 const AsmToken &Tok = Parser.getTok();
2557 if (Tok.isNot(AsmToken::Identifier))
2558 return MatchOperand_NoMatch;
// Feature bits are captured so register availability can be checked per-CPU.
2560 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2561 STI.getFeatureBits(), getContext()));
2562 Parser.Lex(); // Eat identifier
2564 return MatchOperand_Success;
2567 /// tryParseVectorRegister - Parse a vector register operand.
///
/// Returns true on failure (nothing parsed / error emitted). On success a
/// Reg operand is pushed, followed by an optional arrangement-kind token
/// (e.g. ".4s") and an optional "[index]" VectorIndex operand.
2568 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2569 if (Parser.getTok().isNot(AsmToken::Identifier))
2573 // Check for a vector register specifier first.
2575 int64_t Reg = tryMatchVectorRegister(Kind, false);
2579 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2580 // If there was an explicit qualifier, that goes on as a literal text
2583 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2585 // If there is an index specifier following the register, parse that too.
2586 if (Parser.getTok().is(AsmToken::LBrac)) {
2587 SMLoc SIdx = getLoc();
2588 Parser.Lex(); // Eat left bracket token.
2590 const MCExpr *ImmVal;
2591 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2593 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2595 TokError("immediate value expected for vector index");
2600 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2601 Error(E, "']' expected");
2605 Parser.Lex(); // Eat right bracket token.
2607 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2614 /// parseRegister - Parse a non-vector register operand.
///
/// Returns true on failure. Tries vector registers first (so "v0.4s" style
/// names are handled there), then scalar registers.
2615 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2617 // Try for a vector register.
2618 if (!tryParseVectorRegister(Operands))
2621 // Try for a scalar register.
2622 int64_t Reg = tryParseRegister();
2626 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2628 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2629 // as a string token in the instruction itself.
2630 if (getLexer().getKind() == AsmToken::LBrac) {
2631 SMLoc LBracS = getLoc();
2633 const AsmToken &Tok = Parser.getTok();
2634 if (Tok.is(AsmToken::Integer)) {
2635 SMLoc IntS = getLoc();
2636 int64_t Val = Tok.getIntVal();
2639 if (getLexer().getKind() == AsmToken::RBrac) {
2640 SMLoc RBracS = getLoc();
// Push "[", "1", "]" as literal tokens so the matcher can see them.
2643 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2645 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2647 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
// parseSymbolicImmVal - Parse an immediate expression, optionally preceded
// by an ELF relocation specifier of the form ":spec:" (e.g. ":lo12:sym").
// Returns true on error. On success ImmVal holds the expression, wrapped in
// an ARM64MCExpr carrying the relocation kind when a specifier was present.
2657 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2658 bool HasELFModifier = false;
2659 ARM64MCExpr::VariantKind RefKind;
2661 if (Parser.getTok().is(AsmToken::Colon)) {
2662 Parser.Lex(); // Eat ':"
2663 HasELFModifier = true;
2665 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2666 Error(Parser.getTok().getLoc(),
2667 "expect relocation specifier in operand after ':'");
// Specifier names are matched case-insensitively.
2671 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2672 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
2673 .Case("lo12", ARM64MCExpr::VK_LO12)
2674 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
2675 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
2676 .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
2677 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
2678 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
2679 .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
2680 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
2681 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
2682 .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
2683 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
2684 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
2685 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
2686 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
2687 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
2688 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
2689 .Case("dtprel_hi12", ARM64MCExpr::VK_DTPREL_HI12)
2690 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
2691 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
2692 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
2693 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
2694 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
2695 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
2696 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
2697 .Case("tprel_hi12", ARM64MCExpr::VK_TPREL_HI12)
2698 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
2699 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
2700 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
2701 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
2702 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
2703 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
2704 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
2705 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
2706 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
2707 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
2708 .Default(ARM64MCExpr::VK_INVALID);
2710 if (RefKind == ARM64MCExpr::VK_INVALID) {
2711 Error(Parser.getTok().getLoc(),
2712 "expect relocation specifier in operand after ':'");
2716 Parser.Lex(); // Eat identifier
// The specifier must be terminated by a second ':' before the expression.
2718 if (Parser.getTok().isNot(AsmToken::Colon)) {
2719 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2722 Parser.Lex(); // Eat ':'
2725 if (getParser().parseExpression(ImmVal))
2729 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
2734 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
///
/// Accepts "{ vN.T - vM.T }" ranges and "{ vN.T, vM.T, ... }" comma lists,
/// optionally followed by a "[lane]" index. Returns true on error.
2735 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
2736 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2738 Parser.Lex(); // Eat left bracket token.
2740 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2743 int64_t PrevReg = FirstReg;
// Range syntax: "{v0.4s - v3.4s}".
2746 if (Parser.getTok().is(AsmToken::Minus)) {
2747 Parser.Lex(); // Eat the minus.
2749 SMLoc Loc = getLoc();
2751 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2754 // Any Kind suffixes must match on all regs in the list.
2755 if (Kind != NextKind)
2756 return Error(Loc, "mismatched register size suffix");
// Register numbering wraps at 32 (v31 -> v0), so compute the distance modulo 32.
2758 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
2760 if (Space == 0 || Space > 3) {
2761 return Error(Loc, "invalid number of vectors");
// Comma-list syntax: "{v0.4s, v1.4s, ...}".
2767 while (Parser.getTok().is(AsmToken::Comma)) {
2768 Parser.Lex(); // Eat the comma token.
2770 SMLoc Loc = getLoc();
2772 int64_t Reg = tryMatchVectorRegister(NextKind, true);
2775 // Any Kind suffixes must match on all regs in the list.
2776 if (Kind != NextKind)
2777 return Error(Loc, "mismatched register size suffix");
2779 // Registers must be incremental (with wraparound at 31)
2780 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
2781 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
2782 return Error(Loc, "registers must be sequential");
2789 if (Parser.getTok().isNot(AsmToken::RCurly))
2790 return Error(getLoc(), "'}' expected");
2791 Parser.Lex(); // Eat the '}' token.
2794 return Error(S, "invalid number of vectors");
2796 unsigned NumElements = 0;
2797 char ElementKind = 0;
2799 parseValidVectorKind(Kind, NumElements, ElementKind);
2801 Operands.push_back(ARM64Operand::CreateVectorList(
2802 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
2804 // If there is an index specifier following the list, parse that too.
2805 if (Parser.getTok().is(AsmToken::LBrac)) {
2806 SMLoc SIdx = getLoc();
2807 Parser.Lex(); // Eat left bracket token.
2809 const MCExpr *ImmVal;
2810 if (getParser().parseExpression(ImmVal))
2812 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2814 TokError("immediate value expected for vector index");
2819 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2820 Error(E, "']' expected");
2824 Parser.Lex(); // Eat right bracket token.
2826 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
// tryParseGPR64sp0Operand - Parse a GPR64sp register optionally followed by
// ", #0" (used by instructions like LDXP that allow an explicit zero index).
// Only an absent index or a literal 0 is accepted.
2832 ARM64AsmParser::OperandMatchResultTy
2833 ARM64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
2834 const AsmToken &Tok = Parser.getTok();
2835 if (!Tok.is(AsmToken::Identifier))
2836 return MatchOperand_NoMatch;
2838 unsigned RegNum = MatchRegisterName(Tok.getString().lower());
2840 MCContext &Ctx = getContext();
2841 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2842 if (!RI->getRegClass(ARM64::GPR64spRegClassID).contains(RegNum))
2843 return MatchOperand_NoMatch;
2846 Parser.Lex(); // Eat register
// No comma: plain register operand, no index to check.
2848 if (Parser.getTok().isNot(AsmToken::Comma)) {
2849 Operands.push_back(ARM64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2850 return MatchOperand_Success;
2852 Parser.Lex(); // Eat comma.
2854 if (Parser.getTok().is(AsmToken::Hash))
2855 Parser.Lex(); // Eat hash
2857 if (Parser.getTok().isNot(AsmToken::Integer)) {
2858 Error(getLoc(), "index must be absent or #0");
2859 return MatchOperand_ParseFail;
2862 const MCExpr *ImmVal;
// Any constant other than 0 is rejected.
2863 if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
2864 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
2865 Error(getLoc(), "index must be absent or #0");
2866 return MatchOperand_ParseFail;
2869 Operands.push_back(ARM64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
2870 return MatchOperand_Success;
2873 /// parseOperand - Parse a arm instruction operand. For now this parses the
2874 /// operand regardless of the mnemonic.
///
/// \param isCondCode      true when the current operand position expects a
///                        condition-code name rather than a register/imm.
/// \param invertCondCode  invert the parsed condition code (for aliases like
///                        cinc/cset whose CC is inverted vs. the real opcode).
/// Returns true on error.
2875 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
2876 bool invertCondCode) {
2877 // Check if the current operand has a custom associated parser, if so, try to
2878 // custom parse the operand, or fallback to the general approach.
2879 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
2880 if (ResTy == MatchOperand_Success)
2882 // If there wasn't a custom match, try the generic matcher below. Otherwise,
2883 // there was a match, but an error occurred, in which case, just return that
2884 // the operand parsing failed.
2885 if (ResTy == MatchOperand_ParseFail)
2888 // Nothing custom, so do general case parsing.
2890 switch (getLexer().getKind()) {
2894 if (parseSymbolicImmVal(Expr))
2895 return Error(S, "invalid operand");
2897 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2898 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2901 case AsmToken::LBrac: {
2902 SMLoc Loc = Parser.getTok().getLoc();
2903 Operands.push_back(ARM64Operand::CreateToken("[", false, Loc,
2905 Parser.Lex(); // Eat '['
2907 // There's no comma after a '[', so we can parse the next operand
2909 return parseOperand(Operands, false, false);
2911 case AsmToken::LCurly:
2912 return parseVectorList(Operands);
2913 case AsmToken::Identifier: {
2914 // If we're expecting a Condition Code operand, then just parse that.
2916 return parseCondCode(Operands, invertCondCode);
2918 // If it's a register name, parse it.
2919 if (!parseRegister(Operands))
2922 // This could be an optional "shift" or "extend" operand.
2923 OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
2924 // We can only continue if no tokens were eaten.
2925 if (GotShift != MatchOperand_NoMatch)
2928 // This was not a register so parse other operands that start with an
2929 // identifier (like labels) as expressions and create them as immediates.
2930 const MCExpr *IdVal;
2932 if (getParser().parseExpression(IdVal))
2935 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2936 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
2939 case AsmToken::Integer:
2940 case AsmToken::Real:
2941 case AsmToken::Hash: {
2942 // #42 -> immediate.
2944 if (getLexer().is(AsmToken::Hash))
2947 // Parse a negative sign
2948 bool isNegative = false;
2949 if (Parser.getTok().is(AsmToken::Minus)) {
2951 // We need to consume this token only when we have a Real, otherwise
2952 // we let parseSymbolicImmVal take care of it
2953 if (Parser.getLexer().peekTok().is(AsmToken::Real))
2957 // The only Real that should come through here is a literal #0.0 for
2958 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
2959 // so convert the value.
2960 const AsmToken &Tok = Parser.getTok();
2961 if (Tok.is(AsmToken::Real)) {
2962 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2963 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2964 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
2965 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
2966 Mnemonic != "fcmlt")
2967 return TokError("unexpected floating point literal");
2968 else if (IntVal != 0 || isNegative)
2969 return TokError("expected floating-point constant #0.0")
2970 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as literal tokens, matching the instruction patterns.
2973 ARM64Operand::CreateToken("#0", false, S, getContext()));
2975 ARM64Operand::CreateToken(".0", false, S, getContext()));
2979 const MCExpr *ImmVal;
2980 if (parseSymbolicImmVal(ImmVal))
2983 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2984 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
2990 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
/// operands. Handles conditional-branch aliases ("beq" -> "b.eq"), the SYS
/// aliases (ic/dc/at/tlbi), '.'-separated mnemonic suffixes, and the
/// condition-code operand positions of conditional compare/select aliases.
2992 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
2993 StringRef Name, SMLoc NameLoc,
2994 OperandVector &Operands) {
// Canonicalize legacy branch mnemonics into the "b.<cc>" form first.
2995 Name = StringSwitch<StringRef>(Name.lower())
2996 .Case("beq", "b.eq")
2997 .Case("bne", "b.ne")
2998 .Case("bhs", "b.hs")
2999 .Case("bcs", "b.cs")
3000 .Case("blo", "b.lo")
3001 .Case("bcc", "b.cc")
3002 .Case("bmi", "b.mi")
3003 .Case("bpl", "b.pl")
3004 .Case("bvs", "b.vs")
3005 .Case("bvc", "b.vc")
3006 .Case("bhi", "b.hi")
3007 .Case("bls", "b.ls")
3008 .Case("bge", "b.ge")
3009 .Case("blt", "b.lt")
3010 .Case("bgt", "b.gt")
3011 .Case("ble", "b.le")
3012 .Case("bal", "b.al")
3013 .Case("bnv", "b.nv")
3016 // Create the leading tokens for the mnemonic, split by '.' characters.
3017 size_t Start = 0, Next = Name.find('.');
3018 StringRef Head = Name.slice(Start, Next);
3020 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3021 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
3022 bool IsError = parseSysAlias(Head, NameLoc, Operands);
3023 if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
3024 Parser.eatToEndOfStatement();
3029 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3032 // Handle condition codes for a branch mnemonic
3033 if (Head == "b" && Next != StringRef::npos) {
3035 Next = Name.find('.', Start + 1);
3036 Head = Name.slice(Start + 1, Next);
3038 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3039 (Head.data() - Name.data()));
3040 ARM64CC::CondCode CC = parseCondCodeString(Head);
3041 if (CC == ARM64CC::Invalid)
3042 return Error(SuffixLoc, "invalid condition code");
3044 ARM64Operand::CreateToken(".", true, SuffixLoc, getContext()));
3046 ARM64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3049 // Add the remaining tokens in the mnemonic.
3050 while (Next != StringRef::npos) {
3052 Next = Name.find('.', Start + 1);
3053 Head = Name.slice(Start, Next);
3054 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3055 (Head.data() - Name.data()) + 1);
3057 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3060 // Conditional compare instructions have a Condition Code operand, which needs
3061 // to be parsed and an immediate operand created.
3062 bool condCodeFourthOperand =
3063 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3064 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3065 Head == "csinc" || Head == "csinv" || Head == "csneg");
3067 // These instructions are aliases to some of the conditional select
3068 // instructions. However, the condition code is inverted in the aliased
3071 // FIXME: Is this the correct way to handle these? Or should the parser
3072 // generate the aliased instructions directly?
3073 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3074 bool condCodeThirdOperand =
3075 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3077 // Read the remaining operands.
3078 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3079 // Read the first operand.
3080 if (parseOperand(Operands, false, false)) {
3081 Parser.eatToEndOfStatement();
// N counts operand positions (1-based) so the CC position checks below work.
3086 while (getLexer().is(AsmToken::Comma)) {
3087 Parser.Lex(); // Eat the comma.
3089 // Parse and remember the operand.
3090 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3091 (N == 3 && condCodeThirdOperand) ||
3092 (N == 2 && condCodeSecondOperand),
3093 condCodeSecondOperand || condCodeThirdOperand)) {
3094 Parser.eatToEndOfStatement();
3098 // After successfully parsing some operands there are two special cases to
3099 // consider (i.e. notional operands not separated by commas). Both are due
3100 // to memory specifiers:
3101 // + An RBrac will end an address for load/store/prefetch
3102 // + An '!' will indicate a pre-indexed operation.
3104 // It's someone else's responsibility to make sure these tokens are sane
3105 // in the given context!
3106 if (Parser.getTok().is(AsmToken::RBrac)) {
3107 SMLoc Loc = Parser.getTok().getLoc();
3108 Operands.push_back(ARM64Operand::CreateToken("]", false, Loc,
3113 if (Parser.getTok().is(AsmToken::Exclaim)) {
3114 SMLoc Loc = Parser.getTok().getLoc();
3115 Operands.push_back(ARM64Operand::CreateToken("!", false, Loc,
3124 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3125 SMLoc Loc = Parser.getTok().getLoc();
3126 Parser.eatToEndOfStatement();
3127 return Error(Loc, "unexpected token in argument list");
3130 Parser.Lex(); // Consume the EndOfStatement
3134 // FIXME: This entire function is a giant hack to provide us with decent
3135 // operand range validation/diagnostics until TableGen/MC can be extended
3136 // to support autogeneration of this kind of validation.
// Returns true when a diagnostic was emitted. Loc holds per-operand source
// locations collected during matching, used to point errors at the right
// operand.
3137 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3138 SmallVectorImpl<SMLoc> &Loc) {
3139 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3140 // Check for indexed addressing modes w/ the base register being the
3141 // same as a destination/source register or pair load where
3142 // the Rt == Rt2. All of those are undefined behaviour.
3143 switch (Inst.getOpcode()) {
3144 case ARM64::LDPSWpre:
3145 case ARM64::LDPWpost:
3146 case ARM64::LDPWpre:
3147 case ARM64::LDPXpost:
3148 case ARM64::LDPXpre: {
// Writeback forms: operand 0 is the writeback result, so Rt/Rt2/Rn start at 1.
3149 unsigned Rt = Inst.getOperand(1).getReg();
3150 unsigned Rt2 = Inst.getOperand(2).getReg();
3151 unsigned Rn = Inst.getOperand(3).getReg();
// isSubRegisterEq also catches W/X aliasing (e.g. w1 vs x1).
3152 if (RI->isSubRegisterEq(Rn, Rt))
3153 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3154 "is also a destination");
3155 if (RI->isSubRegisterEq(Rn, Rt2))
3156 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3157 "is also a destination");
3165 case ARM64::LDPXi: {
3166 unsigned Rt = Inst.getOperand(0).getReg();
3167 unsigned Rt2 = Inst.getOperand(1).getReg();
3169 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3172 case ARM64::LDPDpost:
3173 case ARM64::LDPDpre:
3174 case ARM64::LDPQpost:
3175 case ARM64::LDPQpre:
3176 case ARM64::LDPSpost:
3177 case ARM64::LDPSpre:
3178 case ARM64::LDPSWpost: {
3179 unsigned Rt = Inst.getOperand(1).getReg();
3180 unsigned Rt2 = Inst.getOperand(2).getReg();
3182 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
3185 case ARM64::STPDpost:
3186 case ARM64::STPDpre:
3187 case ARM64::STPQpost:
3188 case ARM64::STPQpre:
3189 case ARM64::STPSpost:
3190 case ARM64::STPSpre:
3191 case ARM64::STPWpost:
3192 case ARM64::STPWpre:
3193 case ARM64::STPXpost:
3194 case ARM64::STPXpre: {
3195 unsigned Rt = Inst.getOperand(1).getReg();
3196 unsigned Rt2 = Inst.getOperand(2).getReg();
3197 unsigned Rn = Inst.getOperand(3).getReg();
3198 if (RI->isSubRegisterEq(Rn, Rt))
3199 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3200 "is also a source");
3201 if (RI->isSubRegisterEq(Rn, Rt2))
3202 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3203 "is also a source");
3206 case ARM64::LDRBBpre:
3207 case ARM64::LDRBpre:
3208 case ARM64::LDRHHpre:
3209 case ARM64::LDRHpre:
3210 case ARM64::LDRSBWpre:
3211 case ARM64::LDRSBXpre:
3212 case ARM64::LDRSHWpre:
3213 case ARM64::LDRSHXpre:
3214 case ARM64::LDRSWpre:
3215 case ARM64::LDRWpre:
3216 case ARM64::LDRXpre:
3217 case ARM64::LDRBBpost:
3218 case ARM64::LDRBpost:
3219 case ARM64::LDRHHpost:
3220 case ARM64::LDRHpost:
3221 case ARM64::LDRSBWpost:
3222 case ARM64::LDRSBXpost:
3223 case ARM64::LDRSHWpost:
3224 case ARM64::LDRSHXpost:
3225 case ARM64::LDRSWpost:
3226 case ARM64::LDRWpost:
3227 case ARM64::LDRXpost: {
3228 unsigned Rt = Inst.getOperand(1).getReg();
3229 unsigned Rn = Inst.getOperand(2).getReg();
3230 if (RI->isSubRegisterEq(Rn, Rt))
3231 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3232 "is also a source");
3235 case ARM64::STRBBpost:
3236 case ARM64::STRBpost:
3237 case ARM64::STRHHpost:
3238 case ARM64::STRHpost:
3239 case ARM64::STRWpost:
3240 case ARM64::STRXpost:
3241 case ARM64::STRBBpre:
3242 case ARM64::STRBpre:
3243 case ARM64::STRHHpre:
3244 case ARM64::STRHpre:
3245 case ARM64::STRWpre:
3246 case ARM64::STRXpre: {
3247 unsigned Rt = Inst.getOperand(1).getReg();
3248 unsigned Rn = Inst.getOperand(2).getReg();
3249 if (RI->isSubRegisterEq(Rn, Rt))
3250 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3251 "is also a source");
3256 // Now check immediate ranges. Separate from the above as there is overlap
3257 // in the instructions being checked and this keeps the nested conditionals
3259 switch (Inst.getOpcode()) {
3260 case ARM64::ADDSWri:
3261 case ARM64::ADDSXri:
3264 case ARM64::SUBSWri:
3265 case ARM64::SUBSXri:
3267 case ARM64::SUBXri: {
3268 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
3269 // some slight duplication here.
3270 if (Inst.getOperand(2).isExpr()) {
3271 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3272 ARM64MCExpr::VariantKind ELFRefKind;
3273 MCSymbolRefExpr::VariantKind DarwinRefKind;
3275 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3276 return Error(Loc[2], "invalid immediate expression");
3279 // Only allow these with ADDXri.
3280 if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3281 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
3282 Inst.getOpcode() == ARM64::ADDXri)
3285 // Only allow these with ADDXri/ADDWri
3286 if ((ELFRefKind == ARM64MCExpr::VK_LO12 ||
3287 ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12 ||
3288 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3289 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3290 ELFRefKind == ARM64MCExpr::VK_TPREL_HI12 ||
3291 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3292 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3293 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) &&
3294 (Inst.getOpcode() == ARM64::ADDXri ||
3295 Inst.getOpcode() == ARM64::ADDWri))
3298 // Don't allow expressions in the immediate field otherwise
3299 return Error(Loc[2], "invalid immediate expression")
/// showMatchError - Translate a matcher failure code (Match_*) from
/// MatchInstructionImpl into a human-readable diagnostic emitted at \p Loc.
/// Always reports via Error() and so always returns true, allowing callers
/// to write 'return showMatchError(Loc, Code);'.
bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
  // Generic matcher failures.
  case Match_MissingFeature:
      "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  // Shifted/extended register operand failures.
  case Match_AddSubRegExtendSmall:
    "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    "expected compatible register or floating-point constant");
  // Memory-operand index/extend failures.
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryWExtend8:
    "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  // Immediate-range failures.
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  // Vector lane failures.
  case Match_InvalidIndex1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexB:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexH:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexS:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexD:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
    return Error(Loc, "expected readable system register");
    return Error(Loc, "expected writable system register or pstate");
  case Match_MnemonicFail:
    return Error(Loc, "unrecognized instruction mnemonic");
    // NOTE(review): the rest of this file uses llvm_unreachable() for
    // impossible paths; consider replacing assert(0 && ...) with it here.
    assert(0 && "unexpected error code!");
    return Error(Loc, "invalid instruction format");
3435 static const char *getSubtargetFeatureName(unsigned Val);
/// MatchAndEmitInstruction - Match the parsed operand list against the
/// TableGen'erated matcher tables and either emit the resulting MCInst or
/// produce a diagnostic at \p IDLoc. Before matching, several aliases that
/// InstAlias cannot express (because their immediates must be recomputed)
/// are rewritten in place in \p Operands. On failure, \p ErrorInfo carries
/// either the index of the offending operand or, for Match_MissingFeature,
/// a bitmask of the missing subtarget features. Returns true on error.
bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                             OperandVector &Operands,
                                             unsigned &ErrorInfo,
                                             bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
  assert(Op->isToken() && "Leading operand should always be a mnemonic!");
  StringRef Tok = Op->getToken();
  unsigned NumOperands = Operands.size();
  // Rewrite "lsl Rd, Rn, #imm" to the underlying "ubfm" form, recomputing
  // the immr/imms immediates for the register width (W vs. X).
  if (NumOperands == 4 && Tok == "lsl") {
    ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    if (Op2->isReg() && Op3->isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t NewOp3Val = 0;
        uint64_t NewOp4Val = 0;
        if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
          NewOp3Val = (32 - Op3Val) & 0x1f;
          NewOp4Val = 31 - Op3Val;
          NewOp3Val = (64 - Op3Val) & 0x3f;
          NewOp4Val = 63 - Op3Val;
        const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
        const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
        Operands[0] = ARM64Operand::CreateToken(
            "ubfm", false, Op->getStartLoc(), getContext());
        Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
                                              Op3->getEndLoc(), getContext());
        Operands.push_back(ARM64Operand::CreateImm(
            NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
  } else if (NumOperands == 5) {
    // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
    // UBFIZ -> UBFM aliases.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
      ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
      ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
      if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
        const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
        const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();
          // Range-check the lsb/width against the destination register width
          // before converting to the BFM-style immr/imms encoding.
          uint64_t RegWidth = 0;
          if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
          if (Op3Val >= RegWidth)
            return Error(Op3->getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4->getStartLoc(),
                         "expected integer in range [1, 32]");
          uint64_t NewOp3Val = 0;
          if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
            NewOp3Val = (32 - Op3Val) & 0x1f;
            NewOp3Val = (64 - Op3Val) & 0x3f;
          uint64_t NewOp4Val = Op4Val - 1;
          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4->getStartLoc(),
                         "requested insert overflows register");
          const MCExpr *NewOp3 =
              MCConstantExpr::Create(NewOp3Val, getContext());
          const MCExpr *NewOp4 =
              MCConstantExpr::Create(NewOp4Val, getContext());
          Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
                                                Op3->getEndLoc(), getContext());
          Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
                                                Op4->getEndLoc(), getContext());
            Operands[0] = ARM64Operand::CreateToken(
                "bfm", false, Op->getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = ARM64Operand::CreateToken(
                "sbfm", false, Op->getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = ARM64Operand::CreateToken(
                "ubfm", false, Op->getStartLoc(), getContext());
            llvm_unreachable("No valid mnemonic for alias?");
    // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
    // UBFX -> UBFM aliases.
  } else if (NumOperands == 5 &&
             (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
    ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
    ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
    ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
    if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
      const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
      const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
      if (Op3CE && Op4CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t Op4Val = Op4CE->getValue();
        uint64_t RegWidth = 0;
        if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
        if (Op3Val >= RegWidth)
          return Error(Op3->getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Op4Val < 1 || Op4Val > RegWidth)
          return Error(Op4->getStartLoc(),
                       "expected integer in range [1, 32]");
        // For the extract forms, imms is lsb + width - 1; check that the
        // requested field stays inside the register.
        uint64_t NewOp4Val = Op3Val + Op4Val - 1;
        if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
          return Error(Op4->getStartLoc(),
                       "requested extract overflows register");
        const MCExpr *NewOp4 =
            MCConstantExpr::Create(NewOp4Val, getContext());
        Operands[4] = ARM64Operand::CreateImm(
            NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
          Operands[0] = ARM64Operand::CreateToken(
              "bfm", false, Op->getStartLoc(), getContext());
        else if (Tok == "sbfx")
          Operands[0] = ARM64Operand::CreateToken(
              "sbfm", false, Op->getStartLoc(), getContext());
        else if (Tok == "ubfx")
          Operands[0] = ARM64Operand::CreateToken(
              "ubfm", false, Op->getStartLoc(), getContext());
          llvm_unreachable("No valid mnemonic for alias?");
  // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
  // InstAlias can't quite handle this since the reg classes aren't
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The source register can be Wn here, but the matcher expects a
    // GPR64. Twiddle it here if necessary.
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
      unsigned Reg = getXRegFromWReg(Op->getReg());
      Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                            Op->getEndLoc(), getContext());
  // FIXME: Likewise for sxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
        ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR64. Twiddle it here if necessary.
      ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
        unsigned Reg = getXRegFromWReg(Op->getReg());
        Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                              Op->getEndLoc(), getContext());
  // FIXME: Likewise for uxt[bh] with a Xd dst operand
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
        ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
      // The source register can be Wn here, but the matcher expects a
      // GPR32. Twiddle it here if necessary.
      ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
        unsigned Reg = getWRegFromXReg(Op->getReg());
        Operands[1] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
                                              Op->getEndLoc(), getContext());
  // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
  if (NumOperands == 3 && Tok == "fmov") {
    ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
    ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
    // (unsigned)-1 is the sentinel FPImm value for #0.0 here; substitute
    // the zero register of the matching width for the immediate operand.
    if (RegOp->isReg() && ImmOp->isFPImm() &&
        ImmOp->getFPImm() == (unsigned)-1) {
      unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
      Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
                                            Op->getEndLoc(), getContext());
  // First try to match against the secondary set of tables containing the
  // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
  // If that fails, try against the alternate table containing long-form NEON:
  // "fadd v0.2s, v1.2s, v2.2s"
  if (MatchResult != Match_Success)
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
  switch (MatchResult) {
  case Match_Success: {
    // Perform range checking and other semantic validations
    SmallVector<SMLoc, 8> OperandLocs;
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, OperandLocs))
    Out.EmitInstruction(Inst, STI);
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    // Special case the error message for the very common case where only
    // a single subtarget feature is missing (neon, e.g.).
    std::string Msg = "instruction requires:";
    // Walk each bit of the feature mask, appending the name of every
    // missing feature to the diagnostic.
    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
      if (ErrorInfo & Mask) {
        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
    return Error(IDLoc, Msg);
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ~0U means the matcher couldn't identify the offending operand;
    // fall back to the instruction location.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");
      ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc())
    // If the match failed on a suffix token operand, tweak the diagnostic
    if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
        ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
      MatchResult = Match_InvalidSuffix;
    return showMatchError(ErrorLoc, MatchResult);
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidIndex1:
  case Match_InvalidIndexB:
  case Match_InvalidIndexH:
  case Match_InvalidIndexS:
  case Match_InvalidIndexD:
  case Match_InvalidLabel:
    // Any time we get here, there's nothing fancy to do. Just get the
    // operand SMLoc and display the diagnostic.
    SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
    if (ErrorLoc == SMLoc())
    return showMatchError(ErrorLoc, MatchResult);
  llvm_unreachable("Implement any new match types added!");
/// ParseDirective - Dispatch the target-specific assembler directives:
/// data emission (.hword/.word/.xword), .tlsdesccall, and the Mach-O
/// linker-optimization-hint directive handled by parseDirectiveLOH.
/// Returns true on error (MCAsmParser convention).
bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  SMLoc Loc = DirectiveID.getLoc();
  if (IDVal == ".hword")
    return parseDirectiveWord(2, Loc);  // 2-byte values
  if (IDVal == ".word")
    return parseDirectiveWord(4, Loc);  // 4-byte values
  if (IDVal == ".xword")
    return parseDirectiveWord(8, Loc);  // 8-byte values
  if (IDVal == ".tlsdesccall")
    return parseDirectiveTLSDescCall(Loc);
  return parseDirectiveLOH(IDVal, Loc);
/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
/// Parse a comma-separated list of expressions and emit each one as a
/// \p Size-byte value to the streamer. Returns true on error.
bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  // An empty operand list (bare directive) is allowed and emits nothing.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
      const MCExpr *Value;
      if (getParser().parseExpression(Value))
      getParser().getStreamer().EmitValue(Value, Size);
      if (getLexer().is(AsmToken::EndOfStatement))
      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
// parseDirectiveTLSDescCall:
//   ::= .tlsdesccall symbol
// Parse the symbol operand, wrap it in a VK_TLSDESC target expression and
// emit a TLSDESCCALL pseudo-instruction carrying it, so later passes can
// attach the TLS descriptor relocation. Returns true on error.
bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  if (getParser().parseIdentifier(Name))
    return Error(L, "expected symbol after directive");
  MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
  const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
  Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
  Inst.setOpcode(ARM64::TLSDESCCALL);
  Inst.addOperand(MCOperand::CreateExpr(Expr));
  getParser().getStreamer().EmitInstruction(Inst, STI);
/// parseDirectiveLOH - Parse a Mach-O linker-optimization-hint directive:
///  ::= .loh <lohName | lohId> label1, ..., labelN
/// The number of arguments depends on the loh identifier.
/// The hint kind may be given either by name or by its numeric id; the
/// exact number of label arguments is dictated by that kind. Returns true
/// on error.
bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  if (IDVal != MCLOHDirectiveName())
  if (getParser().getTok().isNot(AsmToken::Identifier)) {
    if (getParser().getTok().isNot(AsmToken::Integer))
      return TokError("expected an identifier or a number in directive");
    // We successfully get a numeric value for the identifier.
    // Check if it is valid.
    int64_t Id = getParser().getTok().getIntVal();
    Kind = (MCLOHType)Id;
    // Check that Id does not overflow MCLOHType.
    if (!isValidMCLOHType(Kind) || Id != Kind)
      return TokError("invalid numeric identifier in directive");
    StringRef Name = getTok().getIdentifier();
    // We successfully parse an identifier.
    // Check if it is a recognized one.
    int Id = MCLOHNameToId(Name);
      return TokError("invalid identifier in directive");
    Kind = (MCLOHType)Id;
  // Consume the identifier.
  // Get the number of arguments of this LOH.
  int NbArgs = MCLOHIdToNbArgs(Kind);
  assert(NbArgs != -1 && "Invalid number of arguments");
  // Collect exactly NbArgs comma-separated label symbols.
  SmallVector<MCSymbol *, 3> Args;
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().GetOrCreateSymbol(Name));
    if (Idx + 1 == NbArgs)
    if (getLexer().isNot(AsmToken::Comma))
      return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
/// classifySymbolRef - Decompose \p Expr into its target variant kind
/// (an optional ARM64MCExpr ELF wrapper and/or a Darwin MCSymbolRefExpr
/// kind) plus an optional constant addend. The supported shapes are a bare
/// symbol reference or symbol +/- constant. Returns true when the
/// expression was classified and does not mix ELF and Darwin syntax.
ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                  ARM64MCExpr::VariantKind &ELFRefKind,
                                  MCSymbolRefExpr::VariantKind &DarwinRefKind,
  ELFRefKind = ARM64MCExpr::VK_INVALID;
  DarwinRefKind = MCSymbolRefExpr::VK_None;
  // Peel off an ELF-style target wrapper (e.g. :lo12:), if present.
  if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
    // It's a simple symbol reference with no addend.
    DarwinRefKind = SE->getKind();
  const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
  SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
  DarwinRefKind = SE->getKind();
  if (BE->getOpcode() != MCBinaryExpr::Add &&
      BE->getOpcode() != MCBinaryExpr::Sub)
  // See if the addend is a constant, otherwise there's more going
  // on here than we can deal with.
  auto AddendExpr = dyn_cast<MCConstantExpr>(BE->getRHS());
  Addend = AddendExpr->getValue();
  // Normalize "sym - C" to a negative addend.
  if (BE->getOpcode() == MCBinaryExpr::Sub)
  // It's some symbol reference + a constant addend, but really
  // shouldn't use both Darwin and ELF syntax.
  return ELFRefKind == ARM64MCExpr::VK_INVALID ||
         DarwinRefKind == MCSymbolRefExpr::VK_None;
/// Force static initialization.
/// Registers the ARM64 asm parser with the TargetRegistry for both the
/// little-endian and big-endian target entries, so tools (llvm-mc, clang)
/// can look it up by target.
extern "C" void LLVMInitializeARM64AsmParser() {
  RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
  RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
3962 #define GET_REGISTER_MATCHER
3963 #define GET_SUBTARGET_FEATURE_NAME
3964 #define GET_MATCHER_IMPLEMENTATION
3965 #include "ARM64GenAsmMatcher.inc"
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
/// validateTargetOperandClass - Target hook called by the generated
/// matcher: when \p Kind denotes a fixed-value immediate token (used by
/// InstAliases with a literal immediate in their syntax), check that
/// \p AsmOp is a constant immediate equal to the expected value.
/// Returns Match_Success or Match_InvalidOperand.
unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
  ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  int64_t ExpectedVal;
    return Match_InvalidOperand;
    return Match_InvalidOperand;
  // Only constant-expression immediates can match a literal token.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    return Match_InvalidOperand;
  if (CE->getValue() == ExpectedVal)
    return Match_Success;
  return Match_InvalidOperand;