1 //===-- ARM64AsmParser.cpp - Parse ARM64 assembly to MCInst instructions --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/ARM64AddressingModes.h"
11 #include "MCTargetDesc/ARM64MCExpr.h"
12 #include "Utils/ARM64BaseInfo.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCContext.h"
17 #include "llvm/MC/MCExpr.h"
18 #include "llvm/MC/MCInst.h"
19 #include "llvm/MC/MCRegisterInfo.h"
20 #include "llvm/MC/MCStreamer.h"
21 #include "llvm/MC/MCSubtargetInfo.h"
22 #include "llvm/MC/MCSymbol.h"
23 #include "llvm/MC/MCTargetAsmParser.h"
24 #include "llvm/Support/SourceMgr.h"
25 #include "llvm/Support/TargetRegistry.h"
26 #include "llvm/Support/ErrorHandling.h"
27 #include "llvm/Support/raw_ostream.h"
28 #include "llvm/ADT/SmallString.h"
29 #include "llvm/ADT/SmallVector.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/StringSwitch.h"
32 #include "llvm/ADT/Twine.h"
40 class ARM64AsmParser : public MCTargetAsmParser {
42 typedef SmallVectorImpl<MCParsedAsmOperand *> OperandVector;
45 StringRef Mnemonic; ///< Instruction mnemonic.
49 MCAsmParser &getParser() const { return Parser; }
50 MCAsmLexer &getLexer() const { return Parser.getLexer(); }
52 SMLoc getLoc() const { return Parser.getTok().getLoc(); }
54 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
55 unsigned parseCondCodeString(StringRef Cond);
56 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
57 int tryParseRegister();
58 int tryMatchVectorRegister(StringRef &Kind, bool expected);
59 bool parseOptionalShift(OperandVector &Operands);
60 bool parseOptionalExtend(OperandVector &Operands);
61 bool parseRegister(OperandVector &Operands);
62 bool parseMemory(OperandVector &Operands);
63 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
64 bool parseVectorList(OperandVector &Operands);
65 bool parseOperand(OperandVector &Operands, bool isCondCode,
68 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
69 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
70 bool showMatchError(SMLoc Loc, unsigned ErrCode);
72 bool parseDirectiveWord(unsigned Size, SMLoc L);
73 bool parseDirectiveTLSDescCall(SMLoc L);
75 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
77 bool validateInstruction(MCInst &Inst, SmallVectorImpl<SMLoc> &Loc);
78 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
79 OperandVector &Operands, MCStreamer &Out,
80 unsigned &ErrorInfo, bool MatchingInlineAsm);
81 /// @name Auto-generated Match Functions
84 #define GET_ASSEMBLER_HEADER
85 #include "ARM64GenAsmMatcher.inc"
89 OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
90 OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
91 OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
92 OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
93 OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
94 OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
95 OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
96 OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
97 OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
98 bool tryParseVectorRegister(OperandVector &Operands);
101 enum ARM64MatchResultTy {
102 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
103 #define GET_OPERAND_DIAGNOSTIC_TYPES
104 #include "ARM64GenAsmMatcher.inc"
106 ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
107 const MCInstrInfo &MII)
108 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
109 MCAsmParserExtension::Initialize(_Parser);
112 virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
113 SMLoc NameLoc, OperandVector &Operands);
114 virtual bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
115 virtual bool ParseDirective(AsmToken DirectiveID);
116 unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
118 static bool classifySymbolRef(const MCExpr *Expr,
119 ARM64MCExpr::VariantKind &ELFRefKind,
120 MCSymbolRefExpr::VariantKind &DarwinRefKind,
121 const MCConstantExpr *&Addend);
123 } // end anonymous namespace
127 /// ARM64Operand - Instances of this class represent a parsed ARM64 machine
129 class ARM64Operand : public MCParsedAsmOperand {
132 ImmediateOffset, // pre-indexed, no writeback
133 RegisterOffset // register offset, with optional extend
153 SMLoc StartLoc, EndLoc, OffsetLoc;
158 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
166 struct VectorListOp {
169 unsigned NumElements;
170 unsigned ElementKind;
173 struct VectorIndexOp {
182 unsigned Val; // Encoded 8-bit representation.
186 unsigned Val; // Not the enum since not all values have names.
210 // This is for all forms of ARM64 address expressions
212 unsigned BaseRegNum, OffsetRegNum;
213 ARM64_AM::ExtendType ExtType;
216 const MCExpr *OffsetImm;
223 struct VectorListOp VectorList;
224 struct VectorIndexOp VectorIndex;
226 struct FPImmOp FPImm;
227 struct BarrierOp Barrier;
228 struct SysRegOp SysReg;
229 struct SysCRImmOp SysCRImm;
230 struct PrefetchOp Prefetch;
231 struct ShifterOp Shifter;
232 struct ExtendOp Extend;
236 // Keep the MCContext around as the MCExprs may need manipulated during
237 // the add<>Operands() calls.
240 ARM64Operand(KindTy K, MCContext &_Ctx)
241 : MCParsedAsmOperand(), Kind(K), Ctx(_Ctx) {}
244 ARM64Operand(const ARM64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
246 StartLoc = o.StartLoc;
265 VectorList = o.VectorList;
268 VectorIndex = o.VectorIndex;
274 SysCRImm = o.SysCRImm;
277 Prefetch = o.Prefetch;
291 /// getStartLoc - Get the location of the first token of this operand.
292 SMLoc getStartLoc() const { return StartLoc; }
293 /// getEndLoc - Get the location of the last token of this operand.
294 SMLoc getEndLoc() const { return EndLoc; }
295 /// getOffsetLoc - Get the location of the offset of this memory operand.
296 SMLoc getOffsetLoc() const { return OffsetLoc; }
298 StringRef getToken() const {
299 assert(Kind == k_Token && "Invalid access!");
300 return StringRef(Tok.Data, Tok.Length);
303 bool isTokenSuffix() const {
304 assert(Kind == k_Token && "Invalid access!");
308 const MCExpr *getImm() const {
309 assert(Kind == k_Immediate && "Invalid access!");
313 unsigned getFPImm() const {
314 assert(Kind == k_FPImm && "Invalid access!");
318 unsigned getBarrier() const {
319 assert(Kind == k_Barrier && "Invalid access!");
323 unsigned getReg() const {
324 assert(Kind == k_Register && "Invalid access!");
328 unsigned getVectorListStart() const {
329 assert(Kind == k_VectorList && "Invalid access!");
330 return VectorList.RegNum;
333 unsigned getVectorListCount() const {
334 assert(Kind == k_VectorList && "Invalid access!");
335 return VectorList.Count;
338 unsigned getVectorIndex() const {
339 assert(Kind == k_VectorIndex && "Invalid access!");
340 return VectorIndex.Val;
343 StringRef getSysReg() const {
344 assert(Kind == k_SysReg && "Invalid access!");
345 return StringRef(SysReg.Data, SysReg.Length);
348 unsigned getSysCR() const {
349 assert(Kind == k_SysCR && "Invalid access!");
353 unsigned getPrefetch() const {
354 assert(Kind == k_Prefetch && "Invalid access!");
358 unsigned getShifter() const {
359 assert(Kind == k_Shifter && "Invalid access!");
363 unsigned getExtend() const {
364 assert(Kind == k_Extend && "Invalid access!");
368 bool isImm() const { return Kind == k_Immediate; }
369 bool isSImm9() const {
372 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
375 int64_t Val = MCE->getValue();
376 return (Val >= -256 && Val < 256);
378 bool isSImm7s4() const {
381 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
384 int64_t Val = MCE->getValue();
385 return (Val >= -256 && Val <= 252 && (Val & 3) == 0);
387 bool isSImm7s8() const {
390 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
393 int64_t Val = MCE->getValue();
394 return (Val >= -512 && Val <= 504 && (Val & 7) == 0);
396 bool isSImm7s16() const {
399 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
402 int64_t Val = MCE->getValue();
403 return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
405 bool isImm0_7() const {
408 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
411 int64_t Val = MCE->getValue();
412 return (Val >= 0 && Val < 8);
414 bool isImm1_8() const {
417 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
420 int64_t Val = MCE->getValue();
421 return (Val > 0 && Val < 9);
423 bool isImm0_15() const {
426 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
429 int64_t Val = MCE->getValue();
430 return (Val >= 0 && Val < 16);
432 bool isImm1_16() const {
435 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
438 int64_t Val = MCE->getValue();
439 return (Val > 0 && Val < 17);
441 bool isImm0_31() const {
444 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
447 int64_t Val = MCE->getValue();
448 return (Val >= 0 && Val < 32);
450 bool isImm1_31() const {
453 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
456 int64_t Val = MCE->getValue();
457 return (Val >= 1 && Val < 32);
459 bool isImm1_32() const {
462 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
465 int64_t Val = MCE->getValue();
466 return (Val >= 1 && Val < 33);
468 bool isImm0_63() const {
471 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
474 int64_t Val = MCE->getValue();
475 return (Val >= 0 && Val < 64);
477 bool isImm1_63() const {
480 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
483 int64_t Val = MCE->getValue();
484 return (Val >= 1 && Val < 64);
486 bool isImm1_64() const {
489 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
492 int64_t Val = MCE->getValue();
493 return (Val >= 1 && Val < 65);
495 bool isImm0_127() const {
498 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
501 int64_t Val = MCE->getValue();
502 return (Val >= 0 && Val < 128);
504 bool isImm0_255() const {
507 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
510 int64_t Val = MCE->getValue();
511 return (Val >= 0 && Val < 256);
513 bool isImm0_65535() const {
516 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
519 int64_t Val = MCE->getValue();
520 return (Val >= 0 && Val < 65536);
522 bool isLogicalImm32() const {
525 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
528 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 32);
530 bool isLogicalImm64() const {
533 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
536 return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
538 bool isSIMDImmType10() const {
541 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
544 return ARM64_AM::isAdvSIMDModImmType10(MCE->getValue());
546 bool isBranchTarget26() const {
549 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
552 int64_t Val = MCE->getValue();
555 return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
557 bool isBranchTarget19() const {
560 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
563 int64_t Val = MCE->getValue();
566 return (Val >= -(0x40000 << 2) && Val <= (0x3ffff << 2));
568 bool isBranchTarget14() const {
571 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
574 int64_t Val = MCE->getValue();
577 return (Val >= -(0x2000 << 2) && Val <= (0x1fff << 2));
580 bool isMovWSymbol(ArrayRef<ARM64MCExpr::VariantKind> AllowedModifiers) const {
584 ARM64MCExpr::VariantKind ELFRefKind;
585 MCSymbolRefExpr::VariantKind DarwinRefKind;
586 const MCConstantExpr *Addend;
587 if (!ARM64AsmParser::classifySymbolRef(getImm(), ELFRefKind, DarwinRefKind,
591 if (DarwinRefKind != MCSymbolRefExpr::VK_None)
594 for (unsigned i = 0; i != AllowedModifiers.size(); ++i) {
595 if (ELFRefKind == AllowedModifiers[i])
602 bool isMovZSymbolG3() const {
603 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
604 return isMovWSymbol(Variants);
607 bool isMovZSymbolG2() const {
608 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
609 ARM64MCExpr::VK_TPREL_G2,
610 ARM64MCExpr::VK_DTPREL_G2 };
611 return isMovWSymbol(Variants);
614 bool isMovZSymbolG1() const {
615 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
616 ARM64MCExpr::VK_GOTTPREL_G1,
617 ARM64MCExpr::VK_TPREL_G1,
618 ARM64MCExpr::VK_DTPREL_G1, };
619 return isMovWSymbol(Variants);
622 bool isMovZSymbolG0() const {
623 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
624 ARM64MCExpr::VK_TPREL_G0,
625 ARM64MCExpr::VK_DTPREL_G0 };
626 return isMovWSymbol(Variants);
629 bool isMovKSymbolG2() const {
630 static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
631 return isMovWSymbol(Variants);
634 bool isMovKSymbolG1() const {
635 static ARM64MCExpr::VariantKind Variants[] = {
636 ARM64MCExpr::VK_ABS_G1_NC, ARM64MCExpr::VK_TPREL_G1_NC,
637 ARM64MCExpr::VK_DTPREL_G1_NC
639 return isMovWSymbol(Variants);
642 bool isMovKSymbolG0() const {
643 static ARM64MCExpr::VariantKind Variants[] = {
644 ARM64MCExpr::VK_ABS_G0_NC, ARM64MCExpr::VK_GOTTPREL_G0_NC,
645 ARM64MCExpr::VK_TPREL_G0_NC, ARM64MCExpr::VK_DTPREL_G0_NC
647 return isMovWSymbol(Variants);
650 bool isFPImm() const { return Kind == k_FPImm; }
651 bool isBarrier() const { return Kind == k_Barrier; }
652 bool isSysReg() const { return Kind == k_SysReg; }
653 bool isMRSSystemRegister() const {
654 if (!isSysReg()) return false;
656 bool IsKnownRegister;
657 ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister);
659 return IsKnownRegister;
661 bool isMSRSystemRegister() const {
662 if (!isSysReg()) return false;
664 bool IsKnownRegister;
665 ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister);
667 return IsKnownRegister;
669 bool isSystemCPSRField() const {
670 if (!isSysReg()) return false;
672 bool IsKnownRegister;
673 ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
675 return IsKnownRegister;
677 bool isReg() const { return Kind == k_Register && !Reg.isVector; }
678 bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
680 /// Is this a vector list with the type implicit (presumably attached to the
681 /// instruction itself)?
682 template <unsigned NumRegs> bool isImplicitlyTypedVectorList() const {
683 return Kind == k_VectorList && VectorList.Count == NumRegs &&
684 !VectorList.ElementKind;
687 template <unsigned NumRegs, unsigned NumElements, char ElementKind>
688 bool isTypedVectorList() const {
689 if (Kind != k_VectorList)
691 if (VectorList.Count != NumRegs)
693 if (VectorList.ElementKind != ElementKind)
695 return VectorList.NumElements == NumElements;
698 bool isVectorIndexB() const {
699 return Kind == k_VectorIndex && VectorIndex.Val < 16;
701 bool isVectorIndexH() const {
702 return Kind == k_VectorIndex && VectorIndex.Val < 8;
704 bool isVectorIndexS() const {
705 return Kind == k_VectorIndex && VectorIndex.Val < 4;
707 bool isVectorIndexD() const {
708 return Kind == k_VectorIndex && VectorIndex.Val < 2;
710 bool isToken() const { return Kind == k_Token; }
711 bool isTokenEqual(StringRef Str) const {
712 return Kind == k_Token && getToken() == Str;
714 bool isMem() const { return Kind == k_Memory; }
715 bool isSysCR() const { return Kind == k_SysCR; }
716 bool isPrefetch() const { return Kind == k_Prefetch; }
717 bool isShifter() const { return Kind == k_Shifter; }
718 bool isExtend() const {
719 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
721 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
722 return ST == ARM64_AM::LSL;
724 return Kind == k_Extend;
726 bool isExtend64() const {
727 if (Kind != k_Extend)
729 // UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
730 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
731 return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
733 bool isExtendLSL64() const {
734 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
736 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
737 return ST == ARM64_AM::LSL;
739 if (Kind != k_Extend)
741 ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
742 return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
745 bool isArithmeticShifter() const {
749 // An arithmetic shifter is LSL, LSR, or ASR.
750 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
751 return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
754 bool isMovImm32Shifter() const {
758 // A MOVi shifter is LSL of 0, 16, 32, or 48.
759 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
760 if (ST != ARM64_AM::LSL)
762 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
763 return (Val == 0 || Val == 16);
766 bool isMovImm64Shifter() const {
770 // A MOVi shifter is LSL of 0 or 16.
771 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
772 if (ST != ARM64_AM::LSL)
774 uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
775 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
778 bool isAddSubShifter() const {
782 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
783 unsigned Val = Shifter.Val;
784 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
785 (ARM64_AM::getShiftValue(Val) == 0 ||
786 ARM64_AM::getShiftValue(Val) == 12);
789 bool isLogicalVecShifter() const {
793 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
794 unsigned Val = Shifter.Val;
795 unsigned Shift = ARM64_AM::getShiftValue(Val);
796 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
797 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
800 bool isLogicalVecHalfWordShifter() const {
801 if (!isLogicalVecShifter())
804 // A logical vector shifter is a left shift by 0 or 8.
805 unsigned Val = Shifter.Val;
806 unsigned Shift = ARM64_AM::getShiftValue(Val);
807 return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
808 (Shift == 0 || Shift == 8);
811 bool isMoveVecShifter() const {
815 // A logical vector shifter is a left shift by 8 or 16.
816 unsigned Val = Shifter.Val;
817 unsigned Shift = ARM64_AM::getShiftValue(Val);
818 return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
819 (Shift == 8 || Shift == 16);
822 bool isMemoryRegisterOffset8() const {
823 return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
826 bool isMemoryRegisterOffset16() const {
827 return isMem() && Mem.Mode == RegisterOffset &&
828 (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
831 bool isMemoryRegisterOffset32() const {
832 return isMem() && Mem.Mode == RegisterOffset &&
833 (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
836 bool isMemoryRegisterOffset64() const {
837 return isMem() && Mem.Mode == RegisterOffset &&
838 (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
841 bool isMemoryRegisterOffset128() const {
842 return isMem() && Mem.Mode == RegisterOffset &&
843 (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
846 bool isMemoryUnscaled() const {
849 if (Mem.Mode != ImmediateOffset)
853 // Make sure the immediate value is valid.
854 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
857 // The offset must fit in a signed 9-bit unscaled immediate.
858 int64_t Value = CE->getValue();
859 return (Value >= -256 && Value < 256);
861 // Fallback unscaled operands are for aliases of LDR/STR that fall back
862 // to LDUR/STUR when the offset is not legal for the former but is for
863 // the latter. As such, in addition to checking for being a legal unscaled
864 // address, also check that it is not a legal scaled address. This avoids
865 // ambiguity in the matcher.
866 bool isMemoryUnscaledFB8() const {
867 return isMemoryUnscaled() && !isMemoryIndexed8();
869 bool isMemoryUnscaledFB16() const {
870 return isMemoryUnscaled() && !isMemoryIndexed16();
872 bool isMemoryUnscaledFB32() const {
873 return isMemoryUnscaled() && !isMemoryIndexed32();
875 bool isMemoryUnscaledFB64() const {
876 return isMemoryUnscaled() && !isMemoryIndexed64();
878 bool isMemoryUnscaledFB128() const {
879 return isMemoryUnscaled() && !isMemoryIndexed128();
881 bool isMemoryIndexed(unsigned Scale) const {
884 if (Mem.Mode != ImmediateOffset)
888 // Make sure the immediate value is valid.
889 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
892 // The offset must be a positive multiple of the scale and in range of
893 // encoding with a 12-bit immediate.
894 int64_t Value = CE->getValue();
895 return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
898 // If it's not a constant, check for some expressions we know.
899 const MCExpr *Expr = Mem.OffsetImm;
900 ARM64MCExpr::VariantKind ELFRefKind;
901 MCSymbolRefExpr::VariantKind DarwinRefKind;
902 const MCConstantExpr *Addend;
903 if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
905 // If we don't understand the expression, assume the best and
906 // let the fixup and relocation code deal with it.
910 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
911 ELFRefKind == ARM64MCExpr::VK_LO12 ||
912 ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
913 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
914 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
915 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
916 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
917 ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
918 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
919 // Note that we don't range-check the addend. It's adjusted modulo page
920 // size when converted, so there is no "out of range" condition when using
922 int64_t Value = Addend ? Addend->getValue() : 0;
923 return Value >= 0 && (Value % Scale) == 0;
924 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
925 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
926 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
932 bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
933 bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
934 bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
935 bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
936 bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
937 bool isMemoryNoIndex() const {
940 if (Mem.Mode != ImmediateOffset)
945 // Make sure the immediate value is valid. Only zero is allowed.
946 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
947 if (!CE || CE->getValue() != 0)
951 bool isMemorySIMDNoIndex() const {
954 if (Mem.Mode != ImmediateOffset)
956 return Mem.OffsetImm == 0;
958 bool isMemoryIndexedSImm9() const {
959 if (!isMem() || Mem.Mode != ImmediateOffset)
963 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
964 assert(CE && "Non-constant pre-indexed offset!");
965 int64_t Value = CE->getValue();
966 return Value >= -256 && Value <= 255;
968 bool isMemoryIndexed32SImm7() const {
969 if (!isMem() || Mem.Mode != ImmediateOffset)
973 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
974 assert(CE && "Non-constant pre-indexed offset!");
975 int64_t Value = CE->getValue();
976 return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
978 bool isMemoryIndexed64SImm7() const {
979 if (!isMem() || Mem.Mode != ImmediateOffset)
983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
984 assert(CE && "Non-constant pre-indexed offset!");
985 int64_t Value = CE->getValue();
986 return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
988 bool isMemoryIndexed128SImm7() const {
989 if (!isMem() || Mem.Mode != ImmediateOffset)
993 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
994 assert(CE && "Non-constant pre-indexed offset!");
995 int64_t Value = CE->getValue();
996 return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
999 bool isAdrpLabel() const {
1000 // Validation was handled during parsing, so we just sanity check that
1001 // something didn't go haywire.
1005 bool isAdrLabel() const {
1006 // Validation was handled during parsing, so we just sanity check that
1007 // something didn't go haywire.
1011 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1012 // Add as immediates when possible. Null MCExpr = 0.
1014 Inst.addOperand(MCOperand::CreateImm(0));
1015 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1016 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1018 Inst.addOperand(MCOperand::CreateExpr(Expr));
1021 void addRegOperands(MCInst &Inst, unsigned N) const {
1022 assert(N == 1 && "Invalid number of operands!");
1023 Inst.addOperand(MCOperand::CreateReg(getReg()));
1026 void addVectorRegOperands(MCInst &Inst, unsigned N) const {
1027 assert(N == 1 && "Invalid number of operands!");
1028 Inst.addOperand(MCOperand::CreateReg(getReg()));
1031 template <unsigned NumRegs>
1032 void addVectorList64Operands(MCInst &Inst, unsigned N) const {
1033 assert(N == 1 && "Invalid number of operands!");
1034 static unsigned FirstRegs[] = { ARM64::D0, ARM64::D0_D1,
1035 ARM64::D0_D1_D2, ARM64::D0_D1_D2_D3 };
1036 unsigned FirstReg = FirstRegs[NumRegs - 1];
1039 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1042 template <unsigned NumRegs>
1043 void addVectorList128Operands(MCInst &Inst, unsigned N) const {
1044 assert(N == 1 && "Invalid number of operands!");
1045 static unsigned FirstRegs[] = { ARM64::Q0, ARM64::Q0_Q1,
1046 ARM64::Q0_Q1_Q2, ARM64::Q0_Q1_Q2_Q3 };
1047 unsigned FirstReg = FirstRegs[NumRegs - 1];
1050 MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
1053 void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
1054 assert(N == 1 && "Invalid number of operands!");
1055 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1058 void addVectorIndexHOperands(MCInst &Inst, unsigned N) const {
1059 assert(N == 1 && "Invalid number of operands!");
1060 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1063 void addVectorIndexSOperands(MCInst &Inst, unsigned N) const {
1064 assert(N == 1 && "Invalid number of operands!");
1065 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1068 void addVectorIndexDOperands(MCInst &Inst, unsigned N) const {
1069 assert(N == 1 && "Invalid number of operands!");
1070 Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1073 void addImmOperands(MCInst &Inst, unsigned N) const {
1074 assert(N == 1 && "Invalid number of operands!");
1075 // If this is a pageoff symrefexpr with an addend, adjust the addend
1076 // to be only the page-offset portion. Otherwise, just add the expr
1078 addExpr(Inst, getImm());
1081 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1082 addImmOperands(Inst, N);
1085 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1086 addImmOperands(Inst, N);
1089 void addSImm9Operands(MCInst &Inst, unsigned N) const {
1090 assert(N == 1 && "Invalid number of operands!");
1091 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1092 assert(MCE && "Invalid constant immediate operand!");
1093 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1096 void addSImm7s4Operands(MCInst &Inst, unsigned N) const {
1097 assert(N == 1 && "Invalid number of operands!");
1098 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1099 assert(MCE && "Invalid constant immediate operand!");
1100 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 4));
1103 void addSImm7s8Operands(MCInst &Inst, unsigned N) const {
1104 assert(N == 1 && "Invalid number of operands!");
1105 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1106 assert(MCE && "Invalid constant immediate operand!");
1107 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 8));
1110 void addSImm7s16Operands(MCInst &Inst, unsigned N) const {
1111 assert(N == 1 && "Invalid number of operands!");
1112 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1113 assert(MCE && "Invalid constant immediate operand!");
1114 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / 16));
1117 void addImm0_7Operands(MCInst &Inst, unsigned N) const {
1118 assert(N == 1 && "Invalid number of operands!");
1119 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1120 assert(MCE && "Invalid constant immediate operand!");
1121 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1124 void addImm1_8Operands(MCInst &Inst, unsigned N) const {
1125 assert(N == 1 && "Invalid number of operands!");
1126 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1127 assert(MCE && "Invalid constant immediate operand!");
1128 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1131 void addImm0_15Operands(MCInst &Inst, unsigned N) const {
1132 assert(N == 1 && "Invalid number of operands!");
1133 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1134 assert(MCE && "Invalid constant immediate operand!");
1135 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1138 void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1139 assert(N == 1 && "Invalid number of operands!");
1140 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1141 assert(MCE && "Invalid constant immediate operand!");
1142 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1145 void addImm0_31Operands(MCInst &Inst, unsigned N) const {
1146 assert(N == 1 && "Invalid number of operands!");
1147 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1148 assert(MCE && "Invalid constant immediate operand!");
1149 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1152 void addImm1_31Operands(MCInst &Inst, unsigned N) const {
1153 assert(N == 1 && "Invalid number of operands!");
1154 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1155 assert(MCE && "Invalid constant immediate operand!");
1156 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Immediate-adder family: each addImmX_YOperands pushes the operand's
// constant immediate value onto the MCInst unchanged. The named range
// (1..32, 0..63, ...) is not re-checked here; presumably the matching
// isImmX_Y predicate validated it before the matcher calls these --
// TODO confirm against the predicate definitions earlier in the class.
// NOTE(review): dyn_cast + assert means a non-constant immediate is only
// caught in asserts builds; a release build would dereference null MCE.
1159 void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1160 assert(N == 1 && "Invalid number of operands!");
1161 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1162 assert(MCE && "Invalid constant immediate operand!");
1163 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1166 void addImm0_63Operands(MCInst &Inst, unsigned N) const {
1167 assert(N == 1 && "Invalid number of operands!");
1168 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1169 assert(MCE && "Invalid constant immediate operand!");
1170 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1173 void addImm1_63Operands(MCInst &Inst, unsigned N) const {
1174 assert(N == 1 && "Invalid number of operands!");
1175 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1176 assert(MCE && "Invalid constant immediate operand!");
1177 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1180 void addImm1_64Operands(MCInst &Inst, unsigned N) const {
1181 assert(N == 1 && "Invalid number of operands!");
1182 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1183 assert(MCE && "Invalid constant immediate operand!");
1184 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1187 void addImm0_127Operands(MCInst &Inst, unsigned N) const {
1188 assert(N == 1 && "Invalid number of operands!");
1189 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1190 assert(MCE && "Invalid constant immediate operand!");
1191 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1194 void addImm0_255Operands(MCInst &Inst, unsigned N) const {
1195 assert(N == 1 && "Invalid number of operands!");
1196 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1197 assert(MCE && "Invalid constant immediate operand!");
1198 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
1201 void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
1202 assert(N == 1 && "Invalid number of operands!");
1203 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1204 assert(MCE && "Invalid constant immediate operand!");
1205 Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
// Logical-immediate adders: the raw constant is converted into the
// AArch64 (N, immr, imms) bitmask-immediate encoding for the given
// register width before being attached to the MCInst.
1208 void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
1209 assert(N == 1 && "Invalid number of operands!");
1210 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1211 assert(MCE && "Invalid logical immediate operand!");
1212 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 32);
1213 Inst.addOperand(MCOperand::CreateImm(encoding));
1216 void addLogicalImm64Operands(MCInst &Inst, unsigned N) const {
1217 assert(N == 1 && "Invalid number of operands!");
1218 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1219 assert(MCE && "Invalid logical immediate operand!");
1220 uint64_t encoding = ARM64_AM::encodeLogicalImmediate(MCE->getValue(), 64);
1221 Inst.addOperand(MCOperand::CreateImm(encoding));
// AdvSIMD modified-immediate (type 10): encode the constant via the
// ARM64_AM helper before emission.
1224 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
1225 assert(N == 1 && "Invalid number of operands!");
1226 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1227 assert(MCE && "Invalid immediate operand!");
1228 uint64_t encoding = ARM64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
1229 Inst.addOperand(MCOperand::CreateImm(encoding));
// Branch-target adders: a constant target has its low two bits shifted
// off to match the instruction encoding; a symbolic target (label) is
// attached as a bare expression via addExpr for later fixup resolution
// -- the guard between the two paths is presumably `if (!MCE)`; confirm.
1232 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
1233 // Branch operands don't encode the low bits, so shift them off
1234 // here. If it's a label, however, just put it on directly as there's
1235 // not enough information now to do anything.
1236 assert(N == 1 && "Invalid number of operands!");
1237 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
// Non-constant (label) path: emit the raw expression.
1239 addExpr(Inst, getImm());
1242 assert(MCE && "Invalid constant immediate operand!");
1243 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1246 void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
1247 // Branch operands don't encode the low bits, so shift them off
1248 // here. If it's a label, however, just put it on directly as there's
1249 // not enough information now to do anything.
1250 assert(N == 1 && "Invalid number of operands!");
1251 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1253 addExpr(Inst, getImm());
1256 assert(MCE && "Invalid constant immediate operand!");
1257 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
1260 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
1261 // Branch operands don't encode the low bits, so shift them off
1262 // here. If it's a label, however, just put it on directly as there's
1263 // not enough information now to do anything.
1264 assert(N == 1 && "Invalid number of operands!");
1265 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1267 addExpr(Inst, getImm());
1270 assert(MCE && "Invalid constant immediate operand!");
1271 Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
// Simple value adders: forward the stored operand value (FP-imm
// encoding, barrier option, system-register bits, CRn/CRm field,
// prefetch op) straight into the MCInst as an immediate.
1274 void addFPImmOperands(MCInst &Inst, unsigned N) const {
1275 assert(N == 1 && "Invalid number of operands!");
1276 Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1279 void addBarrierOperands(MCInst &Inst, unsigned N) const {
1280 assert(N == 1 && "Invalid number of operands!");
1281 Inst.addOperand(MCOperand::CreateImm(getBarrier()));
// MRS/MSR/PState: translate the register name via the table-driven
// mapper. NOTE(review): `Valid` is declared/checked on elided lines --
// confirm a failed lookup cannot reach CreateImm with garbage Bits.
1284 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1285 assert(N == 1 && "Invalid number of operands!");
1288 uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid);
1290 Inst.addOperand(MCOperand::CreateImm(Bits));
1293 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
1294 assert(N == 1 && "Invalid number of operands!");
1297 uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid);
1299 Inst.addOperand(MCOperand::CreateImm(Bits));
1302 void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
1303 assert(N == 1 && "Invalid number of operands!");
1306 uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
1308 Inst.addOperand(MCOperand::CreateImm(Bits));
1311 void addSysCROperands(MCInst &Inst, unsigned N) const {
1312 assert(N == 1 && "Invalid number of operands!");
1313 Inst.addOperand(MCOperand::CreateImm(getSysCR()));
1316 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
1317 assert(N == 1 && "Invalid number of operands!");
1318 Inst.addOperand(MCOperand::CreateImm(getPrefetch()));
// Shifter-adder family: all of these emit the same packed shifter
// immediate (type + amount, see ARM64_AM::getShifterImm). The distinct
// names exist so the auto-generated matcher can apply different range /
// kind predicates per instruction class; the add action is identical.
1321 void addShifterOperands(MCInst &Inst, unsigned N) const {
1322 assert(N == 1 && "Invalid number of operands!");
1323 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1326 void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
1327 assert(N == 1 && "Invalid number of operands!");
1328 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1331 void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
1332 assert(N == 1 && "Invalid number of operands!");
1333 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1336 void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
1337 assert(N == 1 && "Invalid number of operands!");
1338 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1341 void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
1342 assert(N == 1 && "Invalid number of operands!");
1343 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1346 void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
1347 assert(N == 1 && "Invalid number of operands!");
1348 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1351 void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
1352 assert(N == 1 && "Invalid number of operands!");
1353 Inst.addOperand(MCOperand::CreateImm(getShifter()));
1356 void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
1357 assert(N == 1 && "Invalid number of operands!");
1358 Inst.addOperand(MCOperand::CreateImm(getShifter()));
// Extend adders. "lsl" parses as a k_Shifter, so when the operand is a
// shifter it is converted here to the equivalent arithmetic-extend
// immediate (UXTW for 32-bit, UXTX for 64-bit) keeping the shift amount;
// otherwise the stored extend value is emitted directly. The branch
// guard (presumably `if (isShifter())`) sits on elided lines -- confirm.
1361 void addExtendOperands(MCInst &Inst, unsigned N) const {
1362 assert(N == 1 && "Invalid number of operands!");
1363 // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
1365 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1366 unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
1367 ARM64_AM::getShiftValue(getShifter()));
1368 Inst.addOperand(MCOperand::CreateImm(imm));
1370 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// 64-bit variant without the LSL-alias conversion.
1373 void addExtend64Operands(MCInst &Inst, unsigned N) const {
1374 assert(N == 1 && "Invalid number of operands!");
1375 Inst.addOperand(MCOperand::CreateImm(getExtend()));
1378 void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
1379 assert(N == 1 && "Invalid number of operands!");
1380 // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
1382 assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
1383 unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
1384 ARM64_AM::getShiftValue(getShifter()));
1385 Inst.addOperand(MCOperand::CreateImm(imm));
1387 Inst.addOperand(MCOperand::CreateImm(getExtend()));
// Register-offset addressing: emits base reg, offset reg, and a packed
// extend immediate whose shift bit is DoShift. The per-width wrappers
// enable the shift only when the parsed shift amount equals
// log2(access size in bytes); the 8-bit form defers to whether a shift
// was written explicitly.
1390 void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
1391 assert(N == 3 && "Invalid number of operands!");
1393 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1394 Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
1395 unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
1396 Inst.addOperand(MCOperand::CreateImm(ExtendImm));
1399 void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
1400 addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
1403 void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
1404 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
1407 void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
1408 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
1411 void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
1412 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
1415 void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
1416 addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
// Scaled immediate-offset addressing: emits the base register, then the
// offset. A missing offset becomes 0; a constant offset is divided by
// Scale (the encoding omits the low bits); anything symbolic is emitted
// as an expression, with an explicit /Scale wrapped around it unless the
// linker will scale it itself (@pageoff symbol + addend case).
1419 void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
1420 unsigned Scale) const {
1421 // Add the base register operand.
1422 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1424 if (!Mem.OffsetImm) {
1425 // There isn't an offset.
1426 Inst.addOperand(MCOperand::CreateImm(0));
1430 // Add the offset operand.
1431 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
1432 assert(CE->getValue() % Scale == 0 &&
1433 "Offset operand must be multiple of the scale!");
1435 // The MCInst offset operand doesn't include the low bits (like the
1436 // instruction encoding).
1437 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
1440 // If this is a pageoff symrefexpr with an addend, the linker will
1441 // do the scaling of the addend.
1443 // Otherwise we don't know what this is, so just add the scaling divide to
1444 // the expression and let the MC fixup evaluation code deal with it.
1445 const MCExpr *Expr = Mem.OffsetImm;
1446 ARM64MCExpr::VariantKind ELFRefKind;
1447 MCSymbolRefExpr::VariantKind DarwinRefKind;
1448 const MCConstantExpr *Addend;
1450 (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
1452 (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
1453 Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
1457 Inst.addOperand(MCOperand::CreateExpr(Expr));
// Unscaled (ldur/stur-style) addressing: base register plus the raw,
// unscaled constant offset (0 when absent). Only constant offsets are
// accepted here -- cast<> would assert on a symbolic offset.
1460 void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
1461 assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
1462 // Add the base register operand.
1463 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1465 // Add the offset operand.
1467 Inst.addOperand(MCOperand::CreateImm(0));
1469 // Only constant offsets supported.
1470 const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
1471 Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
// Per-access-size wrappers: forward to the scaled adder with the
// element size in bytes as the scale.
1475 void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
1476 assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
1477 addMemoryIndexedOperands(Inst, N, 16);
1480 void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
1481 assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
1482 addMemoryIndexedOperands(Inst, N, 8);
1485 void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
1486 assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
1487 addMemoryIndexedOperands(Inst, N, 4);
1490 void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
1491 assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
1492 addMemoryIndexedOperands(Inst, N, 2);
1495 void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
1496 assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
1497 addMemoryIndexedOperands(Inst, N, 1);
// No-offset addressing forms: only the base register is emitted; the
// offset is known to be zero by the isMemory*NoIndex predicate.
1500 void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
1501 assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
1502 // Add the base register operand (the offset is always zero, so ignore it).
1503 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1506 void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
1507 assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
1508 // Add the base register operand (the offset is always zero, so ignore it).
1509 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
// Pre/post-indexed (writeback) addressing: base register plus a
// constant offset divided by Scale. A missing offset is treated as 0;
// non-constant offsets are rejected by assertion. The Offset local is
// declared and the /Scale division performed on elided lines -- the
// emitted value is presumably Offset / Scale as in the scaled adders;
// confirm.
1512 void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
1513 unsigned Scale) const {
1514 assert(N == 2 && "Invalid number of operands!");
1516 // Add the base register operand.
1517 Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
1519 // Add the offset operand.
1521 if (Mem.OffsetImm) {
1522 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
1523 assert(CE && "Non-constant indexed offset operand!");
1524 Offset = CE->getValue();
1528 assert(Offset % Scale == 0 &&
1529 "Offset operand must be a multiple of the scale!");
1533 Inst.addOperand(MCOperand::CreateImm(Offset));
// Writeback wrappers for the signed-imm9 and signed-imm7 forms.
1536 void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
1537 addMemoryWritebackIndexedOperands(Inst, N, 1);
1540 void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
1541 addMemoryWritebackIndexedOperands(Inst, N, 4);
1544 void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
1545 addMemoryWritebackIndexedOperands(Inst, N, 8);
1548 void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
1549 addMemoryWritebackIndexedOperands(Inst, N, 16);
// Debug-dump hook; definition follows the class.
1552 virtual void print(raw_ostream &OS) const;
// Factory for a token operand. Stores Str.data()/size() directly, so the
// caller's string storage must outlive the operand.
1554 static ARM64Operand *CreateToken(StringRef Str, bool IsSuffix, SMLoc S,
1556 ARM64Operand *Op = new ARM64Operand(k_Token, Ctx);
1557 Op->Tok.Data = Str.data();
1558 Op->Tok.Length = Str.size();
1559 Op->Tok.IsSuffix = IsSuffix;
// Factory functions for the remaining operand kinds. Each allocates the
// operand, fills the kind-specific union fields shown, and (on elided
// lines) presumably sets the start/end locations and returns Op.
1565 static ARM64Operand *CreateReg(unsigned RegNum, bool isVector, SMLoc S,
1566 SMLoc E, MCContext &Ctx) {
1567 ARM64Operand *Op = new ARM64Operand(k_Register, Ctx);
1568 Op->Reg.RegNum = RegNum;
1569 Op->Reg.isVector = isVector;
// Vector register list (e.g. {v0.4s-v3.4s}): first register, count,
// lane count and element-kind letter.
1575 static ARM64Operand *CreateVectorList(unsigned RegNum, unsigned Count,
1576 unsigned NumElements, char ElementKind,
1577 SMLoc S, SMLoc E, MCContext &Ctx) {
1578 ARM64Operand *Op = new ARM64Operand(k_VectorList, Ctx);
1579 Op->VectorList.RegNum = RegNum;
1580 Op->VectorList.Count = Count;
1581 Op->VectorList.NumElements = NumElements;
1582 Op->VectorList.ElementKind = ElementKind;
1588 static ARM64Operand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1590 ARM64Operand *Op = new ARM64Operand(k_VectorIndex, Ctx);
1591 Op->VectorIndex.Val = Idx;
1597 static ARM64Operand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E,
1599 ARM64Operand *Op = new ARM64Operand(k_Immediate, Ctx);
1606 static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1607 ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
1608 Op->FPImm.Val = Val;
1614 static ARM64Operand *CreateBarrier(unsigned Val, SMLoc S, MCContext &Ctx) {
1615 ARM64Operand *Op = new ARM64Operand(k_Barrier, Ctx);
1616 Op->Barrier.Val = Val;
// System-register name is stored as raw pointer/length into Str, so the
// backing storage must outlive the operand.
1622 static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) {
1623 ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
1624 Op->SysReg.Data = Str.data();
1625 Op->SysReg.Length = Str.size();
// Immediate-offset memory operand: base register + optional offset
// expression; extend/shift fields are initialized to their neutral
// values (UXTX, shift 0, no explicit shift).
1631 static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
1632 SMLoc S, SMLoc E, SMLoc OffsetLoc,
1634 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1635 Op->Mem.BaseRegNum = BaseRegNum;
1636 Op->Mem.OffsetRegNum = 0;
1637 Op->Mem.OffsetImm = Off;
1638 Op->Mem.ExtType = ARM64_AM::UXTX;
1639 Op->Mem.ShiftVal = 0;
1640 Op->Mem.ExplicitShift = false;
1641 Op->Mem.Mode = ImmediateOffset;
1642 Op->OffsetLoc = OffsetLoc;
// Register-offset memory operand: base + offset registers with the
// parsed extend type and shift amount; ExplicitShift records whether the
// user wrote the shift (used by addMemoryRegisterOffset8Operands).
1648 static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
1649 ARM64_AM::ExtendType ExtType,
1650 unsigned ShiftVal, bool ExplicitShift,
1651 SMLoc S, SMLoc E, MCContext &Ctx) {
1652 ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
1653 Op->Mem.BaseRegNum = BaseReg;
1654 Op->Mem.OffsetRegNum = OffsetReg;
1655 Op->Mem.OffsetImm = 0;
1656 Op->Mem.ExtType = ExtType;
1657 Op->Mem.ShiftVal = ShiftVal;
1658 Op->Mem.ExplicitShift = ExplicitShift;
1659 Op->Mem.Mode = RegisterOffset;
1665 static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
1667 ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
1668 Op->SysCRImm.Val = Val;
1674 static ARM64Operand *CreatePrefetch(unsigned Val, SMLoc S, MCContext &Ctx) {
1675 ARM64Operand *Op = new ARM64Operand(k_Prefetch, Ctx);
1676 Op->Prefetch.Val = Val;
// Shifter/extend operands store the pre-packed immediate form.
1682 static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
1683 SMLoc S, SMLoc E, MCContext &Ctx) {
1684 ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
1685 Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
1691 static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
1692 SMLoc S, SMLoc E, MCContext &Ctx) {
1693 ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
1694 Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
1701 } // end anonymous namespace.
// Debug dump of an operand. Dispatches on the operand kind (the case
// labels sit on elided lines) and prints a human-readable form; mapper
// lookups fall back to printing the raw value when the name is invalid.
1703 void ARM64Operand::print(raw_ostream &OS) const {
1706 OS << "<fpimm " << getFPImm() << "(" << ARM64_AM::getFPImmFloat(getFPImm())
1711 StringRef Name = ARM64DB::DBarrierMapper().toString(getBarrier(), Valid);
1713 OS << "<barrier " << Name << ">";
1715 OS << "<barrier invalid #" << getBarrier() << ">";
1719 getImm()->print(OS);
1725 OS << "<register " << getReg() << ">";
1727 case k_VectorList: {
1728 OS << "<vectorlist ";
1729 unsigned Reg = getVectorListStart();
// Print each register number in the list, space separated.
1730 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
1731 OS << Reg + i << " ";
1736 OS << "<vectorindex " << getVectorIndex() << ">";
1739 OS << "<sysreg: " << getSysReg() << '>';
1742 OS << "'" << getToken() << "'";
1745 OS << "c" << getSysCR();
1749 StringRef Name = ARM64PRFM::PRFMMapper().toString(getPrefetch(), Valid);
1751 OS << "<prfop " << Name << ">";
1753 OS << "<prfop invalid #" << getPrefetch() << ">";
// Shifter: decode type name and amount from the packed immediate.
1757 unsigned Val = getShifter();
1758 OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
1759 << ARM64_AM::getShiftValue(Val) << ">";
1763 unsigned Val = getExtend();
1764 OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
1765 << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
1771 /// @name Auto-generated Match Functions
// Supplied by the tablegen'erated matcher include.
1774 static unsigned MatchRegisterName(StringRef Name);
// Map a "vN" vector-register spelling onto the corresponding Q-register
// enum value. Presumably falls through to a .Default(0)/invalid result
// for anything else (on an elided line) -- confirm.
1778 static unsigned matchVectorRegName(StringRef Name) {
1779 return StringSwitch<unsigned>(Name)
1780 .Case("v0", ARM64::Q0)
1781 .Case("v1", ARM64::Q1)
1782 .Case("v2", ARM64::Q2)
1783 .Case("v3", ARM64::Q3)
1784 .Case("v4", ARM64::Q4)
1785 .Case("v5", ARM64::Q5)
1786 .Case("v6", ARM64::Q6)
1787 .Case("v7", ARM64::Q7)
1788 .Case("v8", ARM64::Q8)
1789 .Case("v9", ARM64::Q9)
1790 .Case("v10", ARM64::Q10)
1791 .Case("v11", ARM64::Q11)
1792 .Case("v12", ARM64::Q12)
1793 .Case("v13", ARM64::Q13)
1794 .Case("v14", ARM64::Q14)
1795 .Case("v15", ARM64::Q15)
1796 .Case("v16", ARM64::Q16)
1797 .Case("v17", ARM64::Q17)
1798 .Case("v18", ARM64::Q18)
1799 .Case("v19", ARM64::Q19)
1800 .Case("v20", ARM64::Q20)
1801 .Case("v21", ARM64::Q21)
1802 .Case("v22", ARM64::Q22)
1803 .Case("v23", ARM64::Q23)
1804 .Case("v24", ARM64::Q24)
1805 .Case("v25", ARM64::Q25)
1806 .Case("v26", ARM64::Q26)
1807 .Case("v27", ARM64::Q27)
1808 .Case("v28", ARM64::Q28)
1809 .Case("v29", ARM64::Q29)
1810 .Case("v30", ARM64::Q30)
1811 .Case("v31", ARM64::Q31)
// Whether Name is a recognized vector arrangement suffix (".8b", ".4s",
// ... -- the accepted cases sit on elided lines). Comparison is done on
// the lowercased name.
1815 static bool isValidVectorKind(StringRef Name) {
1816 return StringSwitch<bool>(Name.lower())
1826 // Accept the width neutral ones, too, for verbose syntax. If those
1827 // aren't used in the right places, the token operand won't match so
1828 // all will work out.
// Decompose an already-validated vector-kind suffix (e.g. ".4s") into a
// lane count and element-kind letter. Width-neutral suffixes of the
// form ".b"/".h"/... (size 2) keep the default lane count.
1836 static void parseValidVectorKind(StringRef Name, unsigned &NumElements,
1837 char &ElementKind) {
1838 assert(isValidVectorKind(Name));
1840 ElementKind = Name.lower()[Name.size() - 1];
1843 if (Name.size() == 2)
// Parse the decimal lane count that sits between the '.' and the
// element-kind letter.
1846 // Parse the lane count
1847 Name = Name.drop_front();
1848 while (isdigit(Name.front())) {
1849 NumElements = 10 * NumElements + (Name.front() - '0');
1850 Name = Name.drop_front();
// MCTargetAsmParser entry point: parse a register at the current token,
// reporting its source range. Returns true (failure) when no register
// is recognized (tryParseRegister yields -1).
1854 bool ARM64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1856 StartLoc = getLoc();
1857 RegNo = tryParseRegister();
1858 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
1859 return (RegNo == (unsigned)-1);
1862 /// tryParseRegister - Try to parse a register name. The token must be an
1863 /// Identifier when called, and if it is a register name the token is eaten and
1864 /// the register is added to the operand list.
1865 int ARM64AsmParser::tryParseRegister() {
1866 const AsmToken &Tok = Parser.getTok();
1867 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
// Register names are matched case-insensitively via lowering.
1869 std::string lowerCase = Tok.getString().lower();
1870 unsigned RegNum = MatchRegisterName(lowerCase);
// Aliases the tablegen matcher doesn't know: x29/x30 are FP/LR, and
// x31/w31 name the zero registers.
1871 // Also handle a few aliases of registers.
1873 RegNum = StringSwitch<unsigned>(lowerCase)
1874 .Case("x29", ARM64::FP)
1875 .Case("x30", ARM64::LR)
1876 .Case("x31", ARM64::XZR)
1877 .Case("w31", ARM64::WZR)
1883 Parser.Lex(); // Eat identifier token.
1887 /// tryMatchVectorRegister - Try to parse a vector register name with optional
1888 /// kind specifier. If it is a register specifier, eat the token and return it.
// On success, Kind receives the ".<arrangement>" suffix (including the
// dot) when present. Errors are reported via TokError; the failure
// return value sits on elided lines (presumably -1 -- confirm).
1889 int ARM64AsmParser::tryMatchVectorRegister(StringRef &Kind, bool expected) {
1890 if (Parser.getTok().isNot(AsmToken::Identifier)) {
1891 TokError("vector register expected");
1895 StringRef Name = Parser.getTok().getString();
// Split "vN.kind" at the dot: Head is the register, the rest the kind.
1896 // If there is a kind specifier, it's separated from the register name by
1898 size_t Start = 0, Next = Name.find('.');
1899 StringRef Head = Name.slice(Start, Next);
1900 unsigned RegNum = matchVectorRegName(Head);
1902 if (Next != StringRef::npos) {
1903 Kind = Name.slice(Next, StringRef::npos);
1904 if (!isValidVectorKind(Kind)) {
1905 TokError("invalid vector kind qualifier");
1909 Parser.Lex(); // Eat the register token.
1914 TokError("vector register expected");
// Map a "cN"/"CN" system-control-register name to its number. The bulk
// of the size-dispatched matching logic sits on elided lines.
1918 static int MatchSysCRName(StringRef Name) {
1919 // Use the same layout as the tablegen'erated register name matcher. Ugly,
1921 switch (Name.size()) {
// Two-character names: 'c' followed by a single digit.
1925 if (Name[0] != 'c' && Name[0] != 'C')
// Three-character names: "c1x" forms.
1953 if ((Name[0] != 'c' && Name[1] != '1') || Name[1] != '1')
1974 llvm_unreachable("Unhandled SysCR operand string!");
1978 /// tryParseSysCROperand - Try to parse a system instruction CR operand name.
// Produces a k_SysCR operand on success; NoMatch when the token isn't a
// recognized cN identifier.
1979 ARM64AsmParser::OperandMatchResultTy
1980 ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
1982 const AsmToken &Tok = Parser.getTok();
1983 if (Tok.isNot(AsmToken::Identifier))
1984 return MatchOperand_NoMatch;
1986 int Num = MatchSysCRName(Tok.getString());
1988 return MatchOperand_NoMatch;
1990 Parser.Lex(); // Eat identifier token.
1991 Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
1992 return MatchOperand_Success;
1995 /// tryParsePrefetch - Try to parse a prefetch operand.
// Accepts either "#<imm>" in [0,31] or a named prefetch hint resolved
// through the PRFM mapper; either way a k_Prefetch operand is pushed.
1996 ARM64AsmParser::OperandMatchResultTy
1997 ARM64AsmParser::tryParsePrefetch(OperandVector &Operands) {
1999 const AsmToken &Tok = Parser.getTok();
2000 // Either an identifier for named values or a 5-bit immediate.
2001 if (Tok.is(AsmToken::Hash)) {
2002 Parser.Lex(); // Eat hash token.
2003 const MCExpr *ImmVal;
2004 if (getParser().parseExpression(ImmVal))
2005 return MatchOperand_ParseFail;
// The immediate form must fold to a constant.
2007 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2009 TokError("immediate value expected for prefetch operand");
2010 return MatchOperand_ParseFail;
// 5-bit field: values above 31 are rejected (check on elided line).
2012 unsigned prfop = MCE->getValue();
2014 TokError("prefetch operand out of range, [0,31] expected");
2015 return MatchOperand_ParseFail;
2018 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2019 return MatchOperand_Success;
// Named-hint form: look the identifier up in the PRFM name table.
2022 if (Tok.isNot(AsmToken::Identifier)) {
2023 TokError("pre-fetch hint expected");
2024 return MatchOperand_ParseFail;
2028 unsigned prfop = ARM64PRFM::PRFMMapper().fromString(Tok.getString(), Valid);
2030 TokError("pre-fetch hint expected");
2031 return MatchOperand_ParseFail;
2034 Parser.Lex(); // Eat identifier token.
2035 Operands.push_back(ARM64Operand::CreatePrefetch(prfop, S, getContext()));
2036 return MatchOperand_Success;
2039 /// tryParseAdrpLabel - Parse and validate a source label for the ADRP
// Classifies the parsed symbolic expression and enforces that only
// page-granular references (@page/@gotpage/@tlvppage on Darwin, the
// GOT/TLS page kinds on ELF, or a bare symbol which becomes an implicit
// ABS_PAGE reference) are accepted.
2041 ARM64AsmParser::OperandMatchResultTy
2042 ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
2045 if (parseSymbolicImmVal(Expr))
2046 return MatchOperand_ParseFail;
2048 ARM64MCExpr::VariantKind ELFRefKind;
2049 MCSymbolRefExpr::VariantKind DarwinRefKind;
2050 const MCConstantExpr *Addend;
2051 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2052 Error(S, "modified label reference + constant expected");
2053 return MatchOperand_ParseFail;
2056 if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
2057 ELFRefKind == ARM64MCExpr::VK_INVALID) {
2058 // No modifier was specified at all; this is the syntax for an ELF basic
2059 // ADRP relocation (unfortunately).
2060 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
// GOT/TLVP page references cannot carry an addend.
2061 } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
2062 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
2064 Error(S, "gotpage label reference not allowed an addend");
2065 return MatchOperand_ParseFail;
2066 } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
2067 DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
2068 DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
2069 ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
2070 ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
2071 ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
2072 // The operand must be an @page or @gotpage qualified symbolref.
2073 Error(S, "page or gotpage label reference expected");
2074 return MatchOperand_ParseFail;
2077 // We have a label reference possibly with addend. The addend is a raw value
2078 // here. The linker will adjust it to only reference the page.
2079 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2080 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2082 return MatchOperand_Success;
2085 /// tryParseAdrLabel - Parse and validate a source label for the ADR
// Accepts only an unqualified assembler-local ("L"-prefixed) symbol
// reference; the FIXMEs note this is Darwin-specific behavior.
2087 ARM64AsmParser::OperandMatchResultTy
2088 ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
2091 if (getParser().parseExpression(Expr))
2092 return MatchOperand_ParseFail;
2094 // The operand must be an un-qualified assembler local symbolref.
2095 // FIXME: wrong for ELF.
2096 if (const MCSymbolRefExpr *SRE = dyn_cast<const MCSymbolRefExpr>(Expr)) {
2097 // FIXME: Should reference the MachineAsmInfo to get the private prefix.
2098 bool isTemporary = SRE->getSymbol().getName().startswith("L");
2099 if (!isTemporary || SRE->getKind() != MCSymbolRefExpr::VK_None) {
2100 Error(S, "unqualified, assembler-local label name expected");
2101 return MatchOperand_ParseFail;
2105 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2106 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
2108 return MatchOperand_Success;
2111 /// tryParseFPImm - A floating point immediate expression operand.
// Parses "#<real>" (encoded through getFP64Imm; zero is allowed through
// with -1 so later processing can substitute the zero register) or an
// integer form: a "0x" literal taken as a pre-encoded 8-bit value, else
// the integer token reinterpreted as a double and encoded.
2112 ARM64AsmParser::OperandMatchResultTy
2113 ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
2116 if (Parser.getTok().isNot(AsmToken::Hash))
2117 return MatchOperand_NoMatch;
2118 Parser.Lex(); // Eat the '#'.
2120 // Handle negation, as that still comes through as a separate token.
2121 bool isNegative = false;
2122 if (Parser.getTok().is(AsmToken::Minus)) {
2126 const AsmToken &Tok = Parser.getTok();
2127 if (Tok.is(AsmToken::Real)) {
2128 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2129 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2130 // If we had a '-' in front, toggle the sign bit.
2131 IntVal ^= (uint64_t)isNegative << 63;
2132 int Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2133 Parser.Lex(); // Eat the token.
2134 // Check for out of range values. As an exception, we let Zero through,
2135 // as we handle that special case in post-processing before matching in
2136 // order to use the zero register for it.
2137 if (Val == -1 && !RealVal.isZero()) {
2138 TokError("floating point value out of range");
2139 return MatchOperand_ParseFail;
2141 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2142 return MatchOperand_Success;
2144 if (Tok.is(AsmToken::Integer)) {
// Hex form: the value IS the 8-bit encoding, range-checked directly.
2146 if (!isNegative && Tok.getString().startswith("0x")) {
2147 Val = Tok.getIntVal();
2148 if (Val > 255 || Val < 0) {
2149 TokError("encoded floating point value out of range");
2150 return MatchOperand_ParseFail;
// Decimal integer: treat the spelling as a double and encode it.
2153 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
2154 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
2155 // If we had a '-' in front, toggle the sign bit.
2156 IntVal ^= (uint64_t)isNegative << 63;
2157 Val = ARM64_AM::getFP64Imm(APInt(64, IntVal));
2159 Parser.Lex(); // Eat the token.
2160 Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
2161 return MatchOperand_Success;
2164 TokError("invalid floating point immediate");
2165 return MatchOperand_ParseFail;
2168 /// parseCondCodeString - Parse a Condition Code string.
// Case-insensitive lookup; "hs"/"lo" alias "cs"/"cc". Returns
// ARM64CC::Invalid for anything unrecognized.
2169 unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
2170 unsigned CC = StringSwitch<unsigned>(Cond.lower())
2171 .Case("eq", ARM64CC::EQ)
2172 .Case("ne", ARM64CC::NE)
2173 .Case("cs", ARM64CC::CS)
2174 .Case("hs", ARM64CC::CS)
2175 .Case("cc", ARM64CC::CC)
2176 .Case("lo", ARM64CC::CC)
2177 .Case("mi", ARM64CC::MI)
2178 .Case("pl", ARM64CC::PL)
2179 .Case("vs", ARM64CC::VS)
2180 .Case("vc", ARM64CC::VC)
2181 .Case("hi", ARM64CC::HI)
2182 .Case("ls", ARM64CC::LS)
2183 .Case("ge", ARM64CC::GE)
2184 .Case("lt", ARM64CC::LT)
2185 .Case("gt", ARM64CC::GT)
2186 .Case("le", ARM64CC::LE)
2187 .Case("al", ARM64CC::AL)
2188 .Case("nv", ARM64CC::NV)
2189 .Default(ARM64CC::Invalid);
2193 /// parseCondCode - Parse a Condition Code operand.
// Consumes the identifier, optionally inverts the code (for csel-style
// aliases), and pushes it as a constant immediate operand. Returns true
// on error per the parser convention.
2194 bool ARM64AsmParser::parseCondCode(OperandVector &Operands,
2195 bool invertCondCode) {
2197 const AsmToken &Tok = Parser.getTok();
2198 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2200 StringRef Cond = Tok.getString();
2201 unsigned CC = parseCondCodeString(Cond);
2202 if (CC == ARM64CC::Invalid)
2203 return TokError("invalid condition code");
2204 Parser.Lex(); // Eat identifier token.
2207 CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
2209 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
2211 ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
2215 /// ParseOptionalShift - Some operands take an optional shift argument. Parse
2216 /// them if present.
// Recognizes lsl/lsr/asr/ror/msl in either case, then requires a
// "#<constant>" amount that fits in 6 bits, and pushes a k_Shifter
// operand. Returns true on error per the parser convention.
2217 bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
2218 const AsmToken &Tok = Parser.getTok();
2219 ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
2220 .Case("lsl", ARM64_AM::LSL)
2221 .Case("lsr", ARM64_AM::LSR)
2222 .Case("asr", ARM64_AM::ASR)
2223 .Case("ror", ARM64_AM::ROR)
2224 .Case("msl", ARM64_AM::MSL)
2225 .Case("LSL", ARM64_AM::LSL)
2226 .Case("LSR", ARM64_AM::LSR)
2227 .Case("ASR", ARM64_AM::ASR)
2228 .Case("ROR", ARM64_AM::ROR)
2229 .Case("MSL", ARM64_AM::MSL)
2230 .Default(ARM64_AM::InvalidShift);
2231 if (ShOp == ARM64_AM::InvalidShift)
2234 SMLoc S = Tok.getLoc();
2237 // We expect a number here.
2238 if (getLexer().isNot(AsmToken::Hash))
2239 return TokError("immediate value expected for shifter operand");
2240 Parser.Lex(); // Eat the '#'.
2242 SMLoc ExprLoc = getLoc();
2243 const MCExpr *ImmVal;
2244 if (getParser().parseExpression(ImmVal))
2247 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2249 return TokError("immediate value expected for shifter operand");
// Shift amounts are a 6-bit field; reject anything wider.
2251 if ((MCE->getValue() & 0x3f) != MCE->getValue())
2252 return Error(ExprLoc, "immediate value too large for shifter operand");
2254 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2256 ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
2260 /// parseOptionalExtend - Some operands take an optional extend argument. Parse
2261 /// them if present.
// Recognizes the uxt*/sxt* mnemonics (either case; "lsl"/"LSL" alias
// UXTX). The "#<amount>" is optional: end-of-statement, comma, or a
// missing '#' all yield an extend with amount 0.
2262 bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
2263 const AsmToken &Tok = Parser.getTok();
2264 ARM64_AM::ExtendType ExtOp =
2265 StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2266 .Case("uxtb", ARM64_AM::UXTB)
2267 .Case("uxth", ARM64_AM::UXTH)
2268 .Case("uxtw", ARM64_AM::UXTW)
2269 .Case("uxtx", ARM64_AM::UXTX)
2270 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2271 .Case("sxtb", ARM64_AM::SXTB)
2272 .Case("sxth", ARM64_AM::SXTH)
2273 .Case("sxtw", ARM64_AM::SXTW)
2274 .Case("sxtx", ARM64_AM::SXTX)
2275 .Case("UXTB", ARM64_AM::UXTB)
2276 .Case("UXTH", ARM64_AM::UXTH)
2277 .Case("UXTW", ARM64_AM::UXTW)
2278 .Case("UXTX", ARM64_AM::UXTX)
2279 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2280 .Case("SXTB", ARM64_AM::SXTB)
2281 .Case("SXTH", ARM64_AM::SXTH)
2282 .Case("SXTW", ARM64_AM::SXTW)
2283 .Case("SXTX", ARM64_AM::SXTX)
2284 .Default(ARM64_AM::InvalidExtend)
2285 if (ExtOp == ARM64_AM::InvalidExtend)
2288 SMLoc S = Tok.getLoc();
// No amount written: implicit #0.
2291 if (getLexer().is(AsmToken::EndOfStatement) ||
2292 getLexer().is(AsmToken::Comma)) {
2293 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2295 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2299 if (getLexer().isNot(AsmToken::Hash)) {
2300 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2302 ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
2306 Parser.Lex(); // Eat the '#'.
2308 const MCExpr *ImmVal;
2309 if (getParser().parseExpression(ImmVal))
2312 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2314 return TokError("immediate value expected for extend operand");
2316 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
2318 ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
2322 /// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
2323 /// the SYS instruction. Parse them specially so that we create a SYS MCInst.
/// Each alias expands to SYS #op1, Cn, Cm, #op2 (see ARMv8 ARM, "System
/// instruction class encoding"). Operand-name matching is case-insensitive.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are
/// added/corrected. Four encoding comments below were corrected to match
/// the SYS_ALIAS arguments (the code agrees with the ARM ARM; the old
/// comments did not).
2324 bool ARM64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
2325 OperandVector &Operands) {
2326 if (Name.find('.') != StringRef::npos)
2327 return TokError("invalid operand")
2331 ARM64Operand::CreateToken("sys", false, NameLoc, getContext()));
2333 const AsmToken &Tok = Parser.getTok();
2334 StringRef Op = Tok.getString();
2335 SMLoc S = Tok.getLoc();
2337 const MCExpr *Expr = 0;
// Pushes the four SYS operands (op1, Cn, Cm, op2) for one alias.
2339 #define SYS_ALIAS(op1, Cn, Cm, op2) \
2341 Expr = MCConstantExpr::Create(op1, getContext()); \
2342 Operands.push_back( \
2343 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2344 Operands.push_back( \
2345 ARM64Operand::CreateSysCR(Cn, S, getLoc(), getContext())); \
2346 Operands.push_back( \
2347 ARM64Operand::CreateSysCR(Cm, S, getLoc(), getContext())); \
2348 Expr = MCConstantExpr::Create(op2, getContext()); \
2349 Operands.push_back( \
2350 ARM64Operand::CreateImm(Expr, S, getLoc(), getContext())); \
2353 if (Mnemonic == "ic") {
2354 if (!Op.compare_lower("ialluis")) {
2355 // SYS #0, C7, C1, #0
2356 SYS_ALIAS(0, 7, 1, 0);
2357 } else if (!Op.compare_lower("iallu")) {
2358 // SYS #0, C7, C5, #0
2359 SYS_ALIAS(0, 7, 5, 0);
2360 } else if (!Op.compare_lower("ivau")) {
2361 // SYS #3, C7, C5, #1
2362 SYS_ALIAS(3, 7, 5, 1);
2364 return TokError("invalid operand for IC instruction");
2366 } else if (Mnemonic == "dc") {
2367 if (!Op.compare_lower("zva")) {
2368 // SYS #3, C7, C4, #1
2369 SYS_ALIAS(3, 7, 4, 1);
2370 } else if (!Op.compare_lower("ivac")) {
2371 // SYS #0, C7, C6, #1
2372 SYS_ALIAS(0, 7, 6, 1);
2373 } else if (!Op.compare_lower("isw")) {
2374 // SYS #0, C7, C6, #2
2375 SYS_ALIAS(0, 7, 6, 2);
2376 } else if (!Op.compare_lower("cvac")) {
2377 // SYS #3, C7, C10, #1
2378 SYS_ALIAS(3, 7, 10, 1);
2379 } else if (!Op.compare_lower("csw")) {
2380 // SYS #0, C7, C10, #2
2381 SYS_ALIAS(0, 7, 10, 2);
2382 } else if (!Op.compare_lower("cvau")) {
2383 // SYS #3, C7, C11, #1
2384 SYS_ALIAS(3, 7, 11, 1);
2385 } else if (!Op.compare_lower("civac")) {
2386 // SYS #3, C7, C14, #1
2387 SYS_ALIAS(3, 7, 14, 1);
2388 } else if (!Op.compare_lower("cisw")) {
2389 // SYS #0, C7, C14, #2
2390 SYS_ALIAS(0, 7, 14, 2);
2392 return TokError("invalid operand for DC instruction");
2394 } else if (Mnemonic == "at") {
2395 if (!Op.compare_lower("s1e1r")) {
2396 // SYS #0, C7, C8, #0
2397 SYS_ALIAS(0, 7, 8, 0);
2398 } else if (!Op.compare_lower("s1e2r")) {
2399 // SYS #4, C7, C8, #0
2400 SYS_ALIAS(4, 7, 8, 0);
2401 } else if (!Op.compare_lower("s1e3r")) {
2402 // SYS #6, C7, C8, #0
2403 SYS_ALIAS(6, 7, 8, 0);
2404 } else if (!Op.compare_lower("s1e1w")) {
2405 // SYS #0, C7, C8, #1
2406 SYS_ALIAS(0, 7, 8, 1);
2407 } else if (!Op.compare_lower("s1e2w")) {
2408 // SYS #4, C7, C8, #1
2409 SYS_ALIAS(4, 7, 8, 1);
2410 } else if (!Op.compare_lower("s1e3w")) {
2411 // SYS #6, C7, C8, #1
2412 SYS_ALIAS(6, 7, 8, 1);
2413 } else if (!Op.compare_lower("s1e0r")) {
2414 // SYS #0, C7, C8, #2
2415 SYS_ALIAS(0, 7, 8, 2);
2416 } else if (!Op.compare_lower("s1e0w")) {
2417 // SYS #0, C7, C8, #3
2418 SYS_ALIAS(0, 7, 8, 3);
2419 } else if (!Op.compare_lower("s12e1r")) {
2420 // SYS #4, C7, C8, #4
2421 SYS_ALIAS(4, 7, 8, 4);
2422 } else if (!Op.compare_lower("s12e1w")) {
2423 // SYS #4, C7, C8, #5
2424 SYS_ALIAS(4, 7, 8, 5);
2425 } else if (!Op.compare_lower("s12e0r")) {
2426 // SYS #4, C7, C8, #6
2427 SYS_ALIAS(4, 7, 8, 6);
2428 } else if (!Op.compare_lower("s12e0w")) {
2429 // SYS #4, C7, C8, #7
2430 SYS_ALIAS(4, 7, 8, 7);
2432 return TokError("invalid operand for AT instruction");
2434 } else if (Mnemonic == "tlbi") {
2435 if (!Op.compare_lower("vmalle1is")) {
2436 // SYS #0, C8, C3, #0
2437 SYS_ALIAS(0, 8, 3, 0);
2438 } else if (!Op.compare_lower("alle2is")) {
2439 // SYS #4, C8, C3, #0
2440 SYS_ALIAS(4, 8, 3, 0);
2441 } else if (!Op.compare_lower("alle3is")) {
2442 // SYS #6, C8, C3, #0
2443 SYS_ALIAS(6, 8, 3, 0);
2444 } else if (!Op.compare_lower("vae1is")) {
2445 // SYS #0, C8, C3, #1
2446 SYS_ALIAS(0, 8, 3, 1);
2447 } else if (!Op.compare_lower("vae2is")) {
2448 // SYS #4, C8, C3, #1
2449 SYS_ALIAS(4, 8, 3, 1);
2450 } else if (!Op.compare_lower("vae3is")) {
2451 // SYS #6, C8, C3, #1
2452 SYS_ALIAS(6, 8, 3, 1);
2453 } else if (!Op.compare_lower("aside1is")) {
2454 // SYS #0, C8, C3, #2
2455 SYS_ALIAS(0, 8, 3, 2);
2456 } else if (!Op.compare_lower("vaae1is")) {
2457 // SYS #0, C8, C3, #3
2458 SYS_ALIAS(0, 8, 3, 3);
2459 } else if (!Op.compare_lower("alle1is")) {
2460 // SYS #4, C8, C3, #4
2461 SYS_ALIAS(4, 8, 3, 4);
2462 } else if (!Op.compare_lower("vale1is")) {
2463 // SYS #0, C8, C3, #5
2464 SYS_ALIAS(0, 8, 3, 5);
2465 } else if (!Op.compare_lower("vaale1is")) {
2466 // SYS #0, C8, C3, #7
2467 SYS_ALIAS(0, 8, 3, 7);
2468 } else if (!Op.compare_lower("vmalle1")) {
2469 // SYS #0, C8, C7, #0
2470 SYS_ALIAS(0, 8, 7, 0);
2471 } else if (!Op.compare_lower("alle2")) {
2472 // SYS #4, C8, C7, #0
2473 SYS_ALIAS(4, 8, 7, 0);
2474 } else if (!Op.compare_lower("vale2is")) {
2475 // SYS #4, C8, C3, #5
2476 SYS_ALIAS(4, 8, 3, 5);
2477 } else if (!Op.compare_lower("vale3is")) {
2478 // SYS #6, C8, C3, #5
2479 SYS_ALIAS(6, 8, 3, 5);
2480 } else if (!Op.compare_lower("alle3")) {
2481 // SYS #6, C8, C7, #0
2482 SYS_ALIAS(6, 8, 7, 0);
2483 } else if (!Op.compare_lower("vae1")) {
2484 // SYS #0, C8, C7, #1
2485 SYS_ALIAS(0, 8, 7, 1);
2486 } else if (!Op.compare_lower("vae2")) {
2487 // SYS #4, C8, C7, #1
2488 SYS_ALIAS(4, 8, 7, 1);
2489 } else if (!Op.compare_lower("vae3")) {
2490 // SYS #6, C8, C7, #1
2491 SYS_ALIAS(6, 8, 7, 1);
2492 } else if (!Op.compare_lower("aside1")) {
2493 // SYS #0, C8, C7, #2
2494 SYS_ALIAS(0, 8, 7, 2);
2495 } else if (!Op.compare_lower("vaae1")) {
2496 // SYS #0, C8, C7, #3
2497 SYS_ALIAS(0, 8, 7, 3);
2498 } else if (!Op.compare_lower("alle1")) {
2499 // SYS #4, C8, C7, #4
2500 SYS_ALIAS(4, 8, 7, 4);
2501 } else if (!Op.compare_lower("vale1")) {
2502 // SYS #0, C8, C7, #5
2503 SYS_ALIAS(0, 8, 7, 5);
2504 } else if (!Op.compare_lower("vale2")) {
2505 // SYS #4, C8, C7, #5
2506 SYS_ALIAS(4, 8, 7, 5);
2507 } else if (!Op.compare_lower("vale3")) {
2508 // SYS #6, C8, C7, #5
2509 SYS_ALIAS(6, 8, 7, 5);
2510 } else if (!Op.compare_lower("vaale1")) {
2511 // SYS #0, C8, C7, #7
2512 SYS_ALIAS(0, 8, 7, 7);
2513 } else if (!Op.compare_lower("ipas2e1")) {
2514 // SYS #4, C8, C4, #1
2515 SYS_ALIAS(4, 8, 4, 1);
2516 } else if (!Op.compare_lower("ipas2le1")) {
2517 // SYS #4, C8, C4, #5
2518 SYS_ALIAS(4, 8, 4, 5);
2519 } else if (!Op.compare_lower("ipas2e1is")) {
2520 // SYS #4, C8, C0, #1
2521 SYS_ALIAS(4, 8, 0, 1);
2522 } else if (!Op.compare_lower("ipas2le1is")) {
2523 // SYS #4, C8, C0, #5
2524 SYS_ALIAS(4, 8, 0, 5);
2525 } else if (!Op.compare_lower("vmalls12e1")) {
2526 // SYS #4, C8, C7, #6
2527 SYS_ALIAS(4, 8, 7, 6);
2528 } else if (!Op.compare_lower("vmalls12e1is")) {
2529 // SYS #4, C8, C3, #6
2530 SYS_ALIAS(4, 8, 3, 6);
2532 return TokError("invalid operand for TLBI instruction");
2538 Parser.Lex(); // Eat operand.
// "all"-style ops (e.g. TLBI ALLE1) take no register; every other alias
// requires one.
2540 bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
2541 bool HasRegister = false;
2543 // Check for the optional register operand.
2544 if (getLexer().is(AsmToken::Comma)) {
2545 Parser.Lex(); // Eat comma.
2547 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
2548 return TokError("expected register operand");
2553 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2554 Parser.eatToEndOfStatement();
2555 return TokError("unexpected token in argument list");
// Diagnose a missing or spurious register against the op's requirement.
2558 if (ExpectRegister && !HasRegister) {
2559 return TokError("specified " + Mnemonic + " op requires a register");
2561 else if (!ExpectRegister && HasRegister) {
2562 return TokError("specified " + Mnemonic + " op does not use a register");
2565 Parser.Lex(); // Consume the EndOfStatement
/// tryParseBarrierOperand - Parse the operand of a DMB/DSB/ISB barrier
/// instruction: either an immediate in the range [0, 15] after a '#', or a
/// named barrier option ("sy", "ish", ...) resolved via DBarrierMapper.
/// For ISB, "sy" is the only legal named option.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2569 ARM64AsmParser::OperandMatchResultTy
2570 ARM64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
2571 const AsmToken &Tok = Parser.getTok();
2573 // Can be either a #imm style literal or an option name
2574 if (Tok.is(AsmToken::Hash)) {
2575 // Immediate operand.
2576 Parser.Lex(); // Eat the '#'
2577 const MCExpr *ImmVal;
2578 SMLoc ExprLoc = getLoc();
2579 if (getParser().parseExpression(ImmVal))
2580 return MatchOperand_ParseFail;
2581 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2583 Error(ExprLoc, "immediate value expected for barrier operand");
2584 return MatchOperand_ParseFail;
// CRm field of the barrier encoding is 4 bits wide.
2586 if (MCE->getValue() < 0 || MCE->getValue() > 15) {
2587 Error(ExprLoc, "barrier operand out of range");
2588 return MatchOperand_ParseFail;
2591 ARM64Operand::CreateBarrier(MCE->getValue(), ExprLoc, getContext()));
2592 return MatchOperand_Success;
2595 if (Tok.isNot(AsmToken::Identifier)) {
2596 TokError("invalid operand for instruction");
2597 return MatchOperand_ParseFail;
2601 unsigned Opt = ARM64DB::DBarrierMapper().fromString(Tok.getString(), Valid);
2603 TokError("invalid barrier option name");
2604 return MatchOperand_ParseFail;
2607 // The only valid named option for ISB is 'sy'
2608 if (Mnemonic == "isb" && Opt != ARM64DB::SY) {
2609 TokError("'sy' or #imm operand expected");
2610 return MatchOperand_ParseFail;
2613 Operands.push_back(ARM64Operand::CreateBarrier(Opt, getLoc(), getContext()));
2614 Parser.Lex(); // Consume the option
2616 return MatchOperand_Success;
/// tryParseSysReg - Parse a system-register operand (MRS/MSR). Any identifier
/// token is accepted here; validation of the register name happens later.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2619 ARM64AsmParser::OperandMatchResultTy
2620 ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
2621 const AsmToken &Tok = Parser.getTok();
2623 if (Tok.isNot(AsmToken::Identifier))
2624 return MatchOperand_NoMatch;
2626 Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
2628 Parser.Lex(); // Eat identifier
2630 return MatchOperand_Success;
2633 /// tryParseVectorRegister - Parse a vector register operand.
/// Matches a vector register (with optional ".8b"-style kind suffix, emitted
/// as a separate literal token) and an optional constant lane index in
/// brackets, e.g. "v0.4s[1]".
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2634 bool ARM64AsmParser::tryParseVectorRegister(OperandVector &Operands) {
2635 if (Parser.getTok().isNot(AsmToken::Identifier))
2639 // Check for a vector register specifier first.
2641 int64_t Reg = tryMatchVectorRegister(Kind, false);
2645 ARM64Operand::CreateReg(Reg, true, S, getLoc(), getContext()));
2646 // If there was an explicit qualifier, that goes on as a literal text
2649 Operands.push_back(ARM64Operand::CreateToken(Kind, false, S, getContext()));
2651 // If there is an index specifier following the register, parse that too.
2652 if (Parser.getTok().is(AsmToken::LBrac)) {
2653 SMLoc SIdx = getLoc();
2654 Parser.Lex(); // Eat left bracket token.
2656 const MCExpr *ImmVal;
2657 if (getParser().parseExpression(ImmVal))
// The lane index must be a compile-time constant.
2659 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2661 TokError("immediate value expected for vector index");
2666 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2667 Error(E, "']' expected");
2671 Parser.Lex(); // Eat right bracket token.
2673 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
2680 /// parseRegister - Parse a non-vector register operand.
/// Tries a vector register first, then a scalar one. Also swallows a literal
/// "[1]" suffix as three tokens for the few instructions (e.g. FMOVXDhighr)
/// that encode it as part of the asm string.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2681 bool ARM64AsmParser::parseRegister(OperandVector &Operands) {
2683 // Try for a vector register.
2684 if (!tryParseVectorRegister(Operands))
2687 // Try for a scalar register.
2688 int64_t Reg = tryParseRegister();
2692 ARM64Operand::CreateReg(Reg, false, S, getLoc(), getContext()));
2694 // A small number of instructions (FMOVXDhighr, for example) have "[1]"
2695 // as a string token in the instruction itself.
2696 if (getLexer().getKind() == AsmToken::LBrac) {
2697 SMLoc LBracS = getLoc();
2699 const AsmToken &Tok = Parser.getTok();
2700 if (Tok.is(AsmToken::Integer)) {
2701 SMLoc IntS = getLoc();
2702 int64_t Val = Tok.getIntVal();
// Only the literal index 1 is meaningful here; emit "[", "1", "]" as
// plain tokens so the matcher can consume them.
2705 if (getLexer().getKind() == AsmToken::RBrac) {
2706 SMLoc RBracS = getLoc();
2709 ARM64Operand::CreateToken("[", false, LBracS, getContext()));
2711 ARM64Operand::CreateToken("1", false, IntS, getContext()));
2713 ARM64Operand::CreateToken("]", false, RBracS, getContext()));
2723 /// tryParseNoIndexMemory - Custom parser method for memory operands that
2724 /// do not allow base register writeback modes,
2725 /// or those that handle writeback separately from
2726 /// the memory operand (like the AdvSIMD ldX/stX
/// Accepts only the "[Xn]" form: a base register in brackets, no offset.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added (and the
/// "regisrer" typo above fixed).
2728 ARM64AsmParser::OperandMatchResultTy
2729 ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
2730 if (Parser.getTok().isNot(AsmToken::LBrac))
2731 return MatchOperand_NoMatch;
2733 Parser.Lex(); // Eat left bracket token.
2735 const AsmToken &BaseRegTok = Parser.getTok();
2736 if (BaseRegTok.isNot(AsmToken::Identifier)) {
2737 Error(BaseRegTok.getLoc(), "register expected");
2738 return MatchOperand_ParseFail;
2741 int64_t Reg = tryParseRegister();
2743 Error(BaseRegTok.getLoc(), "register expected");
2744 return MatchOperand_ParseFail;
2748 if (Parser.getTok().isNot(AsmToken::RBrac)) {
2749 Error(E, "']' expected");
2750 return MatchOperand_ParseFail;
2753 Parser.Lex(); // Eat right bracket token.
// Offset is a fixed 0 for this addressing mode.
2755 Operands.push_back(ARM64Operand::CreateMem(Reg, 0, S, E, E, getContext()));
2756 return MatchOperand_Success;
2759 /// parseMemory - Parse a memory operand for a basic load/store instruction.
/// Handles: "[Xn]", "[Xn, Rm{, extend {#amt}}]" (register offset),
/// "[Xn, #imm]" / "[Xn, :modifier:sym]" (immediate/symbolic offset), and a
/// trailing '!' for pre-indexed writeback.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2760 bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
2761 assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
2763 Parser.Lex(); // Eat left bracket token.
2765 const AsmToken &BaseRegTok = Parser.getTok();
2766 if (BaseRegTok.isNot(AsmToken::Identifier))
2767 return Error(BaseRegTok.getLoc(), "register expected");
2769 int64_t Reg = tryParseRegister();
2771 return Error(BaseRegTok.getLoc(), "register expected");
2773 // If there is an offset expression, parse it.
2774 const MCExpr *OffsetExpr = 0;
2776 if (Parser.getTok().is(AsmToken::Comma)) {
2777 Parser.Lex(); // Eat the comma.
2778 OffsetLoc = getLoc();
// Register offset? (branch for register offset elided in this extract)
2781 const AsmToken &OffsetRegTok = Parser.getTok();
2782 int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
2784 // Default shift is LSL, with an omitted shift. We use the third bit of
2785 // the extend value to indicate presence/omission of the immediate offset.
2786 ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
2787 int64_t ShiftVal = 0;
2788 bool ExplicitShift = false;
2790 if (Parser.getTok().is(AsmToken::Comma)) {
2791 // Embedded extend operand.
2792 Parser.Lex(); // Eat the comma
2794 SMLoc ExtLoc = getLoc();
2795 const AsmToken &Tok = Parser.getTok();
2796 ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
2797 .Case("uxtw", ARM64_AM::UXTW)
2798 .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
2799 .Case("sxtw", ARM64_AM::SXTW)
2800 .Case("sxtx", ARM64_AM::SXTX)
2801 .Case("UXTW", ARM64_AM::UXTW)
2802 .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
2803 .Case("SXTW", ARM64_AM::SXTW)
2804 .Case("SXTX", ARM64_AM::SXTX)
2805 .Default(ARM64_AM::InvalidExtend);
2806 if (ExtOp == ARM64_AM::InvalidExtend)
2807 return Error(ExtLoc, "expected valid extend operation");
2809 Parser.Lex(); // Eat the extend op.
2811 if (getLexer().is(AsmToken::RBrac)) {
2812 // No immediate operand.
2813 if (ExtOp == ARM64_AM::UXTX)
2814 return Error(ExtLoc, "LSL extend requires immediate operand");
2815 } else if (getLexer().is(AsmToken::Hash)) {
2816 // Immediate operand.
2817 Parser.Lex(); // Eat the '#'
2818 const MCExpr *ImmVal;
2819 SMLoc ExprLoc = getLoc();
2820 if (getParser().parseExpression(ImmVal))
2822 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2824 return TokError("immediate value expected for extend operand");
2826 ExplicitShift = true;
2827 ShiftVal = MCE->getValue();
// Extend/shift amount for a register offset is at most 4 (LSL #4).
2828 if (ShiftVal < 0 || ShiftVal > 4)
2829 return Error(ExprLoc, "immediate operand out of range");
2831 return Error(getLoc(), "expected immediate operand");
2834 if (Parser.getTok().isNot(AsmToken::RBrac))
2835 return Error(getLoc(), "']' expected");
2837 Parser.Lex(); // Eat right bracket token.
2840 Operands.push_back(ARM64Operand::CreateRegOffsetMem(
2841 Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
2844 // Immediate expressions.
2845 } else if (Parser.getTok().is(AsmToken::Hash)) {
2846 Parser.Lex(); // Eat hash token.
2848 if (parseSymbolicImmVal(OffsetExpr))
2851 // FIXME: We really should make sure that we're dealing with a LDR/STR
2852 // instruction that can legally have a symbolic expression here.
2853 // Symbol reference.
2854 if (Parser.getTok().isNot(AsmToken::Identifier) &&
2855 Parser.getTok().isNot(AsmToken::String))
2856 return Error(getLoc(), "identifier or immediate expression expected");
2857 if (getParser().parseExpression(OffsetExpr))
2859 // If this is a plain ref, make sure a legal variant kind was specified.
2860 // Otherwise, it's a more complicated expression and we have to just
2861 // assume it's OK and let the relocation stuff puke if it's not.
2862 ARM64MCExpr::VariantKind ELFRefKind;
2863 MCSymbolRefExpr::VariantKind DarwinRefKind;
2864 const MCConstantExpr *Addend;
2865 if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
2867 assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
2868 "ELF symbol modifiers not supported here yet");
2870 switch (DarwinRefKind) {
2872 return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
2873 case MCSymbolRefExpr::VK_GOTPAGEOFF:
2874 case MCSymbolRefExpr::VK_PAGEOFF:
2875 case MCSymbolRefExpr::VK_TLVPPAGEOFF:
2876 // These are what we're expecting.
2884 if (Parser.getTok().isNot(AsmToken::RBrac))
2885 return Error(E, "']' expected");
2887 Parser.Lex(); // Eat right bracket token.
2889 // Create the memory operand.
2891 ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
2893 // Check for a '!', indicating pre-indexed addressing with writeback.
2894 if (Parser.getTok().is(AsmToken::Exclaim)) {
2895 // There needs to have been an immediate or wback doesn't make sense.
2897 return Error(E, "missing offset for pre-indexed addressing");
2898 // Pre-indexed with writeback must have a constant expression for the
2899 // offset. FIXME: Theoretically, we'd like to allow fixups so long
2900 // as they don't require a relocation.
2901 if (!isa<MCConstantExpr>(OffsetExpr))
2902 return Error(OffsetLoc, "constant immediate expression expected");
2904 // Create the Token operand for the '!'.
2905 Operands.push_back(ARM64Operand::CreateToken(
2906 "!", false, Parser.getTok().getLoc(), getContext()));
2907 Parser.Lex(); // Eat the '!' token.
/// parseSymbolicImmVal - Parse an immediate expression with an optional
/// leading ":modifier:" ELF relocation specifier (e.g. ":lo12:sym"). On a
/// recognized modifier, the parsed expression is wrapped in an ARM64MCExpr
/// carrying the corresponding VariantKind.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
2913 bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
2914 bool HasELFModifier = false;
2915 ARM64MCExpr::VariantKind RefKind;
2917 if (Parser.getTok().is(AsmToken::Colon)) {
2918 Parser.Lex(); // Eat ':"
2919 HasELFModifier = true;
2921 if (Parser.getTok().isNot(AsmToken::Identifier)) {
2922 Error(Parser.getTok().getLoc(),
2923 "expect relocation specifier in operand after ':'");
// Modifier names are matched case-insensitively via a lowered copy.
2927 std::string LowerCase = Parser.getTok().getIdentifier().lower();
2928 RefKind = StringSwitch<ARM64MCExpr::VariantKind>(LowerCase)
2929 .Case("lo12", ARM64MCExpr::VK_LO12)
2930 .Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
2931 .Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
2932 .Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
2933 .Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
2934 .Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
2935 .Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
2936 .Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
2937 .Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
2938 .Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
2939 .Case("dtprel_g1_nc", ARM64MCExpr::VK_DTPREL_G1_NC)
2940 .Case("dtprel_g0", ARM64MCExpr::VK_DTPREL_G0)
2941 .Case("dtprel_g0_nc", ARM64MCExpr::VK_DTPREL_G0_NC)
2942 .Case("dtprel_lo12", ARM64MCExpr::VK_DTPREL_LO12)
2943 .Case("dtprel_lo12_nc", ARM64MCExpr::VK_DTPREL_LO12_NC)
2944 .Case("tprel_g2", ARM64MCExpr::VK_TPREL_G2)
2945 .Case("tprel_g1", ARM64MCExpr::VK_TPREL_G1)
2946 .Case("tprel_g1_nc", ARM64MCExpr::VK_TPREL_G1_NC)
2947 .Case("tprel_g0", ARM64MCExpr::VK_TPREL_G0)
2948 .Case("tprel_g0_nc", ARM64MCExpr::VK_TPREL_G0_NC)
2949 .Case("tprel_lo12", ARM64MCExpr::VK_TPREL_LO12)
2950 .Case("tprel_lo12_nc", ARM64MCExpr::VK_TPREL_LO12_NC)
2951 .Case("tlsdesc_lo12", ARM64MCExpr::VK_TLSDESC_LO12)
2952 .Case("got", ARM64MCExpr::VK_GOT_PAGE)
2953 .Case("got_lo12", ARM64MCExpr::VK_GOT_LO12)
2954 .Case("gottprel", ARM64MCExpr::VK_GOTTPREL_PAGE)
2955 .Case("gottprel_lo12", ARM64MCExpr::VK_GOTTPREL_LO12_NC)
2956 .Case("gottprel_g1", ARM64MCExpr::VK_GOTTPREL_G1)
2957 .Case("gottprel_g0_nc", ARM64MCExpr::VK_GOTTPREL_G0_NC)
2958 .Case("tlsdesc", ARM64MCExpr::VK_TLSDESC_PAGE)
2959 .Default(ARM64MCExpr::VK_INVALID)
2961 if (RefKind == ARM64MCExpr::VK_INVALID) {
2962 Error(Parser.getTok().getLoc(),
2963 "expect relocation specifier in operand after ':'");
2967 Parser.Lex(); // Eat identifier
// The modifier must be closed by a second ':' before the expression.
2969 if (Parser.getTok().isNot(AsmToken::Colon)) {
2970 Error(Parser.getTok().getLoc(), "expect ':' after relocation specifier");
2973 Parser.Lex(); // Eat ':'
2976 if (getParser().parseExpression(ImmVal))
2980 ImmVal = ARM64MCExpr::Create(ImmVal, RefKind, getContext());
2985 /// parseVectorList - Parse a vector list operand for AdvSIMD instructions.
/// Handles "{ v0.8b - v3.8b }" range form and "{ v0.8b, v1.8b, ... }"
/// comma form (registers must be sequential mod 32), with an optional
/// constant lane index after the closing brace.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added. The
/// assert message below says "Left Bracket" for an LCurly check — message
/// text left untouched (runtime string).
2986 bool ARM64AsmParser::parseVectorList(OperandVector &Operands) {
2987 assert(Parser.getTok().is(AsmToken::LCurly) && "Token is not a Left Bracket");
2989 Parser.Lex(); // Eat left bracket token.
2991 int64_t FirstReg = tryMatchVectorRegister(Kind, true);
2994 int64_t PrevReg = FirstReg;
// Range form: "{ vA.k - vB.k }".
2997 if (Parser.getTok().is(AsmToken::Minus)) {
2998 Parser.Lex(); // Eat the minus.
3000 SMLoc Loc = getLoc();
3002 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3005 // Any Kind suffices must match on all regs in the list.
3006 if (Kind != NextKind)
3007 return Error(Loc, "mismatched register size suffix");
// Distance wraps around at 32 registers; lists hold 1-4 registers.
3009 unsigned Space = (PrevReg < Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3011 if (Space == 0 || Space > 3) {
3012 return Error(Loc, "invalid number of vectors");
// Comma form: "{ vA.k, vB.k, ... }".
3018 while (Parser.getTok().is(AsmToken::Comma)) {
3019 Parser.Lex(); // Eat the comma token.
3021 SMLoc Loc = getLoc();
3023 int64_t Reg = tryMatchVectorRegister(NextKind, true);
3026 // Any Kind suffices must match on all regs in the list.
3027 if (Kind != NextKind)
3028 return Error(Loc, "mismatched register size suffix");
3030 // Registers must be incremental (with wraparound at 31)
3031 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3032 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32)
3033 return Error(Loc, "registers must be sequential");
3040 if (Parser.getTok().is(AsmToken::EndOfStatement))
3041 Error(getLoc(), "'}' expected");
3042 Parser.Lex(); // Eat the '}' token.
3044 unsigned NumElements = 0;
3045 char ElementKind = 0;
3047 parseValidVectorKind(Kind, NumElements, ElementKind);
3049 Operands.push_back(ARM64Operand::CreateVectorList(
3050 FirstReg, Count, NumElements, ElementKind, S, getLoc(), getContext()));
3052 // If there is an index specifier following the list, parse that too.
3053 if (Parser.getTok().is(AsmToken::LBrac)) {
3054 SMLoc SIdx = getLoc();
3055 Parser.Lex(); // Eat left bracket token.
3057 const MCExpr *ImmVal;
3058 if (getParser().parseExpression(ImmVal))
3060 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3062 TokError("immediate value expected for vector index");
3067 if (Parser.getTok().isNot(AsmToken::RBrac)) {
3068 Error(E, "']' expected");
3072 Parser.Lex(); // Eat right bracket token.
3074 Operands.push_back(ARM64Operand::CreateVectorIndex(MCE->getValue(), SIdx, E,
3080 /// parseOperand - Parse a arm instruction operand. For now this parses the
3081 /// operand regardless of the mnemonic.
/// Dispatch order: custom (tablegen) operand parsers, then by first token:
/// '[' -> memory, '{' -> vector list, identifier -> cond-code / register /
/// shift / extend / label-expression, '#' -> immediate (with a special case
/// for the literal "#0.0" of fcmp/fcmpe, emitted as raw tokens).
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
3082 bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
3083 bool invertCondCode) {
3084 // Check if the current operand has a custom associated parser, if so, try to
3085 // custom parse the operand, or fallback to the general approach.
3086 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3087 if (ResTy == MatchOperand_Success)
3089 // If there wasn't a custom match, try the generic matcher below. Otherwise,
3090 // there was a match, but an error occurred, in which case, just return that
3091 // the operand parsing failed.
3092 if (ResTy == MatchOperand_ParseFail)
3095 // Nothing custom, so do general case parsing.
3097 switch (getLexer().getKind()) {
3101 if (parseSymbolicImmVal(Expr))
3102 return Error(S, "invalid operand");
3104 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3105 Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
3108 case AsmToken::LBrac:
3109 return parseMemory(Operands);
3110 case AsmToken::LCurly:
3111 return parseVectorList(Operands);
3112 case AsmToken::Identifier: {
3113 // If we're expecting a Condition Code operand, then just parse that.
3115 return parseCondCode(Operands, invertCondCode);
3117 // If it's a register name, parse it.
3118 if (!parseRegister(Operands))
3121 // This could be an optional "shift" operand.
3122 if (!parseOptionalShift(Operands))
3125 // Or maybe it could be an optional "extend" operand.
3126 if (!parseOptionalExtend(Operands))
3129 // This was not a register so parse other operands that start with an
3130 // identifier (like labels) as expressions and create them as immediates.
3131 const MCExpr *IdVal;
3133 if (getParser().parseExpression(IdVal))
3136 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3137 Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
3140 case AsmToken::Hash: {
3141 // #42 -> immediate.
3145 // The only Real that should come through here is a literal #0.0 for
3146 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
3147 // so convert the value.
3148 const AsmToken &Tok = Parser.getTok();
3149 if (Tok.is(AsmToken::Real)) {
3150 APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3151 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3152 if (IntVal != 0 || (Mnemonic != "fcmp" && Mnemonic != "fcmpe"))
3153 return TokError("unexpected floating point literal");
3154 Parser.Lex(); // Eat the token.
// Emit "#0" and ".0" as two raw tokens, matching the instruction string.
3157 ARM64Operand::CreateToken("#0", false, S, getContext()));
3159 ARM64Operand::CreateToken(".0", false, S, getContext()));
3163 const MCExpr *ImmVal;
3164 if (parseSymbolicImmVal(ImmVal))
3167 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3168 Operands.push_back(ARM64Operand::CreateImm(ImmVal, S, E, getContext()));
3174 /// ParseInstruction - Parse an ARM64 instruction mnemonic followed by its
/// operands. The mnemonic is split on '.' into a head token plus suffix
/// tokens; "b.<cc>" gets its condition code turned into an immediate, and
/// ic/dc/at/tlbi are routed to parseSysAlias. Conditional-select/compare
/// mnemonics mark which operand position is a condition code (and whether it
/// must be inverted for the aliases cset/csetm/cinc/cinv/cneg).
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
3176 bool ARM64AsmParser::ParseInstruction(ParseInstructionInfo &Info,
3177 StringRef Name, SMLoc NameLoc,
3178 OperandVector &Operands) {
3179 // Create the leading tokens for the mnemonic, split by '.' characters.
3180 size_t Start = 0, Next = Name.find('.');
3181 StringRef Head = Name.slice(Start, Next);
3183 // IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
3184 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
3185 return parseSysAlias(Head, NameLoc, Operands);
3188 ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
3191 // Handle condition codes for a branch mnemonic
3192 if (Head == "b" && Next != StringRef::npos) {
3194 Next = Name.find('.', Start + 1);
3195 Head = Name.slice(Start + 1, Next);
3197 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3198 (Head.data() - Name.data()));
3199 unsigned CC = parseCondCodeString(Head);
3200 if (CC == ARM64CC::Invalid)
3201 return Error(SuffixLoc, "invalid condition code");
3202 const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
3204 ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
3207 // Add the remaining tokens in the mnemonic.
3208 while (Next != StringRef::npos) {
3210 Next = Name.find('.', Start + 1);
3211 Head = Name.slice(Start, Next);
3212 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
3213 (Head.data() - Name.data()) + 1);
3215 ARM64Operand::CreateToken(Head, true, SuffixLoc, getContext()));
3218 // Conditional compare instructions have a Condition Code operand, which needs
3219 // to be parsed and an immediate operand created.
3220 bool condCodeFourthOperand =
3221 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
3222 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
3223 Head == "csinc" || Head == "csinv" || Head == "csneg");
3225 // These instructions are aliases to some of the conditional select
3226 // instructions. However, the condition code is inverted in the aliased
3229 // FIXME: Is this the correct way to handle these? Or should the parser
3230 // generate the aliased instructions directly?
3231 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
3232 bool condCodeThirdOperand =
3233 (Head == "cinc" || Head == "cinv" || Head == "cneg");
3235 // Read the remaining operands.
3236 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3237 // Read the first operand.
3238 if (parseOperand(Operands, false, false)) {
3239 Parser.eatToEndOfStatement();
// N tracks the 1-based operand position to match the cond-code slots above.
3244 while (getLexer().is(AsmToken::Comma)) {
3245 Parser.Lex(); // Eat the comma.
3247 // Parse and remember the operand.
3248 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3249 (N == 3 && condCodeThirdOperand) ||
3250 (N == 2 && condCodeSecondOperand),
3251 condCodeSecondOperand || condCodeThirdOperand)) {
3252 Parser.eatToEndOfStatement();
3260 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3261 SMLoc Loc = Parser.getTok().getLoc();
3262 Parser.eatToEndOfStatement();
3263 return Error(Loc, "unexpected token in argument list");
3266 Parser.Lex(); // Consume the EndOfStatement
3270 /// isFPR32Register - Check if a register is in the FPR32 register class.
3271 /// (The parser does not have the target register info to check the register
3272 /// class directly.)
/// Returns true for S0-S31.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
3273 static bool isFPR32Register(unsigned Reg) {
3274 using namespace ARM64;
3278 case S0: case S1: case S2: case S3: case S4: case S5: case S6:
3279 case S7: case S8: case S9: case S10: case S11: case S12: case S13:
3280 case S14: case S15: case S16: case S17: case S18: case S19: case S20:
3281 case S21: case S22: case S23: case S24: case S25: case S26: case S27:
3282 case S28: case S29: case S30: case S31:
3288 /// isGPR32Register - Check if a register is in the GPR32sp register class.
3289 /// (The parser does not have the target register info to check the register
3290 /// class directly.)
/// Returns true for W0-W30 plus WSP and WZR.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
3291 static bool isGPR32Register(unsigned Reg) {
3292 using namespace ARM64;
3296 case W0: case W1: case W2: case W3: case W4: case W5: case W6:
3297 case W7: case W8: case W9: case W10: case W11: case W12: case W13:
3298 case W14: case W15: case W16: case W17: case W18: case W19: case W20:
3299 case W21: case W22: case W23: case W24: case W25: case W26: case W27:
3300 case W28: case W29: case W30: case WSP: case WZR:
/// isGPR64Reg - Check if a register is a 64-bit general-purpose register:
/// X0-X28 plus FP (X29), LR (X30), SP, and XZR.
/// NOTE(review): interior lines are elided in this extract (numbering gaps);
/// code lines below are preserved verbatim, only comments are added.
3306 static bool isGPR64Reg(unsigned Reg) {
3307 using namespace ARM64;
3309 case X0: case X1: case X2: case X3: case X4: case X5: case X6:
3310 case X7: case X8: case X9: case X10: case X11: case X12: case X13:
3311 case X14: case X15: case X16: case X17: case X18: case X19: case X20:
3312 case X21: case X22: case X23: case X24: case X25: case X26: case X27:
3313 case X28: case FP: case LR: case SP: case XZR:
3321 // FIXME: This entire function is a giant hack to provide us with decent
3322 // operand range validation/diagnostics until TableGen/MC can be extended
3323 // to support autogeneration of this kind of validation.
/// validateInstruction - Post-match semantic checks the matcher tables cannot
/// express. \p Loc holds the start location of each parsed operand (indexed
/// from 0, mnemonic excluded) and is used to point diagnostics at the
/// offending operand. Returns true (after emitting an Error) on failure.
3324 bool ARM64AsmParser::validateInstruction(MCInst &Inst,
3325 SmallVectorImpl<SMLoc> &Loc) {
3326 const MCRegisterInfo *RI = getContext().getRegisterInfo();
3327 // Check for indexed addressing modes w/ the base register being the
3328 // same as a destination/source register or pair load where
3329 // the Rt == Rt2. All of those are undefined behaviour.
3330 switch (Inst.getOpcode()) {
// Writeback load pairs: the base register must not overlap either
// destination register.
3331 case ARM64::LDPSWpre:
3332 case ARM64::LDPWpost:
3333 case ARM64::LDPWpre:
3334 case ARM64::LDPXpost:
3335 case ARM64::LDPXpre: {
3336 unsigned Rt = Inst.getOperand(0).getReg();
3337 unsigned Rt2 = Inst.getOperand(1).getReg();
3338 unsigned Rn = Inst.getOperand(2).getReg();
3339 if (RI->isSubRegisterEq(Rn, Rt))
3340 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
3341 "is also a destination");
3342 if (RI->isSubRegisterEq(Rn, Rt2))
3343 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
3344 "is also a destination");
// Load pairs (integer and FP/SIMD forms): the two destination registers
// must be distinct.
3347 case ARM64::LDPDpost:
3348 case ARM64::LDPDpre:
3349 case ARM64::LDPQpost:
3350 case ARM64::LDPQpre:
3351 case ARM64::LDPSpost:
3352 case ARM64::LDPSpre:
3353 case ARM64::LDPSWpost:
3359 case ARM64::LDPXi: {
3360 unsigned Rt = Inst.getOperand(0).getReg();
3361 unsigned Rt2 = Inst.getOperand(1).getReg();
3363 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
// Writeback store pairs: the base register must not overlap either source
// register.
3366 case ARM64::STPDpost:
3367 case ARM64::STPDpre:
3368 case ARM64::STPQpost:
3369 case ARM64::STPQpre:
3370 case ARM64::STPSpost:
3371 case ARM64::STPSpre:
3372 case ARM64::STPWpost:
3373 case ARM64::STPWpre:
3374 case ARM64::STPXpost:
3375 case ARM64::STPXpre: {
3376 unsigned Rt = Inst.getOperand(0).getReg();
3377 unsigned Rt2 = Inst.getOperand(1).getReg();
3378 unsigned Rn = Inst.getOperand(2).getReg();
3379 if (RI->isSubRegisterEq(Rn, Rt))
3380 return Error(Loc[0], "unpredictable STP instruction, writeback base "
3381 "is also a source");
3382 if (RI->isSubRegisterEq(Rn, Rt2))
3383 return Error(Loc[1], "unpredictable STP instruction, writeback base "
3384 "is also a source");
// Writeback single-register loads: the base must not overlap Rt.
3387 case ARM64::LDRBBpre:
3388 case ARM64::LDRBpre:
3389 case ARM64::LDRHHpre:
3390 case ARM64::LDRHpre:
3391 case ARM64::LDRSBWpre:
3392 case ARM64::LDRSBXpre:
3393 case ARM64::LDRSHWpre:
3394 case ARM64::LDRSHXpre:
3395 case ARM64::LDRSWpre:
3396 case ARM64::LDRWpre:
3397 case ARM64::LDRXpre:
3398 case ARM64::LDRBBpost:
3399 case ARM64::LDRBpost:
3400 case ARM64::LDRHHpost:
3401 case ARM64::LDRHpost:
3402 case ARM64::LDRSBWpost:
3403 case ARM64::LDRSBXpost:
3404 case ARM64::LDRSHWpost:
3405 case ARM64::LDRSHXpost:
3406 case ARM64::LDRSWpost:
3407 case ARM64::LDRWpost:
3408 case ARM64::LDRXpost: {
3409 unsigned Rt = Inst.getOperand(0).getReg();
3410 unsigned Rn = Inst.getOperand(1).getReg();
3411 if (RI->isSubRegisterEq(Rn, Rt))
// NOTE(review): Rt is the *destination* of a load; "source" here looks
// copy-pasted from the STR case below — consider "is also a destination".
3412 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
3413 "is also a source");
// Writeback single-register stores: the base must not overlap Rt.
3416 case ARM64::STRBBpost:
3417 case ARM64::STRBpost:
3418 case ARM64::STRHHpost:
3419 case ARM64::STRHpost:
3420 case ARM64::STRWpost:
3421 case ARM64::STRXpost:
3422 case ARM64::STRBBpre:
3423 case ARM64::STRBpre:
3424 case ARM64::STRHHpre:
3425 case ARM64::STRHpre:
3426 case ARM64::STRWpre:
3427 case ARM64::STRXpre: {
3428 unsigned Rt = Inst.getOperand(0).getReg();
3429 unsigned Rn = Inst.getOperand(1).getReg();
3430 if (RI->isSubRegisterEq(Rn, Rt))
3431 return Error(Loc[0], "unpredictable STR instruction, writeback base "
3432 "is also a source");
3437 // Now check immediate ranges. Separate from the above as there is overlap
3438 // in the instructions being checked and this keeps the nested conditionals
3440 switch (Inst.getOpcode()) {
// Logical (shifted register): an LSL amount of more than 31 is invalid for
// the 32-bit forms.
3442 case ARM64::ANDSWrs:
3444 case ARM64::ORRWrs: {
3445 if (!Inst.getOperand(3).isImm())
3446 return Error(Loc[3], "immediate value expected");
3447 int64_t shifter = Inst.getOperand(3).getImm();
3448 ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
3449 if (ST == ARM64_AM::LSL && shifter > 31)
3450 return Error(Loc[3], "shift value out of range");
// Add/subtract (immediate): only "lsl #0" and "lsl #12" shifts exist, and
// the imm12 payload may also be a relocatable @pageoff/:lo12: expression.
3453 case ARM64::ADDSWri:
3454 case ARM64::ADDSXri:
3457 case ARM64::SUBSWri:
3458 case ARM64::SUBSXri:
3460 case ARM64::SUBXri: {
3461 if (!Inst.getOperand(3).isImm())
3462 return Error(Loc[3], "immediate value expected");
3463 int64_t shifter = Inst.getOperand(3).getImm();
3464 if (shifter != 0 && shifter != 12)
3465 return Error(Loc[3], "shift value out of range");
3466 // The imm12 operand can be an expression. Validate that it's legit.
3467 // FIXME: We really, really want to allow arbitrary expressions here
3468 // and resolve the value and validate the result at fixup time, but
3469 // that's hard as we have long since lost any source information we
3470 // need to generate good diagnostics by that point.
3471 if (Inst.getOpcode() == ARM64::ADDXri && Inst.getOperand(2).isExpr()) {
3472 const MCExpr *Expr = Inst.getOperand(2).getExpr();
3473 ARM64MCExpr::VariantKind ELFRefKind;
3474 MCSymbolRefExpr::VariantKind DarwinRefKind;
3475 const MCConstantExpr *Addend;
3476 if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
3477 return Error(Loc[2], "invalid immediate expression");
// Any low-12-bit reference kind (Darwin @pageoff / ELF :lo12: family) is
// acceptable here.
3480 if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
3481 DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF ||
3482 ELFRefKind == ARM64MCExpr::VK_LO12 ||
3483 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
3484 ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
3485 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
3486 ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
3487 ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
3488 // Note that we don't range-check the addend. It's adjusted
3489 // modulo page size when converted, so there is no "out of range"
3490 // condition when using @pageoff. Any validity checking for the value
3491 // was done in the is*() predicate function.
3493 } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
3494 // @gotpageoff can only be used directly, not with an addend.
3498 // Otherwise, we're not sure, so don't allow it for now.
3499 return Error(Loc[2], "invalid immediate expression");
3502 // If it's anything but an immediate, it's not legit.
3503 if (!Inst.getOperand(2).isImm())
3504 return Error(Loc[2], "invalid immediate expression");
3505 int64_t imm = Inst.getOperand(2).getImm();
// Plain immediates must fit in the unsigned 12-bit field.
3506 if (imm > 4095 || imm < 0)
3507 return Error(Loc[2], "immediate value out of range");
// Writeback loads/stores: the signed 9-bit offset range is checked below.
3510 case ARM64::LDRBpre:
3511 case ARM64::LDRHpre:
3512 case ARM64::LDRSBWpre:
3513 case ARM64::LDRSBXpre:
3514 case ARM64::LDRSHWpre:
3515 case ARM64::LDRSHXpre:
3516 case ARM64::LDRWpre:
3517 case ARM64::LDRXpre:
3518 case ARM64::LDRSpre:
3519 case ARM64::LDRDpre:
3520 case ARM64::LDRQpre:
3521 case ARM64::STRBpre:
3522 case ARM64::STRHpre:
3523 case ARM64::STRWpre:
3524 case ARM64::STRXpre:
3525 case ARM64::STRSpre:
3526 case ARM64::STRDpre:
3527 case ARM64::STRQpre:
3528 case ARM64::LDRBpost:
3529 case ARM64::LDRHpost:
3530 case ARM64::LDRSBWpost:
3531 case ARM64::LDRSBXpost:
3532 case ARM64::LDRSHWpost:
3533 case ARM64::LDRSHXpost:
3534 case ARM64::LDRWpost:
3535 case ARM64::LDRXpost:
3536 case ARM64::LDRSpost:
3537 case ARM64::LDRDpost:
3538 case ARM64::LDRQpost:
3539 case ARM64::STRBpost:
3540 case ARM64::STRHpost:
3541 case ARM64::STRWpost:
3542 case ARM64::STRXpost:
3543 case ARM64::STRSpost:
3544 case ARM64::STRDpost:
3545 case ARM64::STRQpost:
// Unprivileged (LDTR*) and unscaled (LDUR*/STUR*) forms share the same
// signed 9-bit [-256,255] offset encoding.
3550 case ARM64::LDTRSHWi:
3551 case ARM64::LDTRSHXi:
3552 case ARM64::LDTRSBWi:
3553 case ARM64::LDTRSBXi:
3554 case ARM64::LDTRSWi:
3566 case ARM64::LDURSHWi:
3567 case ARM64::LDURSHXi:
3568 case ARM64::LDURSBWi:
3569 case ARM64::LDURSBXi:
3570 case ARM64::LDURSWi:
3578 case ARM64::STURBi: {
3579 // FIXME: Should accept expressions and error in fixup evaluation
3581 if (!Inst.getOperand(2).isImm())
3582 return Error(Loc[1], "immediate value expected")
3583 int64_t offset = Inst.getOperand(2).getImm();
3584 if (offset > 255 || offset < -256)
3585 return Error(Loc[1], "offset value out of range");
// Register-offset forms: the extend must be one of UXTW/UXTX/SXTW/SXTX.
3590 case ARM64::LDRSWro:
3592 case ARM64::STRSro: {
3593 // FIXME: Should accept expressions and error in fixup evaluation
3595 if (!Inst.getOperand(3).isImm())
3596 return Error(Loc[1], "immediate value expected");
3597 int64_t shift = Inst.getOperand(3).getImm();
3598 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3599 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3600 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3601 return Error(Loc[1], "shift type invalid");
3610 case ARM64::STRQro: {
3611 // FIXME: Should accept expressions and error in fixup evaluation
3613 if (!Inst.getOperand(3).isImm())
3614 return Error(Loc[1], "immediate value expected");
3615 int64_t shift = Inst.getOperand(3).getImm();
3616 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3617 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3618 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3619 return Error(Loc[1], "shift type invalid");
3623 case ARM64::LDRHHro:
3624 case ARM64::LDRSHWro:
3625 case ARM64::LDRSHXro:
3627 case ARM64::STRHHro: {
3628 // FIXME: Should accept expressions and error in fixup evaluation
3630 if (!Inst.getOperand(3).isImm())
3631 return Error(Loc[1], "immediate value expected");
3632 int64_t shift = Inst.getOperand(3).getImm();
3633 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3634 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3635 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3636 return Error(Loc[1], "shift type invalid");
3640 case ARM64::LDRBBro:
3641 case ARM64::LDRSBWro:
3642 case ARM64::LDRSBXro:
3644 case ARM64::STRBBro: {
3645 // FIXME: Should accept expressions and error in fixup evaluation
3647 if (!Inst.getOperand(3).isImm())
3648 return Error(Loc[1], "immediate value expected");
3649 int64_t shift = Inst.getOperand(3).getImm();
3650 ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
3651 if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
3652 type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
3653 return Error(Loc[1], "shift type invalid");
// Load/store pair (and non-temporal pair) offsets: scaled signed 7-bit
// field, checked here against the unscaled [-64,63] element range.
3667 case ARM64::LDPWpre:
3668 case ARM64::LDPXpre:
3669 case ARM64::LDPSpre:
3670 case ARM64::LDPDpre:
3671 case ARM64::LDPQpre:
3672 case ARM64::LDPSWpre:
3673 case ARM64::STPWpre:
3674 case ARM64::STPXpre:
3675 case ARM64::STPSpre:
3676 case ARM64::STPDpre:
3677 case ARM64::STPQpre:
3678 case ARM64::LDPWpost:
3679 case ARM64::LDPXpost:
3680 case ARM64::LDPSpost:
3681 case ARM64::LDPDpost:
3682 case ARM64::LDPQpost:
3683 case ARM64::LDPSWpost:
3684 case ARM64::STPWpost:
3685 case ARM64::STPXpost:
3686 case ARM64::STPSpost:
3687 case ARM64::STPDpost:
3688 case ARM64::STPQpost:
3698 case ARM64::STNPQi: {
3699 // FIXME: Should accept expressions and error in fixup evaluation
3701 if (!Inst.getOperand(3).isImm())
3702 return Error(Loc[2], "immediate value expected");
3703 int64_t offset = Inst.getOperand(3).getImm();
3704 if (offset > 63 || offset < -64)
3705 return Error(Loc[2], "offset value out of range");
/// rewriteMOV - Rewrite a "mov Rd, #imm" pseudo in place into the real
/// mnemonic \p mnemonic (movz/movn): the mnemonic token is replaced, the
/// immediate operand is replaced with (imm >> shift), and an explicit
/// "lsl #shift" shifter operand is appended.
3713 static void rewriteMOV(ARM64AsmParser::OperandVector &Operands,
3714 StringRef mnemonic, uint64_t imm, unsigned shift,
3715 MCContext &Context) {
3716 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3717 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3719 ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
// The encoded immediate is the 16-bit payload, i.e. the value already
// shifted down by the shifter amount.
3721 const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
3722 Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
3723 Op2->getEndLoc(), Context);
3725 Operands.push_back(ARM64Operand::CreateShifter(
3726 ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
/// showMatchError - Emit the diagnostic corresponding to the given matcher
/// failure code \p ErrCode at location \p Loc. Always returns true so
/// callers can "return showMatchError(...)".
3731 bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
3733 case Match_MissingFeature:
3735 "instruction requires a CPU feature not currently enabled");
3736 case Match_InvalidOperand:
3737 return Error(Loc, "invalid operand for instruction");
3738 case Match_InvalidSuffix:
3739 return Error(Loc, "invalid type suffix for instruction");
// Memory-offset diagnostics: ranges reflect the scaled/unscaled encodings.
3740 case Match_InvalidMemoryIndexedSImm9:
3741 return Error(Loc, "index must be an integer in range [-256,255].");
3742 case Match_InvalidMemoryIndexed32SImm7:
3743 return Error(Loc, "index must be a multiple of 4 in range [-256,252].");
3744 case Match_InvalidMemoryIndexed64SImm7:
3745 return Error(Loc, "index must be a multiple of 8 in range [-512,504].");
3746 case Match_InvalidMemoryIndexed128SImm7:
3747 return Error(Loc, "index must be a multiple of 16 in range [-1024,1008].");
3748 case Match_InvalidMemoryIndexed8:
3749 return Error(Loc, "index must be an integer in range [0,4095].");
3750 case Match_InvalidMemoryIndexed16:
3751 return Error(Loc, "index must be a multiple of 2 in range [0,8190].");
3752 case Match_InvalidMemoryIndexed32:
3753 return Error(Loc, "index must be a multiple of 4 in range [0,16380].");
3754 case Match_InvalidMemoryIndexed64:
3755 return Error(Loc, "index must be a multiple of 8 in range [0,32760].");
3756 case Match_InvalidMemoryIndexed128:
3757 return Error(Loc, "index must be a multiple of 16 in range [0,65520].");
3758 case Match_InvalidImm1_8:
3759 return Error(Loc, "immediate must be an integer in range [1,8].");
3760 case Match_InvalidImm1_16:
3761 return Error(Loc, "immediate must be an integer in range [1,16].");
3762 case Match_InvalidImm1_32:
3763 return Error(Loc, "immediate must be an integer in range [1,32].");
3764 case Match_InvalidImm1_64:
3765 return Error(Loc, "immediate must be an integer in range [1,64].");
3766 case Match_MnemonicFail:
3767 return Error(Loc, "unrecognized instruction mnemonic");
// Unknown codes indicate a new match type was added without a diagnostic.
3769 assert(0 && "unexpected error code!");
3770 return Error(Loc, "invalid instruction format");
/// MatchAndEmitInstruction - Apply the mnemonic aliases and operand rewrites
/// that the tblgen'erated matcher cannot express, run the matcher (NEON
/// short-form table first, then the long-form table), validate the result,
/// and emit it to the streamer. Returns true on error.
3774 bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
3775 OperandVector &Operands,
3777 unsigned &ErrorInfo,
3778 bool MatchingInlineAsm) {
3779 assert(!Operands.empty() && "Unexpect empty operand list!");
3780 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3781 assert(Op->isToken() && "Leading operand should always be a mnemonic!");
3783 StringRef Tok = Op->getToken();
3784 // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
3785 // This needs to be done before the special handling of ADD/SUB immediates.
3786 if (Tok == "cmp" || Tok == "cmn") {
3787 // Replace the opcode with either ADDS or SUBS.
3788 const char *Repl = StringSwitch<const char *>(Tok)
3789 .Case("cmp", "subs")
3790 .Case("cmn", "adds")
3792 assert(Repl && "Unknown compare instruction");
3794 Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
3796 // Insert WZR or XZR as destination operand.
3797 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
// The zero register's width must match the width of the compared register.
3799 if (RegOp->isReg() && isGPR32Register(RegOp->getReg()))
3800 ZeroReg = ARM64::WZR;
3802 ZeroReg = ARM64::XZR;
3804 Operands.begin() + 1,
3805 ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
3806 // Update since we modified it above.
3807 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
3808 Tok = Op->getToken();
3811 unsigned NumOperands = Operands.size();
3813 if (Tok == "mov" && NumOperands == 3) {
3814 // The MOV mnemomic is aliased to movn/movz, depending on the value of
3815 // the immediate being instantiated.
3816 // FIXME: Catching this here is a total hack, and we should use tblgen
3817 // support to implement this instead as soon as it is available.
3819 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3821 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
3822 uint64_t Val = CE->getValue();
3823 uint64_t NVal = ~Val;
3825 // If this is a 32-bit register and the value has none of the upper
3826 // set, clear the complemented upper 32-bits so the logic below works
3827 // for 32-bit registers too.
3828 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3829 if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
3830 (Val & 0xFFFFFFFFULL) == Val)
3831 NVal &= 0x00000000FFFFFFFFULL;
// Pick movz if the value fits entirely in one 16-bit halfword, else movn
// if its complement does; rewriteMOV appends the matching "lsl #n".
3833 // MOVK Rd, imm << 0
3834 if ((Val & 0xFFFF) == Val)
3835 rewriteMOV(Operands, "movz", Val, 0, getContext());
3837 // MOVK Rd, imm << 16
3838 else if ((Val & 0xFFFF0000ULL) == Val)
3839 rewriteMOV(Operands, "movz", Val, 16, getContext());
3841 // MOVK Rd, imm << 32
3842 else if ((Val & 0xFFFF00000000ULL) == Val)
3843 rewriteMOV(Operands, "movz", Val, 32, getContext());
3845 // MOVK Rd, imm << 48
3846 else if ((Val & 0xFFFF000000000000ULL) == Val)
3847 rewriteMOV(Operands, "movz", Val, 48, getContext());
3849 // MOVN Rd, (~imm << 0)
3850 else if ((NVal & 0xFFFFULL) == NVal)
3851 rewriteMOV(Operands, "movn", NVal, 0, getContext());
3853 // MOVN Rd, ~(imm << 16)
3854 else if ((NVal & 0xFFFF0000ULL) == NVal)
3855 rewriteMOV(Operands, "movn", NVal, 16, getContext());
3857 // MOVN Rd, ~(imm << 32)
3858 else if ((NVal & 0xFFFF00000000ULL) == NVal)
3859 rewriteMOV(Operands, "movn", NVal, 32, getContext());
3861 // MOVN Rd, ~(imm << 48)
3862 else if ((NVal & 0xFFFF000000000000ULL) == NVal)
3863 rewriteMOV(Operands, "movn", NVal, 48, getContext());
3866 } else if (NumOperands == 4) {
3867 if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
3868 // Handle the uimm24 immediate form, where the shift is not specified.
3869 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3871 if (const MCConstantExpr *CE =
3872 dyn_cast<MCConstantExpr>(Op3->getImm())) {
3873 uint64_t Val = CE->getValue();
// Values up to 24 bits are accepted; <2^12 encode directly with "lsl #0",
// 4K-aligned values encode shifted with "lsl #12", anything else is an
// error.
3874 if (Val >= (1 << 24)) {
3875 Error(IDLoc, "immediate value is too large");
3878 if (Val < (1 << 12)) {
3879 Operands.push_back(ARM64Operand::CreateShifter(
3880 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3881 } else if ((Val & 0xfff) == 0) {
3883 CE = MCConstantExpr::Create(Val >> 12, getContext());
3885 ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
3886 Operands.push_back(ARM64Operand::CreateShifter(
3887 ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
3889 Error(IDLoc, "immediate value is too large");
3893 Operands.push_back(ARM64Operand::CreateShifter(
3894 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3898 // FIXME: Horible hack to handle the LSL -> UBFM alias.
3899 } else if (NumOperands == 4 && Tok == "lsl") {
3900 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3901 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3902 if (Op2->isReg() && Op3->isImm()) {
3903 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3905 uint64_t Op3Val = Op3CE->getValue();
3906 uint64_t NewOp3Val = 0;
3907 uint64_t NewOp4Val = 0;
// lsl Rd, Rn, #shift == ubfm Rd, Rn, #(-shift mod regsize),
// #(regsize-1-shift); width depends on the register class.
3908 if (isGPR32Register(Op2->getReg())) {
3909 NewOp3Val = (32 - Op3Val) & 0x1f;
3910 NewOp4Val = 31 - Op3Val;
3912 NewOp3Val = (64 - Op3Val) & 0x3f;
3913 NewOp4Val = 63 - Op3Val;
3916 const MCExpr *NewOp3 =
3917 MCConstantExpr::Create(NewOp3Val, getContext());
3918 const MCExpr *NewOp4 =
3919 MCConstantExpr::Create(NewOp4Val, getContext());
3921 Operands[0] = ARM64Operand::CreateToken(
3922 "ubfm", false, Op->getStartLoc(), getContext());
3923 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3924 Op3->getEndLoc(), getContext());
3925 Operands.push_back(ARM64Operand::CreateImm(
3926 NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
3932 // FIXME: Horrible hack to handle the optional LSL shift for vector
3934 } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
3935 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3936 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3937 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3938 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
3939 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
3940 Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
3941 IDLoc, getContext()));
3942 } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
3943 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3944 ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
3945 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3946 if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
3947 (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
3948 StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
3949 // Canonicalize on lower-case for ease of comparison.
3950 std::string CanonicalSuffix = Suffix.lower();
// movi with a byte/64-bit suffix takes no shifter; every other form gets
// an implicit "lsl #0".
3951 if (Tok != "movi" ||
3952 (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
3953 CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
3954 Operands.push_back(ARM64Operand::CreateShifter(
3955 ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
3958 } else if (NumOperands == 5) {
3959 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
3960 // UBFIZ -> UBFM aliases.
3961 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
3962 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
3963 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
3964 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
3966 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
3967 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
3968 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
3970 if (Op3CE && Op4CE) {
3971 uint64_t Op3Val = Op3CE->getValue();
3972 uint64_t Op4Val = Op4CE->getValue();
// Convert (lsb, width) of the *fiz alias into the (immr, imms) pair the
// *bfm form expects: immr = -lsb mod regsize, imms = width - 1.
3974 uint64_t NewOp3Val = 0;
3975 if (isGPR32Register(Op1->getReg()))
3976 NewOp3Val = (32 - Op3Val) & 0x1f;
3978 NewOp3Val = (64 - Op3Val) & 0x3f;
3980 uint64_t NewOp4Val = Op4Val - 1;
3982 const MCExpr *NewOp3 =
3983 MCConstantExpr::Create(NewOp3Val, getContext());
3984 const MCExpr *NewOp4 =
3985 MCConstantExpr::Create(NewOp4Val, getContext());
3986 Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
3987 Op3->getEndLoc(), getContext());
3988 Operands[4] = ARM64Operand::CreateImm(NewOp4, Op4->getStartLoc(),
3989 Op4->getEndLoc(), getContext());
3991 Operands[0] = ARM64Operand::CreateToken(
3992 "bfm", false, Op->getStartLoc(), getContext());
3993 else if (Tok == "sbfiz")
3994 Operands[0] = ARM64Operand::CreateToken(
3995 "sbfm", false, Op->getStartLoc(), getContext());
3996 else if (Tok == "ubfiz")
3997 Operands[0] = ARM64Operand::CreateToken(
3998 "ubfm", false, Op->getStartLoc(), getContext());
4000 llvm_unreachable("No valid mnemonic for alias?");
4008 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
4009 // UBFX -> UBFM aliases.
4010 } else if (NumOperands == 5 &&
4011 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
4012 ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
4013 ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
4014 ARM64Operand *Op4 = static_cast<ARM64Operand *>(Operands[4]);
4016 if (Op1->isReg() && Op3->isImm() && Op4->isImm()) {
4017 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
4018 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4->getImm());
4020 if (Op3CE && Op4CE) {
4021 uint64_t Op3Val = Op3CE->getValue();
4022 uint64_t Op4Val = Op4CE->getValue();
// For the extract aliases, imms = lsb + width - 1 (immr stays lsb).
4023 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
4025 if (NewOp4Val >= Op3Val) {
4026 const MCExpr *NewOp4 =
4027 MCConstantExpr::Create(NewOp4Val, getContext());
4028 Operands[4] = ARM64Operand::CreateImm(
4029 NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
4031 Operands[0] = ARM64Operand::CreateToken(
4032 "bfm", false, Op->getStartLoc(), getContext());
4033 else if (Tok == "sbfx")
4034 Operands[0] = ARM64Operand::CreateToken(
4035 "sbfm", false, Op->getStartLoc(), getContext());
4036 else if (Tok == "ubfx")
4037 Operands[0] = ARM64Operand::CreateToken(
4038 "ubfm", false, Op->getStartLoc(), getContext());
4040 llvm_unreachable("No valid mnemonic for alias?");
4049 // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
4050 // InstAlias can't quite handle this since the reg classes aren't
4052 if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
4053 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4055 if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
// A bit number below 32 means the Wn form is legal; widen to Xn for the
// matcher.
4056 if (OpCE->getValue() < 32) {
4057 // The source register can be Wn here, but the matcher expects a
4058 // GPR64. Twiddle it here if necessary.
4059 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4061 unsigned Reg = getXRegFromWReg(Op->getReg());
4062 Operands[1] = ARM64Operand::CreateReg(
4063 Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
4070 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
4071 // InstAlias can't quite handle this since the reg classes aren't
4073 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
4074 // The source register can be Wn here, but the matcher expects a
4075 // GPR64. Twiddle it here if necessary.
4076 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4078 unsigned Reg = getXRegFromWReg(Op->getReg());
4079 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4080 Op->getEndLoc(), getContext());
4084 // FIXME: Likewise for [su]xt[bh] with a Xd dst operand
4085 else if (NumOperands == 3 &&
4086 (Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
4087 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
4088 if (Op->isReg() && isGPR64Reg(Op->getReg())) {
4089 // The source register can be Wn here, but the matcher expects a
4090 // GPR64. Twiddle it here if necessary.
4091 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
4093 unsigned Reg = getXRegFromWReg(Op->getReg());
4094 Operands[2] = ARM64Operand::CreateReg(Reg, false, Op->getStartLoc(),
4095 Op->getEndLoc(), getContext());
4101 // Yet another horrible hack to handle FMOV Rd, #0.0 using [WX]ZR.
4102 if (NumOperands == 3 && Tok == "fmov") {
4103 ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
4104 ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
// (unsigned)-1 is the sentinel FPImm value used for #0.0 — rewrite the
// immediate into the matching-width zero register.
4105 if (RegOp->isReg() && ImmOp->isFPImm() &&
4106 ImmOp->getFPImm() == (unsigned)-1) {
4108 isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
4109 Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
4110 Op->getEndLoc(), getContext());
4115 // FIXME: Horrible hack to handle the literal .d[1] vector index on
4116 // FMOV instructions. The index isn't an actual instruction operand
4117 // but rather syntactic sugar. It really should be part of the mnemonic,
4118 // not the operand, but whatever.
4119 if ((NumOperands == 5) && Tok == "fmov") {
4120 // If the last operand is a vectorindex of '1', then replace it with
4121 // a '[' '1' ']' token sequence, which is what the matcher
4122 // (annoyingly) expects for a literal vector index operand.
4123 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
4124 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4125 SMLoc Loc = Op->getStartLoc();
4126 Operands.pop_back();
4128 ARM64Operand::CreateToken("[", false, Loc, getContext()));
4130 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4132 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4133 } else if (Op->isReg()) {
4134 // Similarly, check the destination operand for the GPR->High-lane
4136 unsigned OpNo = NumOperands - 2;
4137 ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
4138 if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
4139 SMLoc Loc = Op->getStartLoc();
4141 ARM64Operand::CreateToken("[", false, Loc, getContext());
4143 Operands.begin() + OpNo + 1,
4144 ARM64Operand::CreateToken("1", false, Loc, getContext()));
4146 Operands.begin() + OpNo + 2,
4147 ARM64Operand::CreateToken("]", false, Loc, getContext()));
4153 // First try to match against the secondary set of tables containing the
4154 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
4155 unsigned MatchResult =
4156 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
4158 // If that fails, try against the alternate table containing long-form NEON:
4159 // "fadd v0.2s, v1.2s, v2.2s"
4160 if (MatchResult != Match_Success)
4162 MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
4164 switch (MatchResult) {
4165 case Match_Success: {
4166 // Perform range checking and other semantic validations
4167 SmallVector<SMLoc, 8> OperandLocs;
4168 NumOperands = Operands.size();
4169 for (unsigned i = 1; i < NumOperands; ++i)
4170 OperandLocs.push_back(Operands[i]->getStartLoc());
4171 if (validateInstruction(Inst, OperandLocs))
4175 Out.EmitInstruction(Inst, STI);
4178 case Match_MissingFeature:
4179 case Match_MnemonicFail:
4180 return showMatchError(IDLoc, MatchResult);
4181 case Match_InvalidOperand: {
4182 SMLoc ErrorLoc = IDLoc;
4183 if (ErrorInfo != ~0U) {
4184 if (ErrorInfo >= Operands.size())
4185 return Error(IDLoc, "too few operands for instruction");
4187 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4188 if (ErrorLoc == SMLoc())
4191 // If the match failed on a suffix token operand, tweak the diagnostic
4193 if (((ARM64Operand *)Operands[ErrorInfo])->isToken() &&
4194 ((ARM64Operand *)Operands[ErrorInfo])->isTokenSuffix())
4195 MatchResult = Match_InvalidSuffix;
4197 return showMatchError(ErrorLoc, MatchResult);
4199 case Match_InvalidMemoryIndexedSImm9: {
4200 // If there is not a '!' after the memory operand that failed, we really
4201 // want the diagnostic for the non-pre-indexed instruction variant instead.
4202 // Be careful to check for the post-indexed variant as well, which also
4203 // uses this match diagnostic. Also exclude the explicitly unscaled
4204 // mnemonics, as they want the unscaled diagnostic as well.
4205 if (Operands.size() == ErrorInfo + 1 &&
4206 !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
4207 !Tok.startswith("stur") && !Tok.startswith("ldur")) {
4208 // whether we want an Indexed64 or Indexed32 diagnostic depends on
4209 // the register class of the previous operand. Default to 64 in case
4210 // we see something unexpected.
4211 MatchResult = Match_InvalidMemoryIndexed64;
4213 ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
4214 if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
4215 .contains(PrevOp->getReg()))
4216 MatchResult = Match_InvalidMemoryIndexed32;
4219 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4220 if (ErrorLoc == SMLoc())
4222 return showMatchError(ErrorLoc, MatchResult);
4224 case Match_InvalidMemoryIndexed32:
4225 case Match_InvalidMemoryIndexed64:
4226 case Match_InvalidMemoryIndexed128:
4227 // If there is a '!' after the memory operand that failed, we really
4228 // want the diagnostic for the pre-indexed instruction variant instead.
4229 if (Operands.size() > ErrorInfo + 1 &&
4230 ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
4231 MatchResult = Match_InvalidMemoryIndexedSImm9;
4233 case Match_InvalidMemoryIndexed8:
4234 case Match_InvalidMemoryIndexed16:
4235 case Match_InvalidMemoryIndexed32SImm7:
4236 case Match_InvalidMemoryIndexed64SImm7:
4237 case Match_InvalidMemoryIndexed128SImm7:
4238 case Match_InvalidImm1_8:
4239 case Match_InvalidImm1_16:
4240 case Match_InvalidImm1_32:
4241 case Match_InvalidImm1_64: {
4242 // Any time we get here, there's nothing fancy to do. Just get the
4243 // operand SMLoc and display the diagnostic.
4244 SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
4245 // If it's a memory operand, the error is with the offset immediate,
4246 // so get that location instead.
4247 if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
4248 ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
4249 if (ErrorLoc == SMLoc())
4251 return showMatchError(ErrorLoc, MatchResult);
4255 llvm_unreachable("Implement any new match types added!");
4259 /// ParseDirective parses the arm specific directives
/// Returns true if the directive was handled with an error, false otherwise.
4260 bool ARM64AsmParser::ParseDirective(AsmToken DirectiveID) {
4261 StringRef IDVal = DirectiveID.getIdentifier();
4262 SMLoc Loc = DirectiveID.getLoc();
// Fixed-size data directives map straight onto value emission.
4263 if (IDVal == ".hword")
4264 return parseDirectiveWord(2, Loc);
4265 if (IDVal == ".word")
4266 return parseDirectiveWord(4, Loc);
4267 if (IDVal == ".xword")
4268 return parseDirectiveWord(8, Loc);
4269 if (IDVal == ".tlsdesccall")
4270 return parseDirectiveTLSDescCall(Loc);
// Fall through to the linker-optimization-hint (.loh) handler, which
// itself rejects anything it does not recognize.
4272 return parseDirectiveLOH(IDVal, Loc);
4275 /// parseDirectiveWord
4276 /// ::= .word [ expression (, expression)* ]
// Parses a comma-separated list of expressions and emits each as a
// Size-byte value on the streamer. Returns true on error. An empty
// operand list (immediate end of statement) is accepted and emits
// nothing.
4277 bool ARM64AsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
4278 if (getLexer().isNot(AsmToken::EndOfStatement)) {
4280 const MCExpr *Value;
// A failed expression parse aborts the directive.
4281 if (getParser().parseExpression(Value))
4284 getParser().getStreamer().EmitValue(Value, Size);
// End of statement terminates the list; otherwise a comma must
// separate the next expression.
4286 if (getLexer().is(AsmToken::EndOfStatement))
4289 // FIXME: Improve diagnostic.
4290 if (getLexer().isNot(AsmToken::Comma))
4291 return Error(L, "unexpected token in directive");
4300 // parseDirectiveTLSDescCall:
4301 // ::= .tlsdesccall symbol
// Emits a TLSDESCCALL pseudo-instruction carrying a VK_TLSDESC
// reference to the named symbol, marking the TLS-descriptor call site
// for later relocation. Returns true on error.
4302 bool ARM64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
4304 if (getParser().parseIdentifier(Name))
4305 return Error(L, "expected symbol after directive");
// Wrap the symbol reference in the ARM64-specific TLSDESC variant.
4307 MCSymbol *Sym = getContext().GetOrCreateSymbol(Name);
4308 const MCExpr *Expr = MCSymbolRefExpr::Create(Sym, getContext());
4309 Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_TLSDESC, getContext());
// Build and emit the pseudo-instruction with the expression operand.
4312 Inst.setOpcode(ARM64::TLSDESCCALL);
4313 Inst.addOperand(MCOperand::CreateExpr(Expr));
4315 getParser().getStreamer().EmitInstruction(Inst, STI);
4319 /// ::= .loh <lohName | lohId> label1, ..., labelN
4320 /// The number of arguments depends on the loh identifier.
// Parses a Linker Optimization Hint directive: the LOH kind may be
// spelled by name or by its numeric id, followed by exactly the number
// of comma-separated label arguments that kind requires. Returns true
// on error.
4321 bool ARM64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
4322 if (IDVal != MCLOHDirectiveName())
// Numeric spelling of the LOH kind.
4325 if (getParser().getTok().isNot(AsmToken::Identifier)) {
4326 if (getParser().getTok().isNot(AsmToken::Integer))
4327 return TokError("expected an identifier or a number in directive");
4328 // We successfully get a numeric value for the identifier.
4329 // Check if it is valid.
4330 int64_t Id = getParser().getTok().getIntVal();
4331 Kind = (MCLOHType)Id;
4332 // Check that Id does not overflow MCLOHType.
4333 if (!isValidMCLOHType(Kind) || Id != Kind)
4334 return TokError("invalid numeric identifier in directive");
4336 StringRef Name = getTok().getIdentifier();
4337 // We successfully parse an identifier.
4338 // Check if it is a recognized one.
4339 int Id = MCLOHNameToId(Name);
4342 return TokError("invalid identifier in directive");
4343 Kind = (MCLOHType)Id;
4345 // Consume the identifier.
4347 // Get the number of arguments of this LOH.
4348 int NbArgs = MCLOHIdToNbArgs(Kind);
4350 assert(NbArgs != -1 && "Invalid number of arguments");
// Collect exactly NbArgs symbol arguments, comma-separated.
4352 SmallVector<MCSymbol *, 3> Args;
4353 for (int Idx = 0; Idx < NbArgs; ++Idx) {
4355 if (getParser().parseIdentifier(Name))
4356 return TokError("expected identifier in directive");
4357 Args.push_back(getContext().GetOrCreateSymbol(Name));
// No separator after the last argument.
4359 if (Idx + 1 == NbArgs)
4361 if (getLexer().isNot(AsmToken::Comma))
4362 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
// The directive must end cleanly before the hint is emitted.
4365 if (getLexer().isNot(AsmToken::EndOfStatement))
4366 return TokError("unexpected token in '" + Twine(IDVal) + "' directive");
4368 getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
// Classify Expr into an optional ARM64 (ELF) reference modifier, an
// optional Darwin symbol-ref variant, and an optional constant addend
// (all returned through the out-parameters). Returns true when the
// expression is a recognized form that does not mix Darwin and ELF
// reference syntax.
4373 ARM64AsmParser::classifySymbolRef(const MCExpr *Expr,
4374 ARM64MCExpr::VariantKind &ELFRefKind,
4375 MCSymbolRefExpr::VariantKind &DarwinRefKind,
4376 const MCConstantExpr *&Addend) {
4377 ELFRefKind = ARM64MCExpr::VK_INVALID;
4378 DarwinRefKind = MCSymbolRefExpr::VK_None;
// Peel off an ARM64-specific modifier wrapper and classify what it
// wraps.
4380 if (const ARM64MCExpr *AE = dyn_cast<ARM64MCExpr>(Expr)) {
4381 ELFRefKind = AE->getKind();
4382 Expr = AE->getSubExpr();
4385 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
4387 // It's a simple symbol reference with no addend.
4388 DarwinRefKind = SE->getKind();
// Otherwise expect a binary expression whose LHS is the symbol ref.
4393 const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr);
4397 SE = dyn_cast<MCSymbolRefExpr>(BE->getLHS());
4400 DarwinRefKind = SE->getKind();
// Only symbol + constant is supported.
4402 if (BE->getOpcode() != MCBinaryExpr::Add)
4405 // See if the addend is a constant, otherwise there's more going
4406 // on here than we can deal with.
4407 Addend = dyn_cast<MCConstantExpr>(BE->getRHS());
4411 // It's some symbol reference + a constant addend, but really
4412 // shouldn't use both Darwin and ELF syntax.
4413 return ELFRefKind == ARM64MCExpr::VK_INVALID ||
4414 DarwinRefKind == MCSymbolRefExpr::VK_None;
4417 /// Force static initialization.
// Registers this asm parser with the target registry so TargetRegistry
// clients can instantiate it for the ARM64 target.
4418 extern "C" void LLVMInitializeARM64AsmParser() {
4419 RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
4422 #define GET_REGISTER_MATCHER
4423 #define GET_MATCHER_IMPLEMENTATION
4424 #include "ARM64GenAsmMatcher.inc"
4426 // Define this matcher function after the auto-generated include so we
4427 // have the match class enum definitions.
// Validates a target-specific operand class: for match-class kinds that
// represent a fixed literal immediate (used by InstAliases), verify the
// parsed operand is a constant immediate equal to the expected value.
// Returns Match_Success on a match, Match_InvalidOperand otherwise.
4428 unsigned ARM64AsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
4430 ARM64Operand *Op = static_cast<ARM64Operand *>(AsmOp);
4431 // If the kind is a token for a literal immediate, check if our asm
4432 // operand matches. This is for InstAliases which have a fixed-value
4433 // immediate in the syntax.
4434 int64_t ExpectedVal;
4437 return Match_InvalidOperand;
4479 return Match_InvalidOperand;
// Only a foldable constant expression can satisfy a literal match.
4480 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4482 return Match_InvalidOperand;
4483 if (CE->getValue() == ExpectedVal)
4484 return Match_Success;
4485 return Match_InvalidOperand;