SMLoc getLoc() const { return Parser.getTok().getLoc(); }
bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
- unsigned parseCondCodeString(StringRef Cond);
+ ARM64CC::CondCode parseCondCodeString(StringRef Cond);
bool parseCondCode(OperandVector &Operands, bool invertCondCode);
int tryParseRegister();
int tryMatchVectorRegister(StringRef &Kind, bool expected);
- bool parseOptionalShift(OperandVector &Operands);
- bool parseOptionalExtend(OperandVector &Operands);
bool parseRegister(OperandVector &Operands);
- bool parseMemory(OperandVector &Operands);
bool parseSymbolicImmVal(const MCExpr *&ImmVal);
bool parseVectorList(OperandVector &Operands);
bool parseOperand(OperandVector &Operands, bool isCondCode,
/// }
- OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
+ OperandMatchResultTy tryParseOptionalShiftExtend(OperandVector &Operands);
OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
OperandMatchResultTy tryParseAdrLabel(OperandVector &Operands);
OperandMatchResultTy tryParseFPImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseAddSubImm(OperandVector &Operands);
+ OperandMatchResultTy tryParseGPR64sp0Operand(OperandVector &Operands);
bool tryParseVectorRegister(OperandVector &Operands);
public:
/// ARM64Operand - Instances of this class represent a parsed ARM64 machine
/// instruction.
class ARM64Operand : public MCParsedAsmOperand {
-public:
- enum MemIdxKindTy {
- ImmediateOffset, // pre-indexed, no writeback
- RegisterOffset // register offset, with optional extend
- };
-
private:
enum KindTy {
k_Immediate,
- k_Memory,
+ k_ShiftedImm,
+ k_CondCode,
k_Register,
k_VectorList,
k_VectorIndex,
k_SysReg,
k_SysCR,
k_Prefetch,
- k_Shifter,
- k_Extend,
+ k_ShiftExtend,
k_FPImm,
k_Barrier
} Kind;
- SMLoc StartLoc, EndLoc, OffsetLoc;
+ SMLoc StartLoc, EndLoc;
struct TokOp {
const char *Data;
const MCExpr *Val;
};
+ // An immediate operand carrying an explicit left-shift amount,
+ // e.g. "#1, lsl #12" as accepted by ADD/SUB immediates.
+ struct ShiftedImmOp {
+ const MCExpr *Val;
+ unsigned ShiftAmount;
+ };
+
+ // A parsed condition-code operand (stored as ARM64CC::CondCode).
+ struct CondCodeOp {
+ ARM64CC::CondCode Code;
+ };
+
struct FPImmOp {
unsigned Val; // Encoded 8-bit representation.
};
unsigned Val;
};
- struct ShifterOp {
- unsigned Val;
+ struct ShiftExtendOp {
+ ARM64_AM::ShiftExtendType Type;
+ unsigned Amount;
+ bool HasExplicitAmount;
};
struct ExtendOp {
unsigned Val;
};
- // This is for all forms of ARM64 address expressions
- struct MemOp {
- unsigned BaseRegNum, OffsetRegNum;
- ARM64_AM::ExtendType ExtType;
- unsigned ShiftVal;
- bool ExplicitShift;
- const MCExpr *OffsetImm;
- MemIdxKindTy Mode;
- };
-
union {
struct TokOp Tok;
struct RegOp Reg;
struct VectorListOp VectorList;
struct VectorIndexOp VectorIndex;
struct ImmOp Imm;
+ struct ShiftedImmOp ShiftedImm;
+ struct CondCodeOp CondCode;
struct FPImmOp FPImm;
struct BarrierOp Barrier;
struct SysRegOp SysReg;
struct SysCRImmOp SysCRImm;
struct PrefetchOp Prefetch;
- struct ShifterOp Shifter;
- struct ExtendOp Extend;
- struct MemOp Mem;
+ struct ShiftExtendOp ShiftExtend;
};
// Keep the MCContext around as the MCExprs may need manipulated during
case k_Immediate:
Imm = o.Imm;
break;
+ case k_ShiftedImm:
+ ShiftedImm = o.ShiftedImm;
+ break;
+ case k_CondCode:
+ CondCode = o.CondCode;
+ break;
case k_FPImm:
FPImm = o.FPImm;
break;
case k_Prefetch:
Prefetch = o.Prefetch;
break;
- case k_Memory:
- Mem = o.Mem;
- break;
- case k_Shifter:
- Shifter = o.Shifter;
- break;
- case k_Extend:
- Extend = o.Extend;
+ case k_ShiftExtend:
+ ShiftExtend = o.ShiftExtend;
break;
}
}
SMLoc getStartLoc() const override { return StartLoc; }
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }
- /// getOffsetLoc - Get the location of the offset of this memory operand.
- SMLoc getOffsetLoc() const { return OffsetLoc; }
StringRef getToken() const {
assert(Kind == k_Token && "Invalid access!");
return Imm.Val;
}
+ // Immediate part of a k_ShiftedImm operand (the unshifted value).
+ const MCExpr *getShiftedImmVal() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.Val;
+ }
+
+ // Left-shift amount of a k_ShiftedImm operand.
+ unsigned getShiftedImmShift() const {
+ assert(Kind == k_ShiftedImm && "Invalid access!");
+ return ShiftedImm.ShiftAmount;
+ }
+
+ // Condition code of a k_CondCode operand.
+ ARM64CC::CondCode getCondCode() const {
+ assert(Kind == k_CondCode && "Invalid access!");
+ return CondCode.Code;
+ }
+
unsigned getFPImm() const {
assert(Kind == k_FPImm && "Invalid access!");
return FPImm.Val;
return Prefetch.Val;
}
- unsigned getShifter() const {
- assert(Kind == k_Shifter && "Invalid access!");
- return Shifter.Val;
+ ARM64_AM::ShiftExtendType getShiftExtendType() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Type;
+ }
+
+ unsigned getShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.Amount;
}
- unsigned getExtend() const {
- assert(Kind == k_Extend && "Invalid access!");
- return Extend.Val;
+ bool hasShiftExtendAmount() const {
+ assert(Kind == k_ShiftExtend && "Invalid access!");
+ return ShiftExtend.HasExplicitAmount;
}
bool isImm() const override { return Kind == k_Immediate; }
+ bool isMem() const override { return false; }
bool isSImm9() const {
if (!isImm())
return false;
int64_t Val = MCE->getValue();
return (Val >= -1024 && Val <= 1008 && (Val & 15) == 0);
}
+
+ // True if Expr is a symbolic reference acceptable as an unsigned 12-bit,
+ // Scale-byte-scaled load/store offset (a :lo12:-style ELF variant or a
+ // Darwin @pageoff), with any addend a non-negative multiple of Scale.
+ bool isSymbolicUImm12Offset(const MCExpr *Expr, unsigned Scale) const {
+ ARM64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
+ Addend)) {
+ // If we don't understand the expression, assume the best and
+ // let the fixup and relocation code deal with it.
+ return true;
+ }
+
+ if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
+ ELFRefKind == ARM64MCExpr::VK_LO12 ||
+ ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
+ ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
+ ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
+ ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
+ ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
+ ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
+ ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
+ // Note that we don't range-check the addend. It's adjusted modulo page
+ // size when converted, so there is no "out of range" condition when using
+ // @pageoff.
+ return Addend >= 0 && (Addend % Scale) == 0;
+ } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
+ // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
+ return Addend == 0;
+ }
+
+ return false;
+ }
+
+ // True if this immediate fits the scaled unsigned-12-bit offset form:
+ // a constant that is a non-negative multiple of Scale with quotient
+ // below 0x1000, or a recognized symbolic low-12-bit reference.
+ template <int Scale> bool isUImm12Offset() const {
+ if (!isImm())
+ return false;
+
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return isSymbolicUImm12Offset(getImm(), Scale);
+
+ int64_t Val = MCE->getValue();
+ return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
+ }
+
bool isImm0_7() const {
if (!isImm())
return false;
int64_t Val = MCE->getValue();
return (Val >= 0 && Val < 65536);
}
+ // True for a constant immediate in the range [32, 63].
+ bool isImm32_63() const {
+ if (!isImm())
+ return false;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ return false;
+ int64_t Val = MCE->getValue();
+ return (Val >= 32 && Val < 64);
+ }
bool isLogicalImm32() const {
if (!isImm())
return false;
return false;
return ARM64_AM::isLogicalImmediate(MCE->getValue(), 64);
}
+ // True for an immediate parsed with an explicit shift ("#imm, lsl #N").
+ bool isShiftedImm() const { return Kind == k_ShiftedImm; }
+ // True for operands usable as an ADD/SUB immediate: a plain immediate in
+ // [0, 0xfff], the same optionally shifted left by 0 or 12, or a supported
+ // symbolic reference to the low 12 bits of an address.
+ bool isAddSubImm() const {
+ if (!isShiftedImm() && !isImm())
+ return false;
+
+ const MCExpr *Expr;
+
+ // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
+ if (isShiftedImm()) {
+ unsigned Shift = ShiftedImm.ShiftAmount;
+ Expr = ShiftedImm.Val;
+ if (Shift != 0 && Shift != 12)
+ return false;
+ } else {
+ Expr = getImm();
+ }
+
+ ARM64MCExpr::VariantKind ELFRefKind;
+ MCSymbolRefExpr::VariantKind DarwinRefKind;
+ int64_t Addend;
+ if (ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind,
+ DarwinRefKind, Addend)) {
+ return DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF
+ || DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF
+ || (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF && Addend == 0)
+ || ELFRefKind == ARM64MCExpr::VK_LO12
+ || ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12
+ || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12
+ || ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC
+ || ELFRefKind == ARM64MCExpr::VK_TPREL_HI12
+ || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12
+ || ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC
+ || ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12;
+ }
+
+ // Otherwise it should be a real immediate in range:
+ // NOTE(review): a non-constant expression that classifySymbolRef rejected
+ // would assert in this cast — presumably the parser only produces
+ // constants or classifiable symbol refs here; confirm.
+ const MCConstantExpr *CE = cast<MCConstantExpr>(Expr);
+ return CE->getValue() >= 0 && CE->getValue() <= 0xfff;
+ }
+ bool isCondCode() const { return Kind == k_CondCode; }
bool isSIMDImmType10() const {
if (!isImm())
return false;
return isMovWSymbol(Variants);
}
+ // True if this constant can be materialized by a single
+ // "MOVZ Rd, #imm16, lsl #Shift": only bits [Shift+15 : Shift] may be set
+ // (after truncating to 32 bits for W registers).
+ template<int RegWidth, int Shift>
+ bool isMOVZMovAlias() const {
+ if (!isImm()) return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
+
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ // "lsl #0" takes precedence: in practice this only affects "#0, lsl #0".
+ if (Value == 0 && Shift != 0)
+ return false;
+
+ return (Value & ~(0xffffULL << Shift)) == 0;
+ }
+
+ // True if this constant can be materialized by a single
+ // "MOVN Rd, #imm16, lsl #Shift": the bitwise complement (truncated to 32
+ // bits for W registers) fits in bits [Shift+15 : Shift], and no MOVZ form
+ // matches the original value.
+ template<int RegWidth, int Shift>
+ bool isMOVNMovAlias() const {
+ if (!isImm()) return false;
+
+ const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+ if (!CE) return false;
+ uint64_t Value = CE->getValue();
+
+ // MOVZ takes precedence over MOVN.
+ for (int MOVZShift = 0; MOVZShift <= 48; MOVZShift += 16)
+ if ((Value & ~(0xffffULL << MOVZShift)) == 0)
+ return false;
+
+ Value = ~Value;
+ if (RegWidth == 32)
+ Value &= 0xffffffffULL;
+
+ return (Value & ~(0xffffULL << Shift)) == 0;
+ }
+
bool isFPImm() const { return Kind == k_FPImm; }
bool isBarrier() const { return Kind == k_Barrier; }
bool isSysReg() const { return Kind == k_SysReg; }
return Kind == k_Register && Reg.isVector &&
ARM64MCRegisterClasses[ARM64::FPR128_loRegClassID].contains(Reg.RegNum);
}
+ // A scalar register from the 64-bit GPR class, used where the instruction
+ // actually wants the corresponding 32-bit register (see
+ // addGPR32as64Operands for the substitution).
+ bool isGPR32as64() const {
+ return Kind == k_Register && !Reg.isVector &&
+ ARM64MCRegisterClasses[ARM64::GPR64RegClassID].contains(Reg.RegNum);
+ }
+
+ // A scalar register from the GPR64sp class (64-bit GPRs including SP).
+ bool isGPR64sp0() const {
+ return Kind == k_Register && !Reg.isVector &&
+ ARM64MCRegisterClasses[ARM64::GPR64spRegClassID].contains(Reg.RegNum);
+ }
/// Is this a vector list with the type implicit (presumably attached to the
/// instruction itself)?
return VectorList.NumElements == NumElements;
}
+ // A vector lane index that must be exactly 1.
+ bool isVectorIndex1() const {
+ return Kind == k_VectorIndex && VectorIndex.Val == 1;
+ }
bool isVectorIndexB() const {
return Kind == k_VectorIndex && VectorIndex.Val < 16;
}
bool isTokenEqual(StringRef Str) const {
return Kind == k_Token && getToken() == Str;
}
- bool isMem() const override { return Kind == k_Memory; }
bool isSysCR() const { return Kind == k_SysCR; }
bool isPrefetch() const { return Kind == k_Prefetch; }
- bool isShifter() const { return Kind == k_Shifter; }
+ bool isShiftExtend() const { return Kind == k_ShiftExtend; }
+ bool isShifter() const {
+ if (!isShiftExtend())
+ return false;
+
+ ARM64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
+ ST == ARM64_AM::ROR || ST == ARM64_AM::MSL);
+ }
bool isExtend() const {
- // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
- if (isShifter()) {
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
- return ST == ARM64_AM::LSL;
- }
- return Kind == k_Extend;
+ if (!isShiftExtend())
+ return false;
+
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == ARM64_AM::UXTB || ET == ARM64_AM::SXTB ||
+ ET == ARM64_AM::UXTH || ET == ARM64_AM::SXTH ||
+ ET == ARM64_AM::UXTW || ET == ARM64_AM::SXTW ||
+ ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX ||
+ ET == ARM64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
}
+
bool isExtend64() const {
- if (Kind != k_Extend)
+ if (!isExtend())
return false;
// UXTX and SXTX require a 64-bit source register (the ExtendLSL64 class).
- ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
return ET != ARM64_AM::UXTX && ET != ARM64_AM::SXTX;
}
bool isExtendLSL64() const {
- // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
- if (isShifter()) {
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
- return ST == ARM64_AM::LSL;
- }
- if (Kind != k_Extend)
+ if (!isExtend())
+ return false;
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX || ET == ARM64_AM::LSL) &&
+ getShiftExtendAmount() <= 4;
+ }
+
+ template<int Width> bool isMemXExtend() const {
+ if (!isExtend())
return false;
- ARM64_AM::ExtendType ET = ARM64_AM::getArithExtendType(Extend.Val);
- return ET == ARM64_AM::UXTX || ET == ARM64_AM::SXTX;
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == ARM64_AM::LSL || ET == ARM64_AM::SXTX) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
}
+ // A W-register offset extend (UXTW/SXTW) valid for a Width-bit access:
+ // the shift amount is either 0 or log2(Width / 8).
+ template<int Width> bool isMemWExtend() const {
+ if (!isExtend())
+ return false;
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ return (ET == ARM64_AM::UXTW || ET == ARM64_AM::SXTW) &&
+ (getShiftExtendAmount() == Log2_32(Width / 8) ||
+ getShiftExtendAmount() == 0);
+ }
+
+
+ template <unsigned width>
bool isArithmeticShifter() const {
if (!isShifter())
return false;
// An arithmetic shifter is LSL, LSR, or ASR.
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
- return ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR;
+ ARM64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR ||
+ ST == ARM64_AM::ASR) && getShiftExtendAmount() < width;
+ }
+
+ template <unsigned width>
+ bool isLogicalShifter() const {
+ if (!isShifter())
+ return false;
+
+ // A logical shifter is LSL, LSR, ASR or ROR.
+ ARM64_AM::ShiftExtendType ST = getShiftExtendType();
+ return (ST == ARM64_AM::LSL || ST == ARM64_AM::LSR || ST == ARM64_AM::ASR ||
+ ST == ARM64_AM::ROR) &&
+ getShiftExtendAmount() < width;
}
bool isMovImm32Shifter() const {
return false;
// A MOVi shifter is LSL of 0, 16, 32, or 48.
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
+ ARM64_AM::ShiftExtendType ST = getShiftExtendType();
if (ST != ARM64_AM::LSL)
return false;
- uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
+ uint64_t Val = getShiftExtendAmount();
return (Val == 0 || Val == 16);
}
return false;
// A MOVi shifter is LSL of 0 or 16.
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(Shifter.Val);
+ ARM64_AM::ShiftExtendType ST = getShiftExtendType();
if (ST != ARM64_AM::LSL)
return false;
- uint64_t Val = ARM64_AM::getShiftValue(Shifter.Val);
+ uint64_t Val = getShiftExtendAmount();
return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
}
- bool isAddSubShifter() const {
- if (!isShifter())
- return false;
-
- // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
- unsigned Val = Shifter.Val;
- return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
- (ARM64_AM::getShiftValue(Val) == 0 ||
- ARM64_AM::getShiftValue(Val) == 12);
- }
-
bool isLogicalVecShifter() const {
if (!isShifter())
return false;
// A logical vector shifter is a left shift by 0, 8, 16, or 24.
- unsigned Val = Shifter.Val;
- unsigned Shift = ARM64_AM::getShiftValue(Val);
- return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == ARM64_AM::LSL &&
(Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
}
return false;
// A logical vector shifter is a left shift by 0 or 8.
- unsigned Val = Shifter.Val;
- unsigned Shift = ARM64_AM::getShiftValue(Val);
- return ARM64_AM::getShiftType(Val) == ARM64_AM::LSL &&
- (Shift == 0 || Shift == 8);
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == ARM64_AM::LSL && (Shift == 0 || Shift == 8);
}
bool isMoveVecShifter() const {
- if (!isShifter())
+ if (!isShiftExtend())
return false;
// A logical vector shifter is a left shift by 8 or 16.
- unsigned Val = Shifter.Val;
- unsigned Shift = ARM64_AM::getShiftValue(Val);
- return ARM64_AM::getShiftType(Val) == ARM64_AM::MSL &&
- (Shift == 8 || Shift == 16);
- }
-
- bool isMemoryRegisterOffset8() const {
- return isMem() && Mem.Mode == RegisterOffset && Mem.ShiftVal == 0;
- }
-
- bool isMemoryRegisterOffset16() const {
- return isMem() && Mem.Mode == RegisterOffset &&
- (Mem.ShiftVal == 0 || Mem.ShiftVal == 1);
- }
-
- bool isMemoryRegisterOffset32() const {
- return isMem() && Mem.Mode == RegisterOffset &&
- (Mem.ShiftVal == 0 || Mem.ShiftVal == 2);
- }
-
- bool isMemoryRegisterOffset64() const {
- return isMem() && Mem.Mode == RegisterOffset &&
- (Mem.ShiftVal == 0 || Mem.ShiftVal == 3);
- }
-
- bool isMemoryRegisterOffset128() const {
- return isMem() && Mem.Mode == RegisterOffset &&
- (Mem.ShiftVal == 0 || Mem.ShiftVal == 4);
+ unsigned Shift = getShiftExtendAmount();
+ return getShiftExtendType() == ARM64_AM::MSL && (Shift == 8 || Shift == 16);
}
- bool isMemoryUnscaled() const {
- if (!isMem())
- return false;
- if (Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- // Make sure the immediate value is valid.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- if (!CE)
- return false;
- // The offset must fit in a signed 9-bit unscaled immediate.
- int64_t Value = CE->getValue();
- return (Value >= -256 && Value < 256);
- }
// Fallback unscaled operands are for aliases of LDR/STR that fall back
// to LDUR/STUR when the offset is not legal for the former but is for
// the latter. As such, in addition to checking for being a legal unscaled
// address, also check that it is not a legal scaled address. This avoids
// ambiguity in the matcher.
- bool isMemoryUnscaledFB8() const {
- return isMemoryUnscaled() && !isMemoryIndexed8();
- }
- bool isMemoryUnscaledFB16() const {
- return isMemoryUnscaled() && !isMemoryIndexed16();
- }
- bool isMemoryUnscaledFB32() const {
- return isMemoryUnscaled() && !isMemoryIndexed32();
- }
- bool isMemoryUnscaledFB64() const {
- return isMemoryUnscaled() && !isMemoryIndexed64();
- }
- bool isMemoryUnscaledFB128() const {
- return isMemoryUnscaled() && !isMemoryIndexed128();
- }
- bool isMemoryIndexed(unsigned Scale) const {
- if (!isMem())
- return false;
- if (Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- // Make sure the immediate value is valid.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
-
- if (CE) {
- // The offset must be a positive multiple of the scale and in range of
- // encoding with a 12-bit immediate.
- int64_t Value = CE->getValue();
- return (Value >= 0 && (Value % Scale) == 0 && Value <= (4095 * Scale));
- }
-
- // If it's not a constant, check for some expressions we know.
- const MCExpr *Expr = Mem.OffsetImm;
- ARM64MCExpr::VariantKind ELFRefKind;
- MCSymbolRefExpr::VariantKind DarwinRefKind;
- int64_t Addend;
- if (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
- Addend)) {
- // If we don't understand the expression, assume the best and
- // let the fixup and relocation code deal with it.
- return true;
- }
-
- if (DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
- ELFRefKind == ARM64MCExpr::VK_LO12 ||
- ELFRefKind == ARM64MCExpr::VK_GOT_LO12 ||
- ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
- ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
- ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
- ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
- ELFRefKind == ARM64MCExpr::VK_GOTTPREL_LO12_NC ||
- ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
- // Note that we don't range-check the addend. It's adjusted modulo page
- // size when converted, so there is no "out of range" condition when using
- // @pageoff.
- return Addend >= 0 && (Addend % Scale) == 0;
- } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF ||
- DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) {
- // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
- return Addend == 0;
- }
-
- return false;
- }
- bool isMemoryIndexed128() const { return isMemoryIndexed(16); }
- bool isMemoryIndexed64() const { return isMemoryIndexed(8); }
- bool isMemoryIndexed32() const { return isMemoryIndexed(4); }
- bool isMemoryIndexed16() const { return isMemoryIndexed(2); }
- bool isMemoryIndexed8() const { return isMemoryIndexed(1); }
- bool isMemoryNoIndex() const {
- if (!isMem())
- return false;
- if (Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
-
- // Make sure the immediate value is valid. Only zero is allowed.
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- if (!CE || CE->getValue() != 0)
- return false;
- return true;
- }
- bool isMemorySIMDNoIndex() const {
- if (!isMem())
- return false;
- if (Mem.Mode != ImmediateOffset)
- return false;
- return Mem.OffsetImm == nullptr;
- }
- bool isMemoryIndexedSImm9() const {
- if (!isMem() || Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- assert(CE && "Non-constant pre-indexed offset!");
- int64_t Value = CE->getValue();
- return Value >= -256 && Value <= 255;
- }
- bool isMemoryIndexed32SImm7() const {
- if (!isMem() || Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- assert(CE && "Non-constant pre-indexed offset!");
- int64_t Value = CE->getValue();
- return ((Value % 4) == 0) && Value >= -256 && Value <= 252;
- }
- bool isMemoryIndexed64SImm7() const {
- if (!isMem() || Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- assert(CE && "Non-constant pre-indexed offset!");
- int64_t Value = CE->getValue();
- return ((Value % 8) == 0) && Value >= -512 && Value <= 504;
- }
- bool isMemoryIndexed128SImm7() const {
- if (!isMem() || Mem.Mode != ImmediateOffset)
- return false;
- if (!Mem.OffsetImm)
- return true;
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- assert(CE && "Non-constant pre-indexed offset!");
- int64_t Value = CE->getValue();
- return ((Value % 16) == 0) && Value >= -1024 && Value <= 1008;
+ // Fallback unscaled offset for LDUR/STUR aliases of LDR/STR: legal as a
+ // signed 9-bit offset but NOT as a scaled uimm12 offset for a Width-bit
+ // access, so the matcher never sees both forms as candidates.
+ template<int Width>
+ bool isSImm9OffsetFB() const {
+ return isSImm9() && !isUImm12Offset<Width / 8>();
}
bool isAdrpLabel() const {
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
- void addVectorRegOperands(MCInst &Inst, unsigned N) const {
+ void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
+ assert(ARM64MCRegisterClasses[ARM64::GPR64RegClassID].contains(getReg()));
+
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ uint32_t Reg = RI->getRegClass(ARM64::GPR32RegClassID).getRegister(
+ RI->getEncodingValue(getReg()));
+
+ Inst.addOperand(MCOperand::CreateReg(Reg));
+ }
+
+ void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(ARM64MCRegisterClasses[ARM64::FPR128RegClassID].contains(getReg()));
+ Inst.addOperand(MCOperand::CreateReg(ARM64::D0 + getReg() - ARM64::Q0));
+ }
+
+ void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ assert(ARM64MCRegisterClasses[ARM64::FPR128RegClassID].contains(getReg()));
Inst.addOperand(MCOperand::CreateReg(getReg()));
}
MCOperand::CreateReg(FirstReg + getVectorListStart() - ARM64::Q0));
}
+ // Emits the (always-1) vector lane index as an immediate.
+ void addVectorIndex1Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
+ }
+
void addVectorIndexBOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
addExpr(Inst, getImm());
}
+ // Emits an ADD/SUB immediate as two MC operands: the value expression and
+ // the shift amount (0 when the operand carried no explicit shift).
+ void addAddSubImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ if (isShiftedImm()) {
+ addExpr(Inst, getShiftedImmVal());
+ Inst.addOperand(MCOperand::CreateImm(getShiftedImmShift()));
+ } else {
+ addExpr(Inst, getImm());
+ Inst.addOperand(MCOperand::CreateImm(0));
+ }
+ }
+
+ // Emits the condition code as an immediate operand.
+ void addCondCodeOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::CreateImm(getCondCode()));
+ }
+
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
addImmOperands(Inst, N);
}
+ // Emits a scaled uimm12 offset: constant offsets are pre-divided by Scale
+ // (matching the instruction encoding); non-constant (symbolic) offsets are
+ // passed through as expressions for the fixup code to resolve.
+ template<int Scale>
+ void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+
+ if (!MCE) {
+ Inst.addOperand(MCOperand::CreateExpr(getImm()));
+ return;
+ }
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() / Scale));
+ }
+
+
void addSImm9Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
}
+ // Emits the raw constant immediate; the operand is known constant here
+ // because isImm32_63 only accepts MCConstantExpr.
+ void addImm32_63Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ assert(MCE && "Invalid constant immediate operand!");
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue()));
+ }
+
void addLogicalImm32Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
void addShifterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ unsigned Imm =
+ ARM64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
}
- void addArithmeticShifterOperands(MCInst &Inst, unsigned N) const {
+ void addExtendOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTW;
+ unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
}
- void addMovImm32ShifterOperands(MCInst &Inst, unsigned N) const {
+ void addExtend64Operands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ if (ET == ARM64_AM::LSL) ET = ARM64_AM::UXTX;
+ unsigned Imm = ARM64_AM::getArithExtendImm(ET, getShiftExtendAmount());
+ Inst.addOperand(MCOperand::CreateImm(Imm));
}
- void addMovImm64ShifterOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ // Register-offset extend as two immediates: whether the extend is signed
+ // (SXTW/SXTX) and whether any non-zero shift amount was written.
+ void addMemExtendOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == ARM64_AM::SXTW || ET == ARM64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(getShiftExtendAmount() != 0));
}
- void addAddSubShifterOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ // For 8-bit load/store instructions with a register offset, both the
+ // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
+ // they're disambiguated by whether the shift was explicit or implicit rather
+ // than its size.
+ void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+ ARM64_AM::ShiftExtendType ET = getShiftExtendType();
+ bool IsSigned = ET == ARM64_AM::SXTW || ET == ARM64_AM::SXTX;
+ Inst.addOperand(MCOperand::CreateImm(IsSigned));
+ Inst.addOperand(MCOperand::CreateImm(hasShiftExtendAmount()));
}
- void addLogicalVecShifterOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
- }
- void addLogicalVecHalfWordShifterOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((Value >> Shift) & 0xffff));
}
- void addMoveVecShifterOperands(MCInst &Inst, unsigned N) const {
+ template<int Shift>
+ void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getShifter()));
- }
- void addExtendOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- // lsl is an alias for UXTW but will be a parsed as a k_Shifter operand.
- if (isShifter()) {
- assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
- unsigned imm = getArithExtendImm(ARM64_AM::UXTW,
- ARM64_AM::getShiftValue(getShifter()));
- Inst.addOperand(MCOperand::CreateImm(imm));
- } else
- Inst.addOperand(MCOperand::CreateImm(getExtend()));
- }
-
- void addExtend64Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getExtend()));
- }
-
- void addExtendLSL64Operands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && "Invalid number of operands!");
- // lsl is an alias for UXTX but will be a parsed as a k_Shifter operand.
- if (isShifter()) {
- assert(ARM64_AM::getShiftType(getShifter()) == ARM64_AM::LSL);
- unsigned imm = getArithExtendImm(ARM64_AM::UXTX,
- ARM64_AM::getShiftValue(getShifter()));
- Inst.addOperand(MCOperand::CreateImm(imm));
- } else
- Inst.addOperand(MCOperand::CreateImm(getExtend()));
- }
-
- void addMemoryRegisterOffsetOperands(MCInst &Inst, unsigned N, bool DoShift) {
- assert(N == 3 && "Invalid number of operands!");
-
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
- Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
- unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
- Inst.addOperand(MCOperand::CreateImm(ExtendImm));
- }
-
- void addMemoryRegisterOffset8Operands(MCInst &Inst, unsigned N) {
- addMemoryRegisterOffsetOperands(Inst, N, Mem.ExplicitShift);
- }
-
- void addMemoryRegisterOffset16Operands(MCInst &Inst, unsigned N) {
- addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 1);
- }
-
- void addMemoryRegisterOffset32Operands(MCInst &Inst, unsigned N) {
- addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 2);
- }
-
- void addMemoryRegisterOffset64Operands(MCInst &Inst, unsigned N) {
- addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 3);
- }
-
- void addMemoryRegisterOffset128Operands(MCInst &Inst, unsigned N) {
- addMemoryRegisterOffsetOperands(Inst, N, Mem.ShiftVal == 4);
- }
-
- void addMemoryIndexedOperands(MCInst &Inst, unsigned N,
- unsigned Scale) const {
- // Add the base register operand.
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
-
- if (!Mem.OffsetImm) {
- // There isn't an offset.
- Inst.addOperand(MCOperand::CreateImm(0));
- return;
- }
-
- // Add the offset operand.
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm)) {
- assert(CE->getValue() % Scale == 0 &&
- "Offset operand must be multiple of the scale!");
-
- // The MCInst offset operand doesn't include the low bits (like the
- // instruction encoding).
- Inst.addOperand(MCOperand::CreateImm(CE->getValue() / Scale));
- }
-
- // If this is a pageoff symrefexpr with an addend, the linker will
- // do the scaling of the addend.
- //
- // Otherwise we don't know what this is, so just add the scaling divide to
- // the expression and let the MC fixup evaluation code deal with it.
- const MCExpr *Expr = Mem.OffsetImm;
- ARM64MCExpr::VariantKind ELFRefKind;
- MCSymbolRefExpr::VariantKind DarwinRefKind;
- int64_t Addend;
- if (Scale > 1 &&
- (!ARM64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
- Addend) ||
- (Addend != 0 && DarwinRefKind != MCSymbolRefExpr::VK_PAGEOFF))) {
- Expr = MCBinaryExpr::CreateDiv(Expr, MCConstantExpr::Create(Scale, Ctx),
- Ctx);
- }
-
- Inst.addOperand(MCOperand::CreateExpr(Expr));
- }
-
- void addMemoryUnscaledOperands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryUnscaled() && "Invalid number of operands!");
- // Add the base register operand.
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
-
- // Add the offset operand.
- if (!Mem.OffsetImm)
- Inst.addOperand(MCOperand::CreateImm(0));
- else {
- // Only constant offsets supported.
- const MCConstantExpr *CE = cast<MCConstantExpr>(Mem.OffsetImm);
- Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
- }
- }
-
- void addMemoryIndexed128Operands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryIndexed128() && "Invalid number of operands!");
- addMemoryIndexedOperands(Inst, N, 16);
- }
-
- void addMemoryIndexed64Operands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryIndexed64() && "Invalid number of operands!");
- addMemoryIndexedOperands(Inst, N, 8);
- }
-
- void addMemoryIndexed32Operands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryIndexed32() && "Invalid number of operands!");
- addMemoryIndexedOperands(Inst, N, 4);
- }
-
- void addMemoryIndexed16Operands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryIndexed16() && "Invalid number of operands!");
- addMemoryIndexedOperands(Inst, N, 2);
- }
-
- void addMemoryIndexed8Operands(MCInst &Inst, unsigned N) const {
- assert(N == 2 && isMemoryIndexed8() && "Invalid number of operands!");
- addMemoryIndexedOperands(Inst, N, 1);
- }
-
- void addMemoryNoIndexOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && isMemoryNoIndex() && "Invalid number of operands!");
- // Add the base register operand (the offset is always zero, so ignore it).
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
- }
-
- void addMemorySIMDNoIndexOperands(MCInst &Inst, unsigned N) const {
- assert(N == 1 && isMemorySIMDNoIndex() && "Invalid number of operands!");
- // Add the base register operand (the offset is always zero, so ignore it).
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
- }
-
- void addMemoryWritebackIndexedOperands(MCInst &Inst, unsigned N,
- unsigned Scale) const {
- assert(N == 2 && "Invalid number of operands!");
-
- // Add the base register operand.
- Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
-
- // Add the offset operand.
- int64_t Offset = 0;
- if (Mem.OffsetImm) {
- const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Mem.OffsetImm);
- assert(CE && "Non-constant indexed offset operand!");
- Offset = CE->getValue();
- }
-
- if (Scale != 1) {
- assert(Offset % Scale == 0 &&
- "Offset operand must be a multiple of the scale!");
- Offset /= Scale;
- }
-
- Inst.addOperand(MCOperand::CreateImm(Offset));
- }
-
- void addMemoryIndexedSImm9Operands(MCInst &Inst, unsigned N) const {
- addMemoryWritebackIndexedOperands(Inst, N, 1);
- }
-
- void addMemoryIndexed32SImm7Operands(MCInst &Inst, unsigned N) const {
- addMemoryWritebackIndexedOperands(Inst, N, 4);
- }
-
- void addMemoryIndexed64SImm7Operands(MCInst &Inst, unsigned N) const {
- addMemoryWritebackIndexedOperands(Inst, N, 8);
- }
-
- void addMemoryIndexed128SImm7Operands(MCInst &Inst, unsigned N) const {
- addMemoryWritebackIndexedOperands(Inst, N, 16);
+ const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
+ uint64_t Value = CE->getValue();
+ Inst.addOperand(MCOperand::CreateImm((~Value >> Shift) & 0xffff));
}
void print(raw_ostream &OS) const override;
return Op;
}
+ static ARM64Operand *CreateShiftedImm(const MCExpr *Val, unsigned ShiftAmount,
+ SMLoc S, SMLoc E, MCContext &Ctx) {
+ ARM64Operand *Op = new ARM64Operand(k_ShiftedImm, Ctx);
+ Op->ShiftedImm .Val = Val;
+ Op->ShiftedImm.ShiftAmount = ShiftAmount;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static ARM64Operand *CreateCondCode(ARM64CC::CondCode Code, SMLoc S, SMLoc E,
+ MCContext &Ctx) {
+ ARM64Operand *Op = new ARM64Operand(k_CondCode, Ctx);
+ Op->CondCode.Code = Code;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
static ARM64Operand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
ARM64Operand *Op = new ARM64Operand(k_FPImm, Ctx);
Op->FPImm.Val = Val;
return Op;
}
- static ARM64Operand *CreateMem(unsigned BaseRegNum, const MCExpr *Off,
- SMLoc S, SMLoc E, SMLoc OffsetLoc,
- MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
- Op->Mem.BaseRegNum = BaseRegNum;
- Op->Mem.OffsetRegNum = 0;
- Op->Mem.OffsetImm = Off;
- Op->Mem.ExtType = ARM64_AM::UXTX;
- Op->Mem.ShiftVal = 0;
- Op->Mem.ExplicitShift = false;
- Op->Mem.Mode = ImmediateOffset;
- Op->OffsetLoc = OffsetLoc;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static ARM64Operand *CreateRegOffsetMem(unsigned BaseReg, unsigned OffsetReg,
- ARM64_AM::ExtendType ExtType,
- unsigned ShiftVal, bool ExplicitShift,
- SMLoc S, SMLoc E, MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_Memory, Ctx);
- Op->Mem.BaseRegNum = BaseReg;
- Op->Mem.OffsetRegNum = OffsetReg;
- Op->Mem.OffsetImm = nullptr;
- Op->Mem.ExtType = ExtType;
- Op->Mem.ShiftVal = ShiftVal;
- Op->Mem.ExplicitShift = ExplicitShift;
- Op->Mem.Mode = RegisterOffset;
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
static ARM64Operand *CreateSysCR(unsigned Val, SMLoc S, SMLoc E,
MCContext &Ctx) {
ARM64Operand *Op = new ARM64Operand(k_SysCR, Ctx);
return Op;
}
- static ARM64Operand *CreateShifter(ARM64_AM::ShiftType ShOp, unsigned Val,
- SMLoc S, SMLoc E, MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_Shifter, Ctx);
- Op->Shifter.Val = ARM64_AM::getShifterImm(ShOp, Val);
- Op->StartLoc = S;
- Op->EndLoc = E;
- return Op;
- }
-
- static ARM64Operand *CreateExtend(ARM64_AM::ExtendType ExtOp, unsigned Val,
- SMLoc S, SMLoc E, MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_Extend, Ctx);
- Op->Extend.Val = ARM64_AM::getArithExtendImm(ExtOp, Val);
+ static ARM64Operand *CreateShiftExtend(ARM64_AM::ShiftExtendType ShOp,
+ unsigned Val, bool HasExplicitAmount,
+ SMLoc S, SMLoc E, MCContext &Ctx) {
+ ARM64Operand *Op = new ARM64Operand(k_ShiftExtend, Ctx);
+ Op->ShiftExtend.Type = ShOp;
+ Op->ShiftExtend.Amount = Val;
+ Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
Op->StartLoc = S;
Op->EndLoc = E;
return Op;
case k_Immediate:
getImm()->print(OS);
break;
- case k_Memory:
- OS << "<memory>";
+ case k_ShiftedImm: {
+ unsigned Shift = getShiftedImmShift();
+ OS << "<shiftedimm ";
+ getShiftedImmVal()->print(OS);
+ OS << ", lsl #" << ARM64_AM::getShiftValue(Shift) << ">";
+ break;
+ }
+ case k_CondCode:
+ OS << "<condcode " << getCondCode() << ">";
break;
case k_Register:
OS << "<register " << getReg() << ">";
OS << "<prfop invalid #" << getPrefetch() << ">";
break;
}
- case k_Shifter: {
- unsigned Val = getShifter();
- OS << "<" << ARM64_AM::getShiftName(ARM64_AM::getShiftType(Val)) << " #"
- << ARM64_AM::getShiftValue(Val) << ">";
- break;
- }
- case k_Extend: {
- unsigned Val = getExtend();
- OS << "<" << ARM64_AM::getExtendName(ARM64_AM::getArithExtendType(Val))
- << " #" << ARM64_AM::getArithShiftValue(Val) << ">";
+ case k_ShiftExtend: {
+ OS << "<" << ARM64_AM::getShiftExtendName(getShiftExtendType()) << " #"
+ << getShiftExtendAmount();
+ if (!hasShiftExtendAmount())
+ OS << "<imp>";
+ OS << '>';
break;
}
}
return -1;
}
-static int MatchSysCRName(StringRef Name) {
- // Use the same layout as the tablegen'erated register name matcher. Ugly,
- // but efficient.
- switch (Name.size()) {
- default:
- break;
- case 2:
- if (Name[0] != 'c' && Name[0] != 'C')
- return -1;
- switch (Name[1]) {
- default:
- return -1;
- case '0':
- return 0;
- case '1':
- return 1;
- case '2':
- return 2;
- case '3':
- return 3;
- case '4':
- return 4;
- case '5':
- return 5;
- case '6':
- return 6;
- case '7':
- return 7;
- case '8':
- return 8;
- case '9':
- return 9;
- }
- break;
- case 3:
- if ((Name[0] != 'c' && Name[0] != 'C') || Name[1] != '1')
- return -1;
- switch (Name[2]) {
- default:
- return -1;
- case '0':
- return 10;
- case '1':
- return 11;
- case '2':
- return 12;
- case '3':
- return 13;
- case '4':
- return 14;
- case '5':
- return 15;
- }
- break;
- }
-
- llvm_unreachable("Unhandled SysCR operand string!");
- return -1;
-}
-
/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
ARM64AsmParser::OperandMatchResultTy
ARM64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
SMLoc S = getLoc();
- const AsmToken &Tok = Parser.getTok();
- if (Tok.isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
- int Num = MatchSysCRName(Tok.getString());
- if (Num == -1)
- return MatchOperand_NoMatch;
+ if (Parser.getTok().isNot(AsmToken::Identifier)) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
+
+ StringRef Tok = Parser.getTok().getIdentifier();
+ if (Tok[0] != 'c' && Tok[0] != 'C') {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
+
+ uint32_t CRNum;
+ bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
+ if (BadNum || CRNum > 15) {
+ Error(S, "Expected cN operand where 0 <= N <= 15");
+ return MatchOperand_ParseFail;
+ }
Parser.Lex(); // Eat identifier token.
- Operands.push_back(ARM64Operand::CreateSysCR(Num, S, getLoc(), getContext()));
+ Operands.push_back(ARM64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
return MatchOperand_Success;
}
// as we handle that special case in post-processing before matching in
// order to use the zero register for it.
if (Val == -1 && !RealVal.isZero()) {
- TokError("floating point value out of range");
+ TokError("expected compatible register or floating-point constant");
return MatchOperand_ParseFail;
}
Operands.push_back(ARM64Operand::CreateFPImm(Val, S, getContext()));
return MatchOperand_ParseFail;
}
+/// tryParseAddSubImm - Parse ADD/SUB shifted immediate operand
+ARM64AsmParser::OperandMatchResultTy
+ARM64AsmParser::tryParseAddSubImm(OperandVector &Operands) {
+ SMLoc S = getLoc();
+
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat '#'
+ else if (Parser.getTok().isNot(AsmToken::Integer))
+    // Operand should start with '#' or be an integer; otherwise there is no
+    // match and the generic code will handle it.
+ return MatchOperand_NoMatch;
+
+ const MCExpr *Imm;
+ if (parseSymbolicImmVal(Imm))
+ return MatchOperand_ParseFail;
+ else if (Parser.getTok().isNot(AsmToken::Comma)) {
+ uint64_t ShiftAmount = 0;
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(Imm);
+ if (MCE) {
+ int64_t Val = MCE->getValue();
+ if (Val > 0xfff && (Val & 0xfff) == 0) {
+ Imm = MCConstantExpr::Create(Val >> 12, getContext());
+ ShiftAmount = 12;
+ }
+ }
+ SMLoc E = Parser.getTok().getLoc();
+ Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount, S, E,
+ getContext()));
+ return MatchOperand_Success;
+ }
+
+ // Eat ','
+ Parser.Lex();
+
+ // The optional operand must be "lsl #N" where N is non-negative.
+ if (!Parser.getTok().is(AsmToken::Identifier) ||
+ !Parser.getTok().getIdentifier().equals_lower("lsl")) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
+ }
+
+ // Eat 'lsl'
+ Parser.Lex();
+
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex();
+ }
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(), "only 'lsl #+N' valid after immediate");
+ return MatchOperand_ParseFail;
+ }
+
+ int64_t ShiftAmount = Parser.getTok().getIntVal();
+
+ if (ShiftAmount < 0) {
+ Error(Parser.getTok().getLoc(), "positive shift amount required");
+ return MatchOperand_ParseFail;
+ }
+ Parser.Lex(); // Eat the number
+
+ SMLoc E = Parser.getTok().getLoc();
+ Operands.push_back(ARM64Operand::CreateShiftedImm(Imm, ShiftAmount,
+ S, E, getContext()));
+ return MatchOperand_Success;
+}
+
/// parseCondCodeString - Parse a Condition Code string.
-unsigned ARM64AsmParser::parseCondCodeString(StringRef Cond) {
- unsigned CC = StringSwitch<unsigned>(Cond.lower())
+ARM64CC::CondCode ARM64AsmParser::parseCondCodeString(StringRef Cond) {
+ ARM64CC::CondCode CC = StringSwitch<ARM64CC::CondCode>(Cond.lower())
.Case("eq", ARM64CC::EQ)
.Case("ne", ARM64CC::NE)
.Case("cs", ARM64CC::HS)
assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
StringRef Cond = Tok.getString();
- unsigned CC = parseCondCodeString(Cond);
+ ARM64CC::CondCode CC = parseCondCodeString(Cond);
if (CC == ARM64CC::Invalid)
return TokError("invalid condition code");
Parser.Lex(); // Eat identifier token.
if (invertCondCode)
CC = ARM64CC::getInvertedCondCode(ARM64CC::CondCode(CC));
- const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
Operands.push_back(
- ARM64Operand::CreateImm(CCExpr, S, getLoc(), getContext()));
+ ARM64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
return false;
}
-/// ParseOptionalShift - Some operands take an optional shift argument. Parse
+/// tryParseOptionalShiftExtend - Some operands take an optional shift/extend argument. Parse
/// them if present.
-bool ARM64AsmParser::parseOptionalShift(OperandVector &Operands) {
- const AsmToken &Tok = Parser.getTok();
- ARM64_AM::ShiftType ShOp = StringSwitch<ARM64_AM::ShiftType>(Tok.getString())
- .Case("lsl", ARM64_AM::LSL)
- .Case("lsr", ARM64_AM::LSR)
- .Case("asr", ARM64_AM::ASR)
- .Case("ror", ARM64_AM::ROR)
- .Case("msl", ARM64_AM::MSL)
- .Case("LSL", ARM64_AM::LSL)
- .Case("LSR", ARM64_AM::LSR)
- .Case("ASR", ARM64_AM::ASR)
- .Case("ROR", ARM64_AM::ROR)
- .Case("MSL", ARM64_AM::MSL)
- .Default(ARM64_AM::InvalidShift);
- if (ShOp == ARM64_AM::InvalidShift)
- return true;
-
- SMLoc S = Tok.getLoc();
- Parser.Lex();
-
- // We expect a number here.
- bool Hash = getLexer().is(AsmToken::Hash);
- if (!Hash && getLexer().isNot(AsmToken::Integer))
- return TokError("immediate value expected for shifter operand");
-
- if (Hash)
- Parser.Lex(); // Eat the '#'.
-
- SMLoc ExprLoc = getLoc();
- const MCExpr *ImmVal;
- if (getParser().parseExpression(ImmVal))
- return true;
-
- const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!MCE)
- return TokError("immediate value expected for shifter operand");
-
- if ((MCE->getValue() & 0x3f) != MCE->getValue())
- return Error(ExprLoc, "immediate value too large for shifter operand");
-
- SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
- Operands.push_back(
- ARM64Operand::CreateShifter(ShOp, MCE->getValue(), S, E, getContext()));
- return false;
-}
-
-/// parseOptionalExtend - Some operands take an optional extend argument. Parse
-/// them if present.
-bool ARM64AsmParser::parseOptionalExtend(OperandVector &Operands) {
+ARM64AsmParser::OperandMatchResultTy
+ARM64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
- ARM64_AM::ExtendType ExtOp =
- StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
+ std::string LowerID = Tok.getString().lower();
+ ARM64_AM::ShiftExtendType ShOp =
+ StringSwitch<ARM64_AM::ShiftExtendType>(LowerID)
+ .Case("lsl", ARM64_AM::LSL)
+ .Case("lsr", ARM64_AM::LSR)
+ .Case("asr", ARM64_AM::ASR)
+ .Case("ror", ARM64_AM::ROR)
+ .Case("msl", ARM64_AM::MSL)
.Case("uxtb", ARM64_AM::UXTB)
.Case("uxth", ARM64_AM::UXTH)
.Case("uxtw", ARM64_AM::UXTW)
.Case("uxtx", ARM64_AM::UXTX)
- .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
.Case("sxtb", ARM64_AM::SXTB)
.Case("sxth", ARM64_AM::SXTH)
.Case("sxtw", ARM64_AM::SXTW)
.Case("sxtx", ARM64_AM::SXTX)
- .Case("UXTB", ARM64_AM::UXTB)
- .Case("UXTH", ARM64_AM::UXTH)
- .Case("UXTW", ARM64_AM::UXTW)
- .Case("UXTX", ARM64_AM::UXTX)
- .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
- .Case("SXTB", ARM64_AM::SXTB)
- .Case("SXTH", ARM64_AM::SXTH)
- .Case("SXTW", ARM64_AM::SXTW)
- .Case("SXTX", ARM64_AM::SXTX)
- .Default(ARM64_AM::InvalidExtend);
- if (ExtOp == ARM64_AM::InvalidExtend)
- return true;
+ .Default(ARM64_AM::InvalidShiftExtend);
+
+ if (ShOp == ARM64_AM::InvalidShiftExtend)
+ return MatchOperand_NoMatch;
SMLoc S = Tok.getLoc();
Parser.Lex();
- if (getLexer().is(AsmToken::EndOfStatement) ||
- getLexer().is(AsmToken::Comma)) {
- SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
- Operands.push_back(
- ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
- return false;
- }
-
bool Hash = getLexer().is(AsmToken::Hash);
if (!Hash && getLexer().isNot(AsmToken::Integer)) {
+ if (ShOp == ARM64_AM::LSL || ShOp == ARM64_AM::LSR ||
+ ShOp == ARM64_AM::ASR || ShOp == ARM64_AM::ROR ||
+ ShOp == ARM64_AM::MSL) {
+ // We expect a number here.
+ TokError("expected #imm after shift specifier");
+ return MatchOperand_ParseFail;
+ }
+
+    // "extend"-type operations don't need an immediate; #0 is implicit.
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(
- ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
- return false;
+ ARM64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
+ return MatchOperand_Success;
}
if (Hash)
Parser.Lex(); // Eat the '#'.
+ // Make sure we do actually have a number
+ if (!Parser.getTok().is(AsmToken::Integer)) {
+ Error(Parser.getTok().getLoc(),
+ "expected integer shift amount");
+ return MatchOperand_ParseFail;
+ }
+
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
- return true;
+ return MatchOperand_ParseFail;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!MCE)
- return TokError("immediate value expected for extend operand");
+ if (!MCE) {
+ TokError("expected #imm after shift specifier");
+ return MatchOperand_ParseFail;
+ }
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
- Operands.push_back(
- ARM64Operand::CreateExtend(ExtOp, MCE->getValue(), S, E, getContext()));
- return false;
+ Operands.push_back(ARM64Operand::CreateShiftExtend(ShOp, MCE->getValue(),
+ true, S, E, getContext()));
+ return MatchOperand_Success;
}
/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
return false;
}
-/// tryParseNoIndexMemory - Custom parser method for memory operands that
-/// do not allow base regisrer writeback modes,
-/// or those that handle writeback separately from
-/// the memory operand (like the AdvSIMD ldX/stX
-/// instructions.
-ARM64AsmParser::OperandMatchResultTy
-ARM64AsmParser::tryParseNoIndexMemory(OperandVector &Operands) {
- if (Parser.getTok().isNot(AsmToken::LBrac))
- return MatchOperand_NoMatch;
- SMLoc S = getLoc();
- Parser.Lex(); // Eat left bracket token.
-
- const AsmToken &BaseRegTok = Parser.getTok();
- if (BaseRegTok.isNot(AsmToken::Identifier)) {
- Error(BaseRegTok.getLoc(), "register expected");
- return MatchOperand_ParseFail;
- }
-
- int64_t Reg = tryParseRegister();
- if (Reg == -1) {
- Error(BaseRegTok.getLoc(), "register expected");
- return MatchOperand_ParseFail;
- }
-
- SMLoc E = getLoc();
- if (Parser.getTok().isNot(AsmToken::RBrac)) {
- Error(E, "']' expected");
- return MatchOperand_ParseFail;
- }
-
- Parser.Lex(); // Eat right bracket token.
-
- Operands.push_back(ARM64Operand::CreateMem(Reg, nullptr, S, E, E, getContext()));
- return MatchOperand_Success;
-}
-
-/// parseMemory - Parse a memory operand for a basic load/store instruction.
-bool ARM64AsmParser::parseMemory(OperandVector &Operands) {
- assert(Parser.getTok().is(AsmToken::LBrac) && "Token is not a Left Bracket");
- SMLoc S = getLoc();
- Parser.Lex(); // Eat left bracket token.
-
- const AsmToken &BaseRegTok = Parser.getTok();
- if (BaseRegTok.isNot(AsmToken::Identifier))
- return Error(BaseRegTok.getLoc(), "register expected");
-
- int64_t Reg = tryParseRegister();
- if (Reg == -1)
- return Error(BaseRegTok.getLoc(), "register expected");
-
- // If there is an offset expression, parse it.
- const MCExpr *OffsetExpr = nullptr;
- SMLoc OffsetLoc;
- if (Parser.getTok().is(AsmToken::Comma)) {
- Parser.Lex(); // Eat the comma.
- OffsetLoc = getLoc();
-
- // Register offset
- const AsmToken &OffsetRegTok = Parser.getTok();
- int Reg2 = OffsetRegTok.is(AsmToken::Identifier) ? tryParseRegister() : -1;
- if (Reg2 != -1) {
- // Default shift is LSL, with an omitted shift. We use the third bit of
- // the extend value to indicate presence/omission of the immediate offset.
- ARM64_AM::ExtendType ExtOp = ARM64_AM::UXTX;
- int64_t ShiftVal = 0;
- bool ExplicitShift = false;
-
- if (Parser.getTok().is(AsmToken::Comma)) {
- // Embedded extend operand.
- Parser.Lex(); // Eat the comma
-
- SMLoc ExtLoc = getLoc();
- const AsmToken &Tok = Parser.getTok();
- ExtOp = StringSwitch<ARM64_AM::ExtendType>(Tok.getString())
- .Case("uxtw", ARM64_AM::UXTW)
- .Case("lsl", ARM64_AM::UXTX) // Alias for UXTX
- .Case("sxtw", ARM64_AM::SXTW)
- .Case("sxtx", ARM64_AM::SXTX)
- .Case("UXTW", ARM64_AM::UXTW)
- .Case("LSL", ARM64_AM::UXTX) // Alias for UXTX
- .Case("SXTW", ARM64_AM::SXTW)
- .Case("SXTX", ARM64_AM::SXTX)
- .Default(ARM64_AM::InvalidExtend);
- if (ExtOp == ARM64_AM::InvalidExtend)
- return Error(ExtLoc, "expected valid extend operation");
-
- Parser.Lex(); // Eat the extend op.
-
- // A 32-bit offset register is only valid for [SU]/XTW extend
- // operators.
- if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
- if (ExtOp != ARM64_AM::UXTW &&
- ExtOp != ARM64_AM::SXTW)
- return Error(ExtLoc, "32-bit general purpose offset register "
- "requires sxtw or uxtw extend");
- } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
- Reg2))
- return Error(OffsetLoc,
- "64-bit general purpose offset register expected");
-
- bool Hash = getLexer().is(AsmToken::Hash);
- if (getLexer().is(AsmToken::RBrac)) {
- // No immediate operand.
- if (ExtOp == ARM64_AM::UXTX)
- return Error(ExtLoc, "LSL extend requires immediate operand");
- } else if (Hash || getLexer().is(AsmToken::Integer)) {
- // Immediate operand.
- if (Hash)
- Parser.Lex(); // Eat the '#'
- const MCExpr *ImmVal;
- SMLoc ExprLoc = getLoc();
- if (getParser().parseExpression(ImmVal))
- return true;
- const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
- if (!MCE)
- return TokError("immediate value expected for extend operand");
-
- ExplicitShift = true;
- ShiftVal = MCE->getValue();
- if (ShiftVal < 0 || ShiftVal > 4)
- return Error(ExprLoc, "immediate operand out of range");
- } else
- return Error(getLoc(), "expected immediate operand");
- }
-
- if (Parser.getTok().isNot(AsmToken::RBrac))
- return Error(getLoc(), "']' expected");
-
- Parser.Lex(); // Eat right bracket token.
-
- SMLoc E = getLoc();
- Operands.push_back(ARM64Operand::CreateRegOffsetMem(
- Reg, Reg2, ExtOp, ShiftVal, ExplicitShift, S, E, getContext()));
- return false;
-
- // Immediate expressions.
- } else if (Parser.getTok().is(AsmToken::Hash) ||
- Parser.getTok().is(AsmToken::Colon) ||
- Parser.getTok().is(AsmToken::Integer)) {
- if (Parser.getTok().is(AsmToken::Hash))
- Parser.Lex(); // Eat hash token.
-
- if (parseSymbolicImmVal(OffsetExpr))
- return true;
- } else {
- // FIXME: We really should make sure that we're dealing with a LDR/STR
- // instruction that can legally have a symbolic expression here.
- // Symbol reference.
- if (Parser.getTok().isNot(AsmToken::Identifier) &&
- Parser.getTok().isNot(AsmToken::String))
- return Error(getLoc(), "identifier or immediate expression expected");
- if (getParser().parseExpression(OffsetExpr))
- return true;
- // If this is a plain ref, Make sure a legal variant kind was specified.
- // Otherwise, it's a more complicated expression and we have to just
- // assume it's OK and let the relocation stuff puke if it's not.
- ARM64MCExpr::VariantKind ELFRefKind;
- MCSymbolRefExpr::VariantKind DarwinRefKind;
- int64_t Addend;
- if (classifySymbolRef(OffsetExpr, ELFRefKind, DarwinRefKind, Addend) &&
- Addend == 0) {
- assert(ELFRefKind == ARM64MCExpr::VK_INVALID &&
- "ELF symbol modifiers not supported here yet");
-
- switch (DarwinRefKind) {
- default:
- return Error(getLoc(), "expected @pageoff or @gotpageoff modifier");
- case MCSymbolRefExpr::VK_GOTPAGEOFF:
- case MCSymbolRefExpr::VK_PAGEOFF:
- case MCSymbolRefExpr::VK_TLVPPAGEOFF:
- // These are what we're expecting.
- break;
- }
- }
- }
- }
-
- SMLoc E = getLoc();
- if (Parser.getTok().isNot(AsmToken::RBrac))
- return Error(E, "']' expected");
-
- Parser.Lex(); // Eat right bracket token.
-
- // Create the memory operand.
- Operands.push_back(
- ARM64Operand::CreateMem(Reg, OffsetExpr, S, E, OffsetLoc, getContext()));
-
- // Check for a '!', indicating pre-indexed addressing with writeback.
- if (Parser.getTok().is(AsmToken::Exclaim)) {
- // There needs to have been an immediate or wback doesn't make sense.
- if (!OffsetExpr)
- return Error(E, "missing offset for pre-indexed addressing");
- // Pre-indexed with writeback must have a constant expression for the
- // offset. FIXME: Theoretically, we'd like to allow fixups so long
- // as they don't require a relocation.
- if (!isa<MCConstantExpr>(OffsetExpr))
- return Error(OffsetLoc, "constant immediate expression expected");
-
- // Create the Token operand for the '!'.
- Operands.push_back(ARM64Operand::CreateToken(
- "!", false, Parser.getTok().getLoc(), getContext()));
- Parser.Lex(); // Eat the '!' token.
- }
-
- return false;
-}
-
bool ARM64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
bool HasELFModifier = false;
ARM64MCExpr::VariantKind RefKind;
}
}
- if (Parser.getTok().is(AsmToken::EndOfStatement))
- Error(getLoc(), "'}' expected");
+ if (Parser.getTok().isNot(AsmToken::RCurly))
+ return Error(getLoc(), "'}' expected");
Parser.Lex(); // Eat the '}' token.
+ if (Count > 4)
+ return Error(S, "invalid number of vectors");
+
unsigned NumElements = 0;
char ElementKind = 0;
if (!Kind.empty())
return false;
}
+ARM64AsmParser::OperandMatchResultTy
+ARM64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
+ const AsmToken &Tok = Parser.getTok();
+ if (!Tok.is(AsmToken::Identifier))
+ return MatchOperand_NoMatch;
+
+ unsigned RegNum = MatchRegisterName(Tok.getString().lower());
+
+ MCContext &Ctx = getContext();
+ const MCRegisterInfo *RI = Ctx.getRegisterInfo();
+ if (!RI->getRegClass(ARM64::GPR64spRegClassID).contains(RegNum))
+ return MatchOperand_NoMatch;
+
+ SMLoc S = getLoc();
+ Parser.Lex(); // Eat register
+
+ if (Parser.getTok().isNot(AsmToken::Comma)) {
+ Operands.push_back(ARM64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
+ return MatchOperand_Success;
+ }
+ Parser.Lex(); // Eat comma.
+
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat hash
+
+ if (Parser.getTok().isNot(AsmToken::Integer)) {
+ Error(getLoc(), "index must be absent or #0");
+ return MatchOperand_ParseFail;
+ }
+
+ const MCExpr *ImmVal;
+ if (Parser.parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
+ cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
+ Error(getLoc(), "index must be absent or #0");
+ return MatchOperand_ParseFail;
+ }
+
+ Operands.push_back(ARM64Operand::CreateReg(RegNum, false, S, getLoc(), Ctx));
+ return MatchOperand_Success;
+}
+
/// parseOperand - Parse a arm instruction operand. For now this parses the
/// operand regardless of the mnemonic.
bool ARM64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
return false;
}
- case AsmToken::LBrac:
- return parseMemory(Operands);
+ case AsmToken::LBrac: {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(ARM64Operand::CreateToken("[", false, Loc,
+ getContext()));
+ Parser.Lex(); // Eat '['
+
+ // There's no comma after a '[', so we can parse the next operand
+ // immediately.
+ return parseOperand(Operands, false, false);
+ }
case AsmToken::LCurly:
return parseVectorList(Operands);
case AsmToken::Identifier: {
if (!parseRegister(Operands))
return false;
- // This could be an optional "shift" operand.
- if (!parseOptionalShift(Operands))
- return false;
-
- // Or maybe it could be an optional "extend" operand.
- if (!parseOptionalExtend(Operands))
- return false;
+ // This could be an optional "shift" or "extend" operand.
+ OperandMatchResultTy GotShift = tryParseOptionalShiftExtend(Operands);
+ // We can only continue if no tokens were eaten.
+ if (GotShift != MatchOperand_NoMatch)
+ return GotShift;
// This was not a register so parse other operands that start with an
// identifier (like labels) as expressions and create them as immediates.
if (getLexer().is(AsmToken::Hash))
Parser.Lex();
+ // Parse a negative sign
+ bool isNegative = false;
+ if (Parser.getTok().is(AsmToken::Minus)) {
+ isNegative = true;
+ // We need to consume this token only when we have a Real, otherwise
+ // we let parseSymbolicImmVal take care of it
+ if (Parser.getLexer().peekTok().is(AsmToken::Real))
+ Parser.Lex();
+ }
+
// The only Real that should come through here is a literal #0.0 for
// the fcmp[e] r, #0.0 instructions. They expect raw token operands,
// so convert the value.
if (Tok.is(AsmToken::Real)) {
APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
- if (IntVal != 0 ||
- (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
- Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
- Mnemonic != "fcmlt"))
+ if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
+ Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
+ Mnemonic != "fcmlt")
return TokError("unexpected floating point literal");
+ else if (IntVal != 0 || isNegative)
+ return TokError("expected floating-point constant #0.0");
Parser.Lex(); // Eat the token.
Operands.push_back(
StringRef Head = Name.slice(Start, Next);
// IC, DC, AT, and TLBI instructions are aliases for the SYS instruction.
- if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi")
- return parseSysAlias(Head, NameLoc, Operands);
+ if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi") {
+ bool IsError = parseSysAlias(Head, NameLoc, Operands);
+ if (IsError && getLexer().isNot(AsmToken::EndOfStatement))
+ Parser.eatToEndOfStatement();
+ return IsError;
+ }
Operands.push_back(
ARM64Operand::CreateToken(Head, false, NameLoc, getContext()));
SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
(Head.data() - Name.data()));
- unsigned CC = parseCondCodeString(Head);
+ ARM64CC::CondCode CC = parseCondCodeString(Head);
if (CC == ARM64CC::Invalid)
return Error(SuffixLoc, "invalid condition code");
- const MCExpr *CCExpr = MCConstantExpr::Create(CC, getContext());
Operands.push_back(
- ARM64Operand::CreateImm(CCExpr, NameLoc, NameLoc, getContext()));
+ ARM64Operand::CreateToken(".", true, SuffixLoc, getContext()));
+ Operands.push_back(
+ ARM64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
}
// Add the remaining tokens in the mnemonic.
return true;
}
+ // After successfully parsing some operands there are two special cases to
+ // consider (i.e. notional operands not separated by commas). Both are due
+ // to memory specifiers:
+ // + An RBrac will end an address for load/store/prefetch
+ // + An '!' will indicate a pre-indexed operation.
+ //
+ // It's someone else's responsibility to make sure these tokens are sane
+ // in the given context!
+ if (Parser.getTok().is(AsmToken::RBrac)) {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(ARM64Operand::CreateToken("]", false, Loc,
+ getContext()));
+ Parser.Lex();
+ }
+
+ if (Parser.getTok().is(AsmToken::Exclaim)) {
+ SMLoc Loc = Parser.getTok().getLoc();
+ Operands.push_back(ARM64Operand::CreateToken("!", false, Loc,
+ getContext()));
+ Parser.Lex();
+ }
+
++N;
}
}
case ARM64::LDPWpre:
case ARM64::LDPXpost:
case ARM64::LDPXpre: {
- unsigned Rt = Inst.getOperand(0).getReg();
- unsigned Rt2 = Inst.getOperand(1).getReg();
- unsigned Rn = Inst.getOperand(2).getReg();
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable LDP instruction, writeback base "
"is also a destination");
"is also a destination");
// FALLTHROUGH
}
- case ARM64::LDPDpost:
- case ARM64::LDPDpre:
- case ARM64::LDPQpost:
- case ARM64::LDPQpre:
- case ARM64::LDPSpost:
- case ARM64::LDPSpre:
- case ARM64::LDPSWpost:
case ARM64::LDPDi:
case ARM64::LDPQi:
case ARM64::LDPSi:
return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
break;
}
+ case ARM64::LDPDpost:
+ case ARM64::LDPDpre:
+ case ARM64::LDPQpost:
+ case ARM64::LDPQpre:
+ case ARM64::LDPSpost:
+ case ARM64::LDPSpre:
+ case ARM64::LDPSWpost: {
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ if (Rt == Rt2)
+ return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
+ break;
+ }
case ARM64::STPDpost:
case ARM64::STPDpre:
case ARM64::STPQpost:
case ARM64::STPWpre:
case ARM64::STPXpost:
case ARM64::STPXpre: {
- unsigned Rt = Inst.getOperand(0).getReg();
- unsigned Rt2 = Inst.getOperand(1).getReg();
- unsigned Rn = Inst.getOperand(2).getReg();
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rt2 = Inst.getOperand(2).getReg();
+ unsigned Rn = Inst.getOperand(3).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable STP instruction, writeback base "
"is also a source");
case ARM64::LDRSWpost:
case ARM64::LDRWpost:
case ARM64::LDRXpost: {
- unsigned Rt = Inst.getOperand(0).getReg();
- unsigned Rn = Inst.getOperand(1).getReg();
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable LDR instruction, writeback base "
"is also a source");
case ARM64::STRHpre:
case ARM64::STRWpre:
case ARM64::STRXpre: {
- unsigned Rt = Inst.getOperand(0).getReg();
- unsigned Rn = Inst.getOperand(1).getReg();
+ unsigned Rt = Inst.getOperand(1).getReg();
+ unsigned Rn = Inst.getOperand(2).getReg();
if (RI->isSubRegisterEq(Rn, Rt))
return Error(Loc[0], "unpredictable STR instruction, writeback base "
"is also a source");
// in the instructions being checked and this keeps the nested conditionals
// to a minimum.
switch (Inst.getOpcode()) {
- case ARM64::ANDWrs:
- case ARM64::ANDSWrs:
- case ARM64::EORWrs:
- case ARM64::ORRWrs: {
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[3], "immediate value expected");
- int64_t shifter = Inst.getOperand(3).getImm();
- ARM64_AM::ShiftType ST = ARM64_AM::getShiftType(shifter);
- if (ST == ARM64_AM::LSL && shifter > 31)
- return Error(Loc[3], "shift value out of range");
- return false;
- }
case ARM64::ADDSWri:
case ARM64::ADDSXri:
case ARM64::ADDWri:
case ARM64::SUBSXri:
case ARM64::SUBWri:
case ARM64::SUBXri: {
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[3], "immediate value expected");
- int64_t shifter = Inst.getOperand(3).getImm();
- if (shifter != 0 && shifter != 12)
- return Error(Loc[3], "shift value out of range");
- // The imm12 operand can be an expression. Validate that it's legit.
- // FIXME: We really, really want to allow arbitrary expressions here
- // and resolve the value and validate the result at fixup time, but
- // that's hard as we have long since lost any source information we
- // need to generate good diagnostics by that point.
- if ((Inst.getOpcode() == ARM64::ADDXri ||
- Inst.getOpcode() == ARM64::ADDWri) &&
- Inst.getOperand(2).isExpr()) {
+ // Annoyingly we can't do this in the isAddSubImm predicate, so there is
+ // some slight duplication here.
+ if (Inst.getOperand(2).isExpr()) {
const MCExpr *Expr = Inst.getOperand(2).getExpr();
ARM64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
return Error(Loc[2], "invalid immediate expression");
}
- // Note that we don't range-check the addend. It's adjusted modulo page
- // size when converted, so there is no "out of range" condition when using
- // @pageoff. Any validity checking for the value was done in the is*()
- // predicate function.
+ // Only allow these with ADDXri.
if ((DarwinRefKind == MCSymbolRefExpr::VK_PAGEOFF ||
- DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGEOFF) &&
Inst.getOpcode() == ARM64::ADDXri)
return false;
- if (ELFRefKind == ARM64MCExpr::VK_LO12 ||
+
+ // Only allow these with ADDXri/ADDWri
+ if ((ELFRefKind == ARM64MCExpr::VK_LO12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_HI12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12 ||
ELFRefKind == ARM64MCExpr::VK_DTPREL_LO12_NC ||
ELFRefKind == ARM64MCExpr::VK_TPREL_HI12 ||
ELFRefKind == ARM64MCExpr::VK_TPREL_LO12 ||
ELFRefKind == ARM64MCExpr::VK_TPREL_LO12_NC ||
- ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) {
+ ELFRefKind == ARM64MCExpr::VK_TLSDESC_LO12) &&
+ (Inst.getOpcode() == ARM64::ADDXri ||
+ Inst.getOpcode() == ARM64::ADDWri))
return false;
- } else if (DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGEOFF) {
- // @gotpageoff can only be used directly, not with an addend.
- return Addend != 0;
- }
- // Otherwise, we're not sure, so don't allow it for now.
+ // Don't allow expressions in the immediate field otherwise
return Error(Loc[2], "invalid immediate expression");
}
-
- // If it's anything but an immediate, it's not legit.
- if (!Inst.getOperand(2).isImm())
- return Error(Loc[2], "invalid immediate expression");
- int64_t imm = Inst.getOperand(2).getImm();
- if (imm > 4095 || imm < 0)
- return Error(Loc[2], "immediate value out of range");
- return false;
- }
- case ARM64::LDRBpre:
- case ARM64::LDRHpre:
- case ARM64::LDRSBWpre:
- case ARM64::LDRSBXpre:
- case ARM64::LDRSHWpre:
- case ARM64::LDRSHXpre:
- case ARM64::LDRWpre:
- case ARM64::LDRXpre:
- case ARM64::LDRSpre:
- case ARM64::LDRDpre:
- case ARM64::LDRQpre:
- case ARM64::STRBpre:
- case ARM64::STRHpre:
- case ARM64::STRWpre:
- case ARM64::STRXpre:
- case ARM64::STRSpre:
- case ARM64::STRDpre:
- case ARM64::STRQpre:
- case ARM64::LDRBpost:
- case ARM64::LDRHpost:
- case ARM64::LDRSBWpost:
- case ARM64::LDRSBXpost:
- case ARM64::LDRSHWpost:
- case ARM64::LDRSHXpost:
- case ARM64::LDRWpost:
- case ARM64::LDRXpost:
- case ARM64::LDRSpost:
- case ARM64::LDRDpost:
- case ARM64::LDRQpost:
- case ARM64::STRBpost:
- case ARM64::STRHpost:
- case ARM64::STRWpost:
- case ARM64::STRXpost:
- case ARM64::STRSpost:
- case ARM64::STRDpost:
- case ARM64::STRQpost:
- case ARM64::LDTRXi:
- case ARM64::LDTRWi:
- case ARM64::LDTRHi:
- case ARM64::LDTRBi:
- case ARM64::LDTRSHWi:
- case ARM64::LDTRSHXi:
- case ARM64::LDTRSBWi:
- case ARM64::LDTRSBXi:
- case ARM64::LDTRSWi:
- case ARM64::STTRWi:
- case ARM64::STTRXi:
- case ARM64::STTRHi:
- case ARM64::STTRBi:
- case ARM64::LDURWi:
- case ARM64::LDURXi:
- case ARM64::LDURSi:
- case ARM64::LDURDi:
- case ARM64::LDURQi:
- case ARM64::LDURHi:
- case ARM64::LDURBi:
- case ARM64::LDURSHWi:
- case ARM64::LDURSHXi:
- case ARM64::LDURSBWi:
- case ARM64::LDURSBXi:
- case ARM64::LDURSWi:
- case ARM64::PRFUMi:
- case ARM64::STURWi:
- case ARM64::STURXi:
- case ARM64::STURSi:
- case ARM64::STURDi:
- case ARM64::STURQi:
- case ARM64::STURHi:
- case ARM64::STURBi: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(2).isImm())
- return Error(Loc[1], "immediate value expected");
- int64_t offset = Inst.getOperand(2).getImm();
- if (offset > 255 || offset < -256)
- return Error(Loc[1], "offset value out of range");
- return false;
- }
- case ARM64::LDRSro:
- case ARM64::LDRWro:
- case ARM64::LDRSWro:
- case ARM64::STRWro:
- case ARM64::STRSro: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[1], "immediate value expected");
- int64_t shift = Inst.getOperand(3).getImm();
- ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
- if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
- type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
- return Error(Loc[1], "shift type invalid");
- return false;
- }
- case ARM64::LDRDro:
- case ARM64::LDRQro:
- case ARM64::LDRXro:
- case ARM64::PRFMro:
- case ARM64::STRXro:
- case ARM64::STRDro:
- case ARM64::STRQro: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[1], "immediate value expected");
- int64_t shift = Inst.getOperand(3).getImm();
- ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
- if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
- type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
- return Error(Loc[1], "shift type invalid");
- return false;
- }
- case ARM64::LDRHro:
- case ARM64::LDRHHro:
- case ARM64::LDRSHWro:
- case ARM64::LDRSHXro:
- case ARM64::STRHro:
- case ARM64::STRHHro: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[1], "immediate value expected");
- int64_t shift = Inst.getOperand(3).getImm();
- ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
- if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
- type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
- return Error(Loc[1], "shift type invalid");
- return false;
- }
- case ARM64::LDRBro:
- case ARM64::LDRBBro:
- case ARM64::LDRSBWro:
- case ARM64::LDRSBXro:
- case ARM64::STRBro:
- case ARM64::STRBBro: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[1], "immediate value expected");
- int64_t shift = Inst.getOperand(3).getImm();
- ARM64_AM::ExtendType type = ARM64_AM::getMemExtendType(shift);
- if (type != ARM64_AM::UXTW && type != ARM64_AM::UXTX &&
- type != ARM64_AM::SXTW && type != ARM64_AM::SXTX)
- return Error(Loc[1], "shift type invalid");
- return false;
- }
- case ARM64::LDPWi:
- case ARM64::LDPXi:
- case ARM64::LDPSi:
- case ARM64::LDPDi:
- case ARM64::LDPQi:
- case ARM64::LDPSWi:
- case ARM64::STPWi:
- case ARM64::STPXi:
- case ARM64::STPSi:
- case ARM64::STPDi:
- case ARM64::STPQi:
- case ARM64::LDPWpre:
- case ARM64::LDPXpre:
- case ARM64::LDPSpre:
- case ARM64::LDPDpre:
- case ARM64::LDPQpre:
- case ARM64::LDPSWpre:
- case ARM64::STPWpre:
- case ARM64::STPXpre:
- case ARM64::STPSpre:
- case ARM64::STPDpre:
- case ARM64::STPQpre:
- case ARM64::LDPWpost:
- case ARM64::LDPXpost:
- case ARM64::LDPSpost:
- case ARM64::LDPDpost:
- case ARM64::LDPQpost:
- case ARM64::LDPSWpost:
- case ARM64::STPWpost:
- case ARM64::STPXpost:
- case ARM64::STPSpost:
- case ARM64::STPDpost:
- case ARM64::STPQpost:
- case ARM64::LDNPWi:
- case ARM64::LDNPXi:
- case ARM64::LDNPSi:
- case ARM64::LDNPDi:
- case ARM64::LDNPQi:
- case ARM64::STNPWi:
- case ARM64::STNPXi:
- case ARM64::STNPSi:
- case ARM64::STNPDi:
- case ARM64::STNPQi: {
- // FIXME: Should accept expressions and error in fixup evaluation
- // if out of range.
- if (!Inst.getOperand(3).isImm())
- return Error(Loc[2], "immediate value expected");
- int64_t offset = Inst.getOperand(3).getImm();
- if (offset > 63 || offset < -64)
- return Error(Loc[2], "offset value out of range");
return false;
}
default:
}
}
-static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
- StringRef mnemonic, uint64_t imm, unsigned shift,
- MCContext &Context) {
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- Operands[0] =
- ARM64Operand::CreateToken(mnemonic, false, Op->getStartLoc(), Context);
-
- const MCExpr *NewImm = MCConstantExpr::Create(imm >> shift, Context);
- Operands[2] = ARM64Operand::CreateImm(NewImm, Op2->getStartLoc(),
- Op2->getEndLoc(), Context);
-
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, shift, Op2->getStartLoc(), Op2->getEndLoc(), Context));
- delete Op2;
- delete Op;
-}
-
-static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
- MCContext &Context) {
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- Operands[0] =
- ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
-
- const MCExpr *Imm = MCConstantExpr::Create(0, Context);
- Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
- Op2->getEndLoc(), Context));
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
-
- delete Op;
-}
-
-static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
- MCContext &Context) {
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- Operands[0] =
- ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
-
- // Operands[2] becomes Operands[3].
- Operands.push_back(Operands[2]);
- // And Operands[2] becomes ZR.
- unsigned ZeroReg = ARM64::XZR;
- if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
- Operands[2]->getReg()))
- ZeroReg = ARM64::WZR;
-
- Operands[2] =
- ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
- Op2->getEndLoc(), Context);
-
- delete Op;
-}
-
bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
switch (ErrCode) {
case Match_MissingFeature:
return Error(Loc, "invalid operand for instruction");
case Match_InvalidSuffix:
return Error(Loc, "invalid type suffix for instruction");
+ case Match_InvalidCondCode:
+ return Error(Loc, "expected AArch64 condition code");
+ case Match_AddSubRegExtendSmall:
+ return Error(Loc,
+ "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubRegExtendLarge:
+ return Error(Loc,
+ "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
+ case Match_AddSubSecondSource:
+ return Error(Loc,
+ "expected compatible register, symbol or integer in range [0, 4095]");
+ case Match_LogicalSecondSource:
+ return Error(Loc, "expected compatible register or logical immediate");
+ case Match_InvalidMovImm32Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
+ case Match_InvalidMovImm64Shift:
+ return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
+ case Match_AddSubRegShift32:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
+ case Match_AddSubRegShift64:
+ return Error(Loc,
+ "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
+ case Match_InvalidFPImm:
+ return Error(Loc,
+ "expected compatible register or floating-point constant");
case Match_InvalidMemoryIndexedSImm9:
return Error(Loc, "index must be an integer in range [-256, 255].");
- case Match_InvalidMemoryIndexed32SImm7:
+ case Match_InvalidMemoryIndexed4SImm7:
return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
- case Match_InvalidMemoryIndexed64SImm7:
+ case Match_InvalidMemoryIndexed8SImm7:
return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
- case Match_InvalidMemoryIndexed128SImm7:
+ case Match_InvalidMemoryIndexed16SImm7:
return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
- case Match_InvalidMemoryIndexed8:
+ case Match_InvalidMemoryWExtend8:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0");
+ case Match_InvalidMemoryWExtend16:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
+ case Match_InvalidMemoryWExtend32:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
+ case Match_InvalidMemoryWExtend64:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
+ case Match_InvalidMemoryWExtend128:
+ return Error(Loc,
+ "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
+ case Match_InvalidMemoryXExtend8:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0");
+ case Match_InvalidMemoryXExtend16:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
+ case Match_InvalidMemoryXExtend32:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
+ case Match_InvalidMemoryXExtend64:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
+ case Match_InvalidMemoryXExtend128:
+ return Error(Loc,
+ "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
+ case Match_InvalidMemoryIndexed1:
return Error(Loc, "index must be an integer in range [0, 4095].");
- case Match_InvalidMemoryIndexed16:
+ case Match_InvalidMemoryIndexed2:
return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
- case Match_InvalidMemoryIndexed32:
+ case Match_InvalidMemoryIndexed4:
return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
- case Match_InvalidMemoryIndexed64:
+ case Match_InvalidMemoryIndexed8:
return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
- case Match_InvalidMemoryIndexed128:
+ case Match_InvalidMemoryIndexed16:
return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
case Match_InvalidImm0_7:
return Error(Loc, "immediate must be an integer in range [0, 7].");
return Error(Loc, "immediate must be an integer in range [0, 31].");
case Match_InvalidImm0_63:
return Error(Loc, "immediate must be an integer in range [0, 63].");
+ case Match_InvalidImm0_127:
+ return Error(Loc, "immediate must be an integer in range [0, 127].");
+ case Match_InvalidImm0_65535:
+ return Error(Loc, "immediate must be an integer in range [0, 65535].");
case Match_InvalidImm1_8:
return Error(Loc, "immediate must be an integer in range [1, 8].");
case Match_InvalidImm1_16:
return Error(Loc, "immediate must be an integer in range [1, 32].");
case Match_InvalidImm1_64:
return Error(Loc, "immediate must be an integer in range [1, 64].");
+ case Match_InvalidIndex1:
+ return Error(Loc, "expected lane specifier '[1]'");
case Match_InvalidIndexB:
return Error(Loc, "vector lane must be an integer in range [0, 15].");
case Match_InvalidIndexH:
assert(Op->isToken() && "Leading operand should always be a mnemonic!");
StringRef Tok = Op->getToken();
- // Translate CMN/CMP pseudos to ADDS/SUBS with zero register destination.
- // This needs to be done before the special handling of ADD/SUB immediates.
- if (Tok == "cmp" || Tok == "cmn") {
- // Replace the opcode with either ADDS or SUBS.
- const char *Repl = StringSwitch<const char *>(Tok)
- .Case("cmp", "subs")
- .Case("cmn", "adds")
- .Default(nullptr);
- assert(Repl && "Unknown compare instruction");
- delete Operands[0];
- Operands[0] = ARM64Operand::CreateToken(Repl, false, IDLoc, getContext());
-
- // Insert WZR or XZR as destination operand.
- ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
- unsigned ZeroReg;
- if (RegOp->isReg() &&
- ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
- RegOp->getReg()))
- ZeroReg = ARM64::WZR;
- else
- ZeroReg = ARM64::XZR;
- Operands.insert(
- Operands.begin() + 1,
- ARM64Operand::CreateReg(ZeroReg, false, IDLoc, IDLoc, getContext()));
- // Update since we modified it above.
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
- Tok = Op->getToken();
- }
-
unsigned NumOperands = Operands.size();
- if (Tok == "mov" && NumOperands == 3) {
- // The MOV mnemomic is aliased to movn/movz, depending on the value of
- // the immediate being instantiated.
- // FIXME: Catching this here is a total hack, and we should use tblgen
- // support to implement this instead as soon as it is available.
-
- ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
+ if (NumOperands == 4 && Tok == "lsl") {
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- if (Op2->isImm()) {
- if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
- uint64_t Val = CE->getValue();
- uint64_t NVal = ~Val;
-
- // If this is a 32-bit register and the value has none of the upper
- // set, clear the complemented upper 32-bits so the logic below works
- // for 32-bit registers too.
- ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
- if (Op1->isReg() &&
- ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
- Op1->getReg()) &&
- (Val & 0xFFFFFFFFULL) == Val)
- NVal &= 0x00000000FFFFFFFFULL;
-
- // MOVK Rd, imm << 0
- if ((Val & 0xFFFF) == Val)
- rewriteMOVI(Operands, "movz", Val, 0, getContext());
-
- // MOVK Rd, imm << 16
- else if ((Val & 0xFFFF0000ULL) == Val)
- rewriteMOVI(Operands, "movz", Val, 16, getContext());
-
- // MOVK Rd, imm << 32
- else if ((Val & 0xFFFF00000000ULL) == Val)
- rewriteMOVI(Operands, "movz", Val, 32, getContext());
-
- // MOVK Rd, imm << 48
- else if ((Val & 0xFFFF000000000000ULL) == Val)
- rewriteMOVI(Operands, "movz", Val, 48, getContext());
-
- // MOVN Rd, (~imm << 0)
- else if ((NVal & 0xFFFFULL) == NVal)
- rewriteMOVI(Operands, "movn", NVal, 0, getContext());
-
- // MOVN Rd, ~(imm << 16)
- else if ((NVal & 0xFFFF0000ULL) == NVal)
- rewriteMOVI(Operands, "movn", NVal, 16, getContext());
-
- // MOVN Rd, ~(imm << 32)
- else if ((NVal & 0xFFFF00000000ULL) == NVal)
- rewriteMOVI(Operands, "movn", NVal, 32, getContext());
-
- // MOVN Rd, ~(imm << 48)
- else if ((NVal & 0xFFFF000000000000ULL) == NVal)
- rewriteMOVI(Operands, "movn", NVal, 48, getContext());
- }
- } else if (Op1->isReg() && Op2->isReg()) {
- // reg->reg move.
- unsigned Reg1 = Op1->getReg();
- unsigned Reg2 = Op2->getReg();
- if ((Reg1 == ARM64::SP &&
- ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
- (Reg2 == ARM64::SP &&
- ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
- (Reg1 == ARM64::WSP &&
- ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
- (Reg2 == ARM64::WSP &&
- ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
- rewriteMOVRSP(Operands, getContext());
- else
- rewriteMOVR(Operands, getContext());
- }
- } else if (NumOperands == 4) {
- if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
- // Handle the uimm24 immediate form, where the shift is not specified.
- ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
- if (Op3->isImm()) {
- if (const MCConstantExpr *CE =
- dyn_cast<MCConstantExpr>(Op3->getImm())) {
- uint64_t Val = CE->getValue();
- if (Val >= (1 << 24)) {
- Error(IDLoc, "immediate value is too large");
- return true;
- }
- if (Val < (1 << 12)) {
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
- } else if ((Val & 0xfff) == 0) {
- delete Operands[3];
- CE = MCConstantExpr::Create(Val >> 12, getContext());
- Operands[3] =
- ARM64Operand::CreateImm(CE, IDLoc, IDLoc, getContext());
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, 12, IDLoc, IDLoc, getContext()));
- } else {
- Error(IDLoc, "immediate value is too large");
- return true;
- }
+ ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
+ if (Op2->isReg() && Op3->isImm()) {
+ const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
+ if (Op3CE) {
+ uint64_t Op3Val = Op3CE->getValue();
+ uint64_t NewOp3Val = 0;
+ uint64_t NewOp4Val = 0;
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op2->getReg())) {
+ NewOp3Val = (32 - Op3Val) & 0x1f;
+ NewOp4Val = 31 - Op3Val;
} else {
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
+ NewOp3Val = (64 - Op3Val) & 0x3f;
+ NewOp4Val = 63 - Op3Val;
}
- }
-
- // FIXME: Horible hack to handle the LSL -> UBFM alias.
- } else if (NumOperands == 4 && Tok == "lsl") {
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
- if (Op2->isReg() && Op3->isImm()) {
- const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3->getImm());
- if (Op3CE) {
- uint64_t Op3Val = Op3CE->getValue();
- uint64_t NewOp3Val = 0;
- uint64_t NewOp4Val = 0;
- if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
- Op2->getReg())) {
- NewOp3Val = (32 - Op3Val) & 0x1f;
- NewOp4Val = 31 - Op3Val;
- } else {
- NewOp3Val = (64 - Op3Val) & 0x3f;
- NewOp4Val = 63 - Op3Val;
- }
- const MCExpr *NewOp3 =
- MCConstantExpr::Create(NewOp3Val, getContext());
- const MCExpr *NewOp4 =
- MCConstantExpr::Create(NewOp4Val, getContext());
-
- Operands[0] = ARM64Operand::CreateToken(
- "ubfm", false, Op->getStartLoc(), getContext());
- Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
- Op3->getEndLoc(), getContext());
- Operands.push_back(ARM64Operand::CreateImm(
- NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
- delete Op3;
- delete Op;
- }
- }
+ const MCExpr *NewOp3 = MCConstantExpr::Create(NewOp3Val, getContext());
+ const MCExpr *NewOp4 = MCConstantExpr::Create(NewOp4Val, getContext());
- // FIXME: Horrible hack to handle the optional LSL shift for vector
- // instructions.
- } else if (NumOperands == 4 && (Tok == "bic" || Tok == "orr")) {
- ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
- if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
- (Op1->isVectorReg() && Op2->isToken() && Op3->isImm()))
- Operands.push_back(ARM64Operand::CreateShifter(ARM64_AM::LSL, 0, IDLoc,
- IDLoc, getContext()));
- } else if (NumOperands == 4 && (Tok == "movi" || Tok == "mvni")) {
- ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
- ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
- ARM64Operand *Op3 = static_cast<ARM64Operand *>(Operands[3]);
- if ((Op1->isToken() && Op2->isVectorReg() && Op3->isImm()) ||
- (Op1->isVectorReg() && Op2->isToken() && Op3->isImm())) {
- StringRef Suffix = Op1->isToken() ? Op1->getToken() : Op2->getToken();
- // Canonicalize on lower-case for ease of comparison.
- std::string CanonicalSuffix = Suffix.lower();
- if (Tok != "movi" ||
- (CanonicalSuffix != ".1d" && CanonicalSuffix != ".2d" &&
- CanonicalSuffix != ".8b" && CanonicalSuffix != ".16b"))
- Operands.push_back(ARM64Operand::CreateShifter(
- ARM64_AM::LSL, 0, IDLoc, IDLoc, getContext()));
+ Operands[0] = ARM64Operand::CreateToken(
+ "ubfm", false, Op->getStartLoc(), getContext());
+ Operands[3] = ARM64Operand::CreateImm(NewOp3, Op3->getStartLoc(),
+ Op3->getEndLoc(), getContext());
+ Operands.push_back(ARM64Operand::CreateImm(
+ NewOp4, Op3->getStartLoc(), Op3->getEndLoc(), getContext()));
+ delete Op3;
+ delete Op;
}
}
} else if (NumOperands == 5) {
uint64_t Op3Val = Op3CE->getValue();
uint64_t Op4Val = Op4CE->getValue();
+ uint64_t RegWidth = 0;
+ if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Op1->getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3->getStartLoc(),
+ "expected integer in range [0, 31]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4->getStartLoc(),
+ "expected integer in range [1, 32]");
+
uint64_t NewOp3Val = 0;
if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
Op1->getReg()))
uint64_t NewOp4Val = Op4Val - 1;
+ if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
+ return Error(Op4->getStartLoc(),
+ "requested insert overflows register");
+
const MCExpr *NewOp3 =
MCConstantExpr::Create(NewOp3Val, getContext());
const MCExpr *NewOp4 =
if (Op3CE && Op4CE) {
uint64_t Op3Val = Op3CE->getValue();
uint64_t Op4Val = Op4CE->getValue();
+
+ uint64_t RegWidth = 0;
+ if (ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Op1->getReg()))
+ RegWidth = 64;
+ else
+ RegWidth = 32;
+
+ if (Op3Val >= RegWidth)
+ return Error(Op3->getStartLoc(),
+ "expected integer in range [0, 31]");
+ if (Op4Val < 1 || Op4Val > RegWidth)
+ return Error(Op4->getStartLoc(),
+ "expected integer in range [1, 32]");
+
uint64_t NewOp4Val = Op3Val + Op4Val - 1;
- if (NewOp4Val >= Op3Val) {
- const MCExpr *NewOp4 =
- MCConstantExpr::Create(NewOp4Val, getContext());
- Operands[4] = ARM64Operand::CreateImm(
- NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
- if (Tok == "bfxil")
- Operands[0] = ARM64Operand::CreateToken(
- "bfm", false, Op->getStartLoc(), getContext());
- else if (Tok == "sbfx")
- Operands[0] = ARM64Operand::CreateToken(
- "sbfm", false, Op->getStartLoc(), getContext());
- else if (Tok == "ubfx")
- Operands[0] = ARM64Operand::CreateToken(
- "ubfm", false, Op->getStartLoc(), getContext());
- else
- llvm_unreachable("No valid mnemonic for alias?");
-
- delete Op;
- delete Op4;
- }
- }
- }
- }
- }
- // FIXME: Horrible hack for tbz and tbnz with Wn register operand.
- // InstAlias can't quite handle this since the reg classes aren't
- // subclasses.
- if (NumOperands == 4 && (Tok == "tbz" || Tok == "tbnz")) {
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
- if (Op->isImm()) {
- if (const MCConstantExpr *OpCE = dyn_cast<MCConstantExpr>(Op->getImm())) {
- if (OpCE->getValue() < 32) {
- // The source register can be Wn here, but the matcher expects a
- // GPR64. Twiddle it here if necessary.
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
- if (Op->isReg()) {
- unsigned Reg = getXRegFromWReg(Op->getReg());
- Operands[1] = ARM64Operand::CreateReg(
- Reg, false, Op->getStartLoc(), Op->getEndLoc(), getContext());
- delete Op;
- }
+ if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
+ return Error(Op4->getStartLoc(),
+ "requested extract overflows register");
+
+ const MCExpr *NewOp4 =
+ MCConstantExpr::Create(NewOp4Val, getContext());
+ Operands[4] = ARM64Operand::CreateImm(
+ NewOp4, Op4->getStartLoc(), Op4->getEndLoc(), getContext());
+ if (Tok == "bfxil")
+ Operands[0] = ARM64Operand::CreateToken(
+ "bfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "sbfx")
+ Operands[0] = ARM64Operand::CreateToken(
+ "sbfm", false, Op->getStartLoc(), getContext());
+ else if (Tok == "ubfx")
+ Operands[0] = ARM64Operand::CreateToken(
+ "ubfm", false, Op->getStartLoc(), getContext());
+ else
+ llvm_unreachable("No valid mnemonic for alias?");
+
+ delete Op;
+ delete Op4;
}
}
}
}
}
- // FIXME: Horrible hack to handle the literal .d[1] vector index on
- // FMOV instructions. The index isn't an actual instruction operand
- // but rather syntactic sugar. It really should be part of the mnemonic,
- // not the operand, but whatever.
- if ((NumOperands == 5) && Tok == "fmov") {
- // If the last operand is a vectorindex of '1', then replace it with
- // a '[' '1' ']' token sequence, which is what the matcher
- // (annoyingly) expects for a literal vector index operand.
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[NumOperands - 1]);
- if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
- SMLoc Loc = Op->getStartLoc();
- Operands.pop_back();
- delete Op;
- Operands.push_back(
- ARM64Operand::CreateToken("[", false, Loc, getContext()));
- Operands.push_back(
- ARM64Operand::CreateToken("1", false, Loc, getContext()));
- Operands.push_back(
- ARM64Operand::CreateToken("]", false, Loc, getContext()));
- } else if (Op->isReg()) {
- // Similarly, check the destination operand for the GPR->High-lane
- // variant.
- unsigned OpNo = NumOperands - 2;
- ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[OpNo]);
- if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
- SMLoc Loc = Op->getStartLoc();
- Operands[OpNo] =
- ARM64Operand::CreateToken("[", false, Loc, getContext());
- Operands.insert(
- Operands.begin() + OpNo + 1,
- ARM64Operand::CreateToken("1", false, Loc, getContext()));
- Operands.insert(
- Operands.begin() + OpNo + 2,
- ARM64Operand::CreateToken("]", false, Loc, getContext()));
- delete Op;
- }
- }
- }
-
MCInst Inst;
// First try to match against the secondary set of tables containing the
// short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
return showMatchError(ErrorLoc, MatchResult);
}
- case Match_InvalidMemoryIndexedSImm9: {
- // If there is not a '!' after the memory operand that failed, we really
- // want the diagnostic for the non-pre-indexed instruction variant instead.
- // Be careful to check for the post-indexed variant as well, which also
- // uses this match diagnostic. Also exclude the explicitly unscaled
- // mnemonics, as they want the unscaled diagnostic as well.
- if (Operands.size() == ErrorInfo + 1 &&
- !((ARM64Operand *)Operands[ErrorInfo])->isImm() &&
- !Tok.startswith("stur") && !Tok.startswith("ldur")) {
- // whether we want an Indexed64 or Indexed32 diagnostic depends on
- // the register class of the previous operand. Default to 64 in case
- // we see something unexpected.
- MatchResult = Match_InvalidMemoryIndexed64;
- if (ErrorInfo) {
- ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
- if (PrevOp->isReg() &&
- ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
- PrevOp->getReg()))
- MatchResult = Match_InvalidMemoryIndexed32;
- }
- }
- SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
- if (ErrorLoc == SMLoc())
- ErrorLoc = IDLoc;
- return showMatchError(ErrorLoc, MatchResult);
- }
- case Match_InvalidMemoryIndexed32:
- case Match_InvalidMemoryIndexed64:
- case Match_InvalidMemoryIndexed128:
- // If there is a '!' after the memory operand that failed, we really
- // want the diagnostic for the pre-indexed instruction variant instead.
- if (Operands.size() > ErrorInfo + 1 &&
- ((ARM64Operand *)Operands[ErrorInfo + 1])->isTokenEqual("!"))
- MatchResult = Match_InvalidMemoryIndexedSImm9;
- // FALL THROUGH
+ case Match_InvalidMemoryIndexed1:
+ case Match_InvalidMemoryIndexed2:
+ case Match_InvalidMemoryIndexed4:
case Match_InvalidMemoryIndexed8:
case Match_InvalidMemoryIndexed16:
- case Match_InvalidMemoryIndexed32SImm7:
- case Match_InvalidMemoryIndexed64SImm7:
- case Match_InvalidMemoryIndexed128SImm7:
+ case Match_InvalidCondCode:
+ case Match_AddSubRegExtendSmall:
+ case Match_AddSubRegExtendLarge:
+ case Match_AddSubSecondSource:
+ case Match_LogicalSecondSource:
+ case Match_AddSubRegShift32:
+ case Match_AddSubRegShift64:
+ case Match_InvalidMovImm32Shift:
+ case Match_InvalidMovImm64Shift:
+ case Match_InvalidFPImm:
+ case Match_InvalidMemoryWExtend8:
+ case Match_InvalidMemoryWExtend16:
+ case Match_InvalidMemoryWExtend32:
+ case Match_InvalidMemoryWExtend64:
+ case Match_InvalidMemoryWExtend128:
+ case Match_InvalidMemoryXExtend8:
+ case Match_InvalidMemoryXExtend16:
+ case Match_InvalidMemoryXExtend32:
+ case Match_InvalidMemoryXExtend64:
+ case Match_InvalidMemoryXExtend128:
+ case Match_InvalidMemoryIndexed4SImm7:
+ case Match_InvalidMemoryIndexed8SImm7:
+ case Match_InvalidMemoryIndexed16SImm7:
+ case Match_InvalidMemoryIndexedSImm9:
case Match_InvalidImm0_7:
case Match_InvalidImm0_15:
case Match_InvalidImm0_31:
case Match_InvalidImm0_63:
+ case Match_InvalidImm0_127:
+ case Match_InvalidImm0_65535:
case Match_InvalidImm1_8:
case Match_InvalidImm1_16:
case Match_InvalidImm1_32:
case Match_InvalidImm1_64:
+ case Match_InvalidIndex1:
case Match_InvalidIndexB:
case Match_InvalidIndexH:
case Match_InvalidIndexS:
// Any time we get here, there's nothing fancy to do. Just get the
// operand SMLoc and display the diagnostic.
SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
- // If it's a memory operand, the error is with the offset immediate,
- // so get that location instead.
- if (((ARM64Operand *)Operands[ErrorInfo])->isMem())
- ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getOffsetLoc();
if (ErrorLoc == SMLoc())
ErrorLoc = IDLoc;
return showMatchError(ErrorLoc, MatchResult);
extern "C" void LLVMInitializeARM64AsmParser() {
RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
+
+ // Also register this parser under the AArch64 little/big-endian target
+ // descriptors, so that both the ARM64 and AArch64 target names resolve to
+ // the same assembly parser implementation.
+ RegisterMCAsmParser<ARM64AsmParser> Z(TheAArch64leTarget);
+ RegisterMCAsmParser<ARM64AsmParser> W(TheAArch64beTarget);
}
#define GET_REGISTER_MATCHER