OperandMatchResultTy tryParseNoIndexMemory(OperandVector &Operands);
OperandMatchResultTy tryParseBarrierOperand(OperandVector &Operands);
OperandMatchResultTy tryParseMRSSystemRegister(OperandVector &Operands);
- OperandMatchResultTy tryParseMSRSystemRegister(OperandVector &Operands);
- OperandMatchResultTy tryParseCPSRField(OperandVector &Operands);
+ OperandMatchResultTy tryParseSysReg(OperandVector &Operands);
OperandMatchResultTy tryParseSysCROperand(OperandVector &Operands);
OperandMatchResultTy tryParsePrefetch(OperandVector &Operands);
OperandMatchResultTy tryParseAdrpLabel(OperandVector &Operands);
#include "ARM64GenAsmMatcher.inc"
};
ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
+
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
k_VectorList,
k_VectorIndex,
k_Token,
+ k_SysReg,
k_SysCR,
k_Prefetch,
k_Shifter,
k_Extend,
k_FPImm,
- k_Barrier,
- k_SystemRegister,
- k_CPSRField
+ k_Barrier
} Kind;
SMLoc StartLoc, EndLoc, OffsetLoc;
unsigned Val; // Not the enum since not all values have names.
};
- struct SystemRegisterOp {
- // 16-bit immediate, usually from the ARM64SysReg::SysRegValues enum
- // but not limited to those values.
- uint16_t Val;
- };
-
- struct CPSRFieldOp {
- ARM64PState::PStateValues Field;
+ struct SysRegOp {
+ const char *Data;
+ unsigned Length;
};
struct SysCRImmOp {
struct ImmOp Imm;
struct FPImmOp FPImm;
struct BarrierOp Barrier;
- struct SystemRegisterOp SystemRegister;
- struct CPSRFieldOp CPSRField;
+ struct SysRegOp SysReg;
struct SysCRImmOp SysCRImm;
struct PrefetchOp Prefetch;
struct ShifterOp Shifter;
case k_Barrier:
Barrier = o.Barrier;
break;
- case k_SystemRegister:
- SystemRegister = o.SystemRegister;
- break;
- case k_CPSRField:
- CPSRField = o.CPSRField;
- break;
case k_Register:
Reg = o.Reg;
break;
case k_VectorIndex:
VectorIndex = o.VectorIndex;
break;
+ case k_SysReg:
+ SysReg = o.SysReg;
+ break;
case k_SysCR:
SysCRImm = o.SysCRImm;
break;
return Barrier.Val;
}
- uint16_t getSystemRegister() const {
- assert(Kind == k_SystemRegister && "Invalid access!");
- return SystemRegister.Val;
- }
-
- ARM64PState::PStateValues getCPSRField() const {
- assert(Kind == k_CPSRField && "Invalid access!");
- return CPSRField.Field;
- }
-
unsigned getReg() const {
assert(Kind == k_Register && "Invalid access!");
return Reg.RegNum;
return VectorIndex.Val;
}
+ StringRef getSysReg() const {
+ assert(Kind == k_SysReg && "Invalid access!");
+ return StringRef(SysReg.Data, SysReg.Length);
+ }
+
unsigned getSysCR() const {
assert(Kind == k_SysCR && "Invalid access!");
return SysCRImm.Val;
return false;
return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
}
- bool isBranchTarget19() const {
+ bool isPCRelLabel19() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
bool isMovZSymbolG2() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
+ ARM64MCExpr::VK_ABS_G2_S,
ARM64MCExpr::VK_TPREL_G2,
ARM64MCExpr::VK_DTPREL_G2 };
return isMovWSymbol(Variants);
bool isMovZSymbolG1() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
+ ARM64MCExpr::VK_ABS_G1_S,
ARM64MCExpr::VK_GOTTPREL_G1,
ARM64MCExpr::VK_TPREL_G1,
ARM64MCExpr::VK_DTPREL_G1, };
bool isMovZSymbolG0() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
+ ARM64MCExpr::VK_ABS_G0_S,
ARM64MCExpr::VK_TPREL_G0,
ARM64MCExpr::VK_DTPREL_G0 };
return isMovWSymbol(Variants);
}
+ bool isMovKSymbolG3() const {
+ static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
+
bool isMovKSymbolG2() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
return isMovWSymbol(Variants);
bool isFPImm() const { return Kind == k_FPImm; }
bool isBarrier() const { return Kind == k_Barrier; }
- bool isSystemRegister() const {
- if (Kind == k_SystemRegister)
- return true;
- // SPSel is legal for both the system register and the CPSR-field
- // variants of MSR, so special case that. Fugly.
- return (Kind == k_CPSRField && getCPSRField() == ARM64PState::SPSel);
+ bool isSysReg() const { return Kind == k_SysReg; }
+ bool isMRSSystemRegister() const {
+ if (!isSysReg()) return false;
+
+ bool IsKnownRegister;
+ ARM64SysReg::MRSMapper().fromString(getSysReg(), IsKnownRegister);
+
+ return IsKnownRegister;
+ }
+ bool isMSRSystemRegister() const {
+ if (!isSysReg()) return false;
+
+ bool IsKnownRegister;
+ ARM64SysReg::MSRMapper().fromString(getSysReg(), IsKnownRegister);
+
+ return IsKnownRegister;
+ }
+ bool isSystemCPSRField() const {
+ if (!isSysReg()) return false;
+
+ bool IsKnownRegister;
+ ARM64PState::PStateMapper().fromString(getSysReg(), IsKnownRegister);
+
+ return IsKnownRegister;
}
- bool isSystemCPSRField() const { return Kind == k_CPSRField; }
bool isReg() const { return Kind == k_Register && !Reg.isVector; }
bool isVectorReg() const { return Kind == k_Register && Reg.isVector; }
bool isAdrpLabel() const {
// Validation was handled during parsing, so we just sanity check that
// something didn't go haywire.
- return isImm();
+ if (!isImm())
+ return false;
+
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (4096 * (1LL << (21 - 1)));
+ int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
+ return (Val % 4096) == 0 && Val >= Min && Val <= Max;
+ }
+
+ return true;
}
bool isAdrLabel() const {
// Validation was handled during parsing, so we just sanity check that
// something didn't go haywire.
- return isImm();
+ if (!isImm())
+ return false;
+
+ if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
+ int64_t Val = CE->getValue();
+ int64_t Min = - (1LL << (21 - 1));
+ int64_t Max = ((1LL << (21 - 1)) - 1);
+ return Val >= Min && Val <= Max;
+ }
+
+ return true;
}
void addExpr(MCInst &Inst, const MCExpr *Expr) const {
}
void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
- addImmOperands(Inst, N);
+ assert(N == 1 && "Invalid number of operands!");
+ const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
+ if (!MCE)
+ addExpr(Inst, getImm());
+ else
+ Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 12));
}
void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
- void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
+ void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
// Branch operands don't encode the low bits, so shift them off
// here. If it's a label, however, just put it on directly as there's
// not enough information now to do anything.
Inst.addOperand(MCOperand::CreateImm(getBarrier()));
}
- void addSystemRegisterOperands(MCInst &Inst, unsigned N) const {
+ void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- if (Kind == k_SystemRegister)
- Inst.addOperand(MCOperand::CreateImm(getSystemRegister()));
- else {
- assert(Kind == k_CPSRField && getCPSRField() == ARM64PState::SPSel);
- Inst.addOperand(MCOperand::CreateImm(ARM64SysReg::SPSel));
- }
+
+ bool Valid;
+ uint32_t Bits = ARM64SysReg::MRSMapper().fromString(getSysReg(), Valid);
+
+ Inst.addOperand(MCOperand::CreateImm(Bits));
+ }
+
+ void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+
+ bool Valid;
+ uint32_t Bits = ARM64SysReg::MSRMapper().fromString(getSysReg(), Valid);
+
+ Inst.addOperand(MCOperand::CreateImm(Bits));
}
void addSystemCPSRFieldOperands(MCInst &Inst, unsigned N) const {
assert(N == 1 && "Invalid number of operands!");
- Inst.addOperand(MCOperand::CreateImm(getCPSRField()));
+
+ bool Valid;
+ uint32_t Bits = ARM64PState::PStateMapper().fromString(getSysReg(), Valid);
+
+ Inst.addOperand(MCOperand::CreateImm(Bits));
}
void addSysCROperands(MCInst &Inst, unsigned N) const {
assert(N == 3 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
- Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
+ Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
Inst.addOperand(MCOperand::CreateImm(ExtendImm));
}
return Op;
}
- static ARM64Operand *CreateSystemRegister(uint16_t Val, SMLoc S,
- MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_SystemRegister, Ctx);
- Op->SystemRegister.Val = Val;
- Op->StartLoc = S;
- Op->EndLoc = S;
- return Op;
- }
-
- static ARM64Operand *CreateCPSRField(ARM64PState::PStateValues Field, SMLoc S,
- MCContext &Ctx) {
- ARM64Operand *Op = new ARM64Operand(k_CPSRField, Ctx);
- Op->CPSRField.Field = Field;
+ static ARM64Operand *CreateSysReg(StringRef Str, SMLoc S, MCContext &Ctx) {
+ ARM64Operand *Op = new ARM64Operand(k_SysReg, Ctx);
+ Op->SysReg.Data = Str.data();
+ Op->SysReg.Length = Str.size();
Op->StartLoc = S;
Op->EndLoc = S;
return Op;
if (Valid)
OS << "<barrier " << Name << ">";
else
- OS << "<barrier invalid #" << getCPSRField() << ">";
- break;
- }
- case k_SystemRegister: {
- bool Valid;
- StringRef Name = ARM64SysReg::MRSMapper().toString(getSystemRegister(), Valid);
- if (!Valid)
- Name = ARM64SysReg::MSRMapper().toString(getSystemRegister(), Valid);
- if (Valid)
- OS << "<systemreg " << Name << ">";
- else
- OS << "<systemreg invalid #" << getSystemRegister() << ">";
- break;
- }
- case k_CPSRField: {
- bool Valid;
- StringRef Name = ARM64PState::PStateMapper().toString(getCPSRField(), Valid);
- if (Valid)
- OS << "<cpsrfield " << Name << ">";
- else
- OS << "<cpsrfield invalid #" << getCPSRField() << ">";
+ OS << "<barrier invalid #" << getBarrier() << ">";
break;
}
case k_Immediate:
case k_VectorIndex:
OS << "<vectorindex " << getVectorIndex() << ">";
break;
+ case k_SysReg:
+ OS << "<sysreg: " << getSysReg() << '>';
+ break;
case k_Token:
OS << "'" << getToken() << "'";
break;
// Also handle a few aliases of registers.
if (RegNum == 0)
RegNum = StringSwitch<unsigned>(lowerCase)
- .Case("x29", ARM64::FP)
- .Case("x30", ARM64::LR)
+ .Case("fp", ARM64::FP)
+ .Case("lr", ARM64::LR)
.Case("x31", ARM64::XZR)
.Case("w31", ARM64::WZR)
.Default(0);
SMLoc S = getLoc();
const AsmToken &Tok = Parser.getTok();
// Either an identifier for named values or a 5-bit immediate.
- if (Tok.is(AsmToken::Hash)) {
- Parser.Lex(); // Eat hash token.
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ if (Hash)
+ Parser.Lex(); // Eat hash token.
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
return MatchOperand_ParseFail;
ARM64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
SMLoc S = getLoc();
const MCExpr *Expr;
+
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
+ }
+
if (parseSymbolicImmVal(Expr))
return MatchOperand_ParseFail;
ARM64MCExpr::VariantKind ELFRefKind;
MCSymbolRefExpr::VariantKind DarwinRefKind;
const MCConstantExpr *Addend;
- if (!classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
- Error(S, "modified label reference + constant expected");
- return MatchOperand_ParseFail;
- }
-
- if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
- ELFRefKind == ARM64MCExpr::VK_INVALID) {
- // No modifier was specified at all; this is the syntax for an ELF basic
- // ADRP relocation (unfortunately).
- Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
- } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
- DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
- Addend != 0) {
- Error(S, "gotpage label reference not allowed an addend");
- return MatchOperand_ParseFail;
- } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
- DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
- DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
- ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
- ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
- ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
- // The operand must be an @page or @gotpage qualified symbolref.
- Error(S, "page or gotpage label reference expected");
- return MatchOperand_ParseFail;
+ if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
+ if (DarwinRefKind == MCSymbolRefExpr::VK_None &&
+ ELFRefKind == ARM64MCExpr::VK_INVALID) {
+ // No modifier was specified at all; this is the syntax for an ELF basic
+ // ADRP relocation (unfortunately).
+ Expr = ARM64MCExpr::Create(Expr, ARM64MCExpr::VK_ABS_PAGE, getContext());
+ } else if ((DarwinRefKind == MCSymbolRefExpr::VK_GOTPAGE ||
+ DarwinRefKind == MCSymbolRefExpr::VK_TLVPPAGE) &&
+ Addend != 0) {
+ Error(S, "gotpage label reference not allowed an addend");
+ return MatchOperand_ParseFail;
+ } else if (DarwinRefKind != MCSymbolRefExpr::VK_PAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_GOTPAGE &&
+ DarwinRefKind != MCSymbolRefExpr::VK_TLVPPAGE &&
+ ELFRefKind != ARM64MCExpr::VK_GOT_PAGE &&
+ ELFRefKind != ARM64MCExpr::VK_GOTTPREL_PAGE &&
+ ELFRefKind != ARM64MCExpr::VK_TLSDESC_PAGE) {
+ // The operand must be an @page or @gotpage qualified symbolref.
+ Error(S, "page or gotpage label reference expected");
+ return MatchOperand_ParseFail;
+ }
}
- // We have a label reference possibly with addend. The addend is a raw value
- // here. The linker will adjust it to only reference the page.
+ // We have either a label reference possibly with addend or an immediate. The
+ // addend is a raw value here. The linker will adjust it to only reference the
+ // page.
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
ARM64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
SMLoc S = getLoc();
const MCExpr *Expr;
- if (getParser().parseExpression(Expr))
- return MatchOperand_ParseFail;
- // The operand must be an un-qualified assembler local symbolref.
- // FIXME: wrong for ELF.
- if (const MCSymbolRefExpr *SRE = dyn_cast<const MCSymbolRefExpr>(Expr)) {
- // FIXME: Should reference the MachineAsmInfo to get the private prefix.
- bool isTemporary = SRE->getSymbol().getName().startswith("L");
- if (!isTemporary || SRE->getKind() != MCSymbolRefExpr::VK_None) {
- Error(S, "unqualified, assembler-local label name expected");
- return MatchOperand_ParseFail;
- }
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat hash token.
}
+ if (getParser().parseExpression(Expr))
+ return MatchOperand_ParseFail;
+
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(ARM64Operand::CreateImm(Expr, S, E, getContext()));
ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
SMLoc S = getLoc();
- if (Parser.getTok().isNot(AsmToken::Hash))
- return MatchOperand_NoMatch;
- Parser.Lex(); // Eat the '#'.
+ bool Hash = false;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat '#'
+ Hash = true;
+ }
// Handle negation, as that still comes through as a separate token.
bool isNegative = false;
return MatchOperand_Success;
}
+ if (!Hash)
+ return MatchOperand_NoMatch;
+
TokError("invalid floating point immediate");
return MatchOperand_ParseFail;
}
Parser.Lex();
// We expect a number here.
- if (getLexer().isNot(AsmToken::Hash))
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer))
return TokError("immediate value expected for shifter operand");
- Parser.Lex(); // Eat the '#'.
+
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
SMLoc ExprLoc = getLoc();
const MCExpr *ImmVal;
return false;
}
- if (getLexer().isNot(AsmToken::Hash)) {
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer)) {
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(
ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
return false;
}
- Parser.Lex(); // Eat the '#'.
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
} else if (!Op.compare_lower("ipas2le1")) {
// SYS #4, C8, C4, #5
SYS_ALIAS(4, 8, 4, 5);
+ } else if (!Op.compare_lower("ipas2e1is")) {
+ // SYS #4, C8, C0, #1
+ SYS_ALIAS(4, 8, 0, 1);
+ } else if (!Op.compare_lower("ipas2le1is")) {
+ // SYS #4, C8, C0, #5
+ SYS_ALIAS(4, 8, 0, 5);
} else if (!Op.compare_lower("vmalls12e1")) {
// SYS #4, C8, C7, #6
SYS_ALIAS(4, 8, 7, 6);
Parser.Lex(); // Eat operand.
+ bool ExpectRegister = (Op.lower().find("all") == StringRef::npos);
+ bool HasRegister = false;
+
// Check for the optional register operand.
if (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat comma.
if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
return TokError("expected register operand");
+
+ HasRegister = true;
}
if (getLexer().isNot(AsmToken::EndOfStatement)) {
return TokError("unexpected token in argument list");
}
+ if (ExpectRegister && !HasRegister) {
+ return TokError("specified " + Mnemonic + " op requires a register");
+ }
+ else if (!ExpectRegister && HasRegister) {
+ return TokError("specified " + Mnemonic + " op does not use a register");
+ }
+
Parser.Lex(); // Consume the EndOfStatement
return false;
}
const AsmToken &Tok = Parser.getTok();
// Can be either a #imm style literal or an option name
- if (Tok.is(AsmToken::Hash)) {
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
// Immediate operand.
- Parser.Lex(); // Eat the '#'
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
const MCExpr *ImmVal;
SMLoc ExprLoc = getLoc();
if (getParser().parseExpression(ImmVal))
}
ARM64AsmParser::OperandMatchResultTy
-ARM64AsmParser::tryParseMRSSystemRegister(OperandVector &Operands) {
+ARM64AsmParser::tryParseSysReg(OperandVector &Operands) {
const AsmToken &Tok = Parser.getTok();
if (Tok.isNot(AsmToken::Identifier))
return MatchOperand_NoMatch;
- bool Valid;
- auto Mapper = ARM64SysReg::MRSMapper();
- uint32_t Reg = Mapper.fromString(Tok.getString(), Valid);
-
- if (Valid) {
- Operands.push_back(
- ARM64Operand::CreateSystemRegister((uint16_t)Reg, getLoc(),
- getContext()));
- Parser.Lex(); // Consume the register name.
- return MatchOperand_Success;
- }
-
- return MatchOperand_NoMatch;
-}
-
-ARM64AsmParser::OperandMatchResultTy
-ARM64AsmParser::tryParseMSRSystemRegister(OperandVector &Operands) {
- const AsmToken &Tok = Parser.getTok();
-
- if (Tok.isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
-
- bool Valid;
- auto Mapper = ARM64SysReg::MSRMapper();
- uint32_t Reg = Mapper.fromString(Tok.getString(), Valid);
-
- if (Valid) {
- Operands.push_back(
- ARM64Operand::CreateSystemRegister((uint16_t)Reg, getLoc(),
- getContext()));
- Parser.Lex(); // Consume the register name.
- return MatchOperand_Success;
- }
-
- return MatchOperand_NoMatch;
-}
-
-ARM64AsmParser::OperandMatchResultTy
-ARM64AsmParser::tryParseCPSRField(OperandVector &Operands) {
- const AsmToken &Tok = Parser.getTok();
-
- if (Tok.isNot(AsmToken::Identifier))
- return MatchOperand_NoMatch;
-
- bool Valid;
- ARM64PState::PStateValues Field = (ARM64PState::PStateValues)
- ARM64PState::PStateMapper().fromString(Tok.getString(), Valid);
-
- if (!Valid)
- return MatchOperand_NoMatch;
- Operands.push_back(
- ARM64Operand::CreateCPSRField(Field, getLoc(), getContext()));
- Parser.Lex(); // Consume the register name.
+ Operands.push_back(ARM64Operand::CreateSysReg(Tok.getString(), getLoc(),
+ getContext()));
+ Parser.Lex(); // Eat identifier
return MatchOperand_Success;
}
Parser.Lex(); // Eat the extend op.
+ // A 32-bit offset register is only valid for [SU]/XTW extend
+ // operators.
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
+ if (ExtOp != ARM64_AM::UXTW &&
+ ExtOp != ARM64_AM::SXTW)
+ return Error(ExtLoc, "32-bit general purpose offset register "
+ "requires sxtw or uxtw extend");
+ } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Reg2))
+ return Error(OffsetLoc,
+ "64-bit general purpose offset register expected");
+
+ bool Hash = getLexer().is(AsmToken::Hash);
if (getLexer().is(AsmToken::RBrac)) {
// No immediate operand.
if (ExtOp == ARM64_AM::UXTX)
return Error(ExtLoc, "LSL extend requires immediate operand");
- } else if (getLexer().is(AsmToken::Hash)) {
+ } else if (Hash || getLexer().is(AsmToken::Integer)) {
// Immediate operand.
- Parser.Lex(); // Eat the '#'
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
const MCExpr *ImmVal;
SMLoc ExprLoc = getLoc();
if (getParser().parseExpression(ImmVal))
return false;
// Immediate expressions.
- } else if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex(); // Eat hash token.
+ } else if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Integer)) {
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat hash token.
if (parseSymbolicImmVal(OffsetExpr))
return true;
.Case("lo12", ARM64MCExpr::VK_LO12)
.Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
.Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
+ .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
.Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
.Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
+ .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
.Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
.Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
+ .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
.Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
.Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
.Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
return false;
}
+ case AsmToken::Integer:
+ case AsmToken::Real:
case AsmToken::Hash: {
// #42 -> immediate.
S = getLoc();
- Parser.Lex();
+ if (getLexer().is(AsmToken::Hash))
+ Parser.Lex();
// The only Real that should come through here is a literal #0.0 for
// the fcmp[e] r, #0.0 instructions. They expect raw token operands,
return false;
}
-/// isFPR32Register - Check if a register is in the FPR32 register class.
-/// (The parser does not have the target register info to check the register
-/// class directly.)
-static bool isFPR32Register(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- default:
- break;
- case S0: case S1: case S2: case S3: case S4: case S5: case S6:
- case S7: case S8: case S9: case S10: case S11: case S12: case S13:
- case S14: case S15: case S16: case S17: case S18: case S19: case S20:
- case S21: case S22: case S23: case S24: case S25: case S26: case S27:
- case S28: case S29: case S30: case S31:
- return true;
- }
- return false;
-}
-
-/// isGPR32Register - Check if a register is in the GPR32sp register class.
-/// (The parser does not have the target register info to check the register
-/// class directly.)
-static bool isGPR32Register(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- default:
- break;
- case W0: case W1: case W2: case W3: case W4: case W5: case W6:
- case W7: case W8: case W9: case W10: case W11: case W12: case W13:
- case W14: case W15: case W16: case W17: case W18: case W19: case W20:
- case W21: case W22: case W23: case W24: case W25: case W26: case W27:
- case W28: case W29: case W30: case WSP: case WZR:
- return true;
- }
- return false;
-}
-
-static bool isGPR64Reg(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- case X0: case X1: case X2: case X3: case X4: case X5: case X6:
- case X7: case X8: case X9: case X10: case X11: case X12: case X13:
- case X14: case X15: case X16: case X17: case X18: case X19: case X20:
- case X21: case X22: case X23: case X24: case X25: case X26: case X27:
- case X28: case FP: case LR: case SP: case XZR:
- return true;
- default:
- return false;
- }
-}
-
-
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
}
}
-static void rewriteMOV(ARM64AsmParser::OperandVector &Operands,
- StringRef mnemonic, uint64_t imm, unsigned shift,
- MCContext &Context) {
+static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
+ StringRef mnemonic, uint64_t imm, unsigned shift,
+ MCContext &Context) {
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
Operands[0] =
delete Op;
}
+static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
+ MCContext &Context) {
+ ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
+ ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
+ Operands[0] =
+ ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
+
+ const MCExpr *Imm = MCConstantExpr::Create(0, Context);
+ Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
+ Op2->getEndLoc(), Context));
+ Operands.push_back(ARM64Operand::CreateShifter(
+ ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
+
+ delete Op;
+}
+
+static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
+ MCContext &Context) {
+ ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
+ ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
+ Operands[0] =
+ ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
+
+ // Operands[2] becomes Operands[3].
+ Operands.push_back(Operands[2]);
+ // And Operands[2] becomes ZR.
+ unsigned ZeroReg = ARM64::XZR;
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Operands[2]->getReg()))
+ ZeroReg = ARM64::WZR;
+
+ Operands[2] =
+ ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
+ Op2->getEndLoc(), Context);
+
+ delete Op;
+}
+
bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
switch (ErrCode) {
case Match_MissingFeature:
return Error(Loc, "immediate must be an integer in range [1,32].");
case Match_InvalidImm1_64:
return Error(Loc, "immediate must be an integer in range [1,64].");
+ case Match_InvalidLabel:
+ return Error(Loc, "expected label or encodable integer pc offset");
case Match_MnemonicFail:
return Error(Loc, "unrecognized instruction mnemonic");
default:
}
}
+static const char *getSubtargetFeatureName(unsigned Val);
+
bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
// Insert WZR or XZR as destination operand.
ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
unsigned ZeroReg;
- if (RegOp->isReg() && isGPR32Register(RegOp->getReg()))
+ if (RegOp->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ RegOp->getReg()))
ZeroReg = ARM64::WZR;
else
ZeroReg = ARM64::XZR;
// FIXME: Catching this here is a total hack, and we should use tblgen
// support to implement this instead as soon as it is available.
+ ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
if (Op2->isImm()) {
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
// set, clear the complemented upper 32-bits so the logic below works
// for 32-bit registers too.
ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
- if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
+ if (Op1->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op1->getReg()) &&
(Val & 0xFFFFFFFFULL) == Val)
NVal &= 0x00000000FFFFFFFFULL;
// MOVK Rd, imm << 0
if ((Val & 0xFFFF) == Val)
- rewriteMOV(Operands, "movz", Val, 0, getContext());
+ rewriteMOVI(Operands, "movz", Val, 0, getContext());
// MOVK Rd, imm << 16
else if ((Val & 0xFFFF0000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 16, getContext());
+ rewriteMOVI(Operands, "movz", Val, 16, getContext());
// MOVK Rd, imm << 32
else if ((Val & 0xFFFF00000000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 32, getContext());
+ rewriteMOVI(Operands, "movz", Val, 32, getContext());
// MOVK Rd, imm << 48
else if ((Val & 0xFFFF000000000000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 48, getContext());
+ rewriteMOVI(Operands, "movz", Val, 48, getContext());
// MOVN Rd, (~imm << 0)
else if ((NVal & 0xFFFFULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 0, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 0, getContext());
// MOVN Rd, ~(imm << 16)
else if ((NVal & 0xFFFF0000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 16, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 16, getContext());
// MOVN Rd, ~(imm << 32)
else if ((NVal & 0xFFFF00000000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 32, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 32, getContext());
// MOVN Rd, ~(imm << 48)
else if ((NVal & 0xFFFF000000000000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 48, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 48, getContext());
}
+ } else if (Op1->isReg() && Op2->isReg()) {
+ // reg->reg move.
+ unsigned Reg1 = Op1->getReg();
+ unsigned Reg2 = Op2->getReg();
+ if ((Reg1 == ARM64::SP &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
+ (Reg2 == ARM64::SP &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
+ (Reg1 == ARM64::WSP &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
+ (Reg2 == ARM64::WSP &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
+ rewriteMOVRSP(Operands, getContext());
+ else
+ rewriteMOVR(Operands, getContext());
}
} else if (NumOperands == 4) {
if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
uint64_t Op3Val = Op3CE->getValue();
uint64_t NewOp3Val = 0;
uint64_t NewOp4Val = 0;
- if (isGPR32Register(Op2->getReg())) {
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op2->getReg())) {
NewOp3Val = (32 - Op3Val) & 0x1f;
NewOp4Val = 31 - Op3Val;
} else {
uint64_t Op4Val = Op4CE->getValue();
uint64_t NewOp3Val = 0;
- if (isGPR32Register(Op1->getReg()))
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op1->getReg()))
NewOp3Val = (32 - Op3Val) & 0x1f;
else
NewOp3Val = (64 - Op3Val) & 0x3f;
else if (NumOperands == 3 &&
(Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
- if (Op->isReg() && isGPR64Reg(Op->getReg())) {
+ if (Op->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Op->getReg())) {
// The source register can be Wn here, but the matcher expects a
// GPR64. Twiddle it here if necessary.
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
if (RegOp->isReg() && ImmOp->isFPImm() &&
ImmOp->getFPImm() == (unsigned)-1) {
- unsigned zreg =
- isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
+ unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
+ RegOp->getReg())
+ ? ARM64::WZR
+ : ARM64::XZR;
Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
Op->getEndLoc(), getContext());
delete ImmOp;
if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
SMLoc Loc = Op->getStartLoc();
Operands.pop_back();
+ delete Op;
Operands.push_back(
ARM64Operand::CreateToken("[", false, Loc, getContext()));
Operands.push_back(
Operands.insert(
Operands.begin() + OpNo + 2,
ARM64Operand::CreateToken("]", false, Loc, getContext()));
+ delete Op;
}
}
}
Out.EmitInstruction(Inst, STI);
return false;
}
- case Match_MissingFeature:
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (neon, e.g.).
+ std::string Msg = "instruction requires:";
+ unsigned Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
case Match_MnemonicFail:
return showMatchError(IDLoc, MatchResult);
case Match_InvalidOperand: {
MatchResult = Match_InvalidMemoryIndexed64;
if (ErrorInfo) {
ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
- if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
- .contains(PrevOp->getReg()))
+ if (PrevOp->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
+ PrevOp->getReg()))
MatchResult = Match_InvalidMemoryIndexed32;
}
}
case Match_InvalidImm1_8:
case Match_InvalidImm1_16:
case Match_InvalidImm1_32:
- case Match_InvalidImm1_64: {
+ case Match_InvalidImm1_64:
+ case Match_InvalidLabel: {
// Any time we get here, there's nothing fancy to do. Just get the
// operand SMLoc and display the diagnostic.
SMLoc ErrorLoc = ((ARM64Operand *)Operands[ErrorInfo])->getStartLoc();
/// Force static initialization.
extern "C" void LLVMInitializeARM64AsmParser() {
- RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
+ RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
+ RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
}
#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARM64GenAsmMatcher.inc"