#include "ARM64GenAsmMatcher.inc"
};
ARM64AsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
- const MCInstrInfo &MII)
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
: MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
MCAsmParserExtension::Initialize(_Parser);
+
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
}
virtual bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
return false;
return (Val >= -(0x2000000 << 2) && Val <= (0x1ffffff << 2));
}
- bool isBranchTarget19() const {
+ bool isPCRelLabel19() const {
if (!isImm())
return false;
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
bool isMovZSymbolG2() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2,
+ ARM64MCExpr::VK_ABS_G2_S,
ARM64MCExpr::VK_TPREL_G2,
ARM64MCExpr::VK_DTPREL_G2 };
return isMovWSymbol(Variants);
bool isMovZSymbolG1() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G1,
+ ARM64MCExpr::VK_ABS_G1_S,
ARM64MCExpr::VK_GOTTPREL_G1,
ARM64MCExpr::VK_TPREL_G1,
ARM64MCExpr::VK_DTPREL_G1, };
bool isMovZSymbolG0() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G0,
+ ARM64MCExpr::VK_ABS_G0_S,
ARM64MCExpr::VK_TPREL_G0,
ARM64MCExpr::VK_DTPREL_G0 };
return isMovWSymbol(Variants);
}
+ // True if this operand is a symbolic immediate usable with MOVK at the
+ // G3 position: only the :abs_g3: relocation variant is accepted.
+ bool isMovKSymbolG3() const {
+ static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G3 };
+ return isMovWSymbol(Variants);
+ }
+
bool isMovKSymbolG2() const {
static ARM64MCExpr::VariantKind Variants[] = { ARM64MCExpr::VK_ABS_G2_NC };
return isMovWSymbol(Variants);
Inst.addOperand(MCOperand::CreateImm(MCE->getValue() >> 2));
}
- void addBranchTarget19Operands(MCInst &Inst, unsigned N) const {
+ void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
// Branch operands don't encode the low bits, so shift them off
// here. If it's a label, however, just put it on directly as there's
// not enough information now to do anything.
assert(N == 3 && "Invalid number of operands!");
Inst.addOperand(MCOperand::CreateReg(Mem.BaseRegNum));
- Inst.addOperand(MCOperand::CreateReg(Mem.OffsetRegNum));
+ Inst.addOperand(MCOperand::CreateReg(getXRegFromWReg(Mem.OffsetRegNum)));
unsigned ExtendImm = ARM64_AM::getMemExtendImm(Mem.ExtType, DoShift);
Inst.addOperand(MCOperand::CreateImm(ExtendImm));
}
SMLoc S = getLoc();
const AsmToken &Tok = Parser.getTok();
// Either an identifier for named values or a 5-bit immediate.
- if (Tok.is(AsmToken::Hash)) {
- Parser.Lex(); // Eat hash token.
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
+ if (Hash)
+ Parser.Lex(); // Eat hash token.
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
return MatchOperand_ParseFail;
ARM64AsmParser::tryParseFPImm(OperandVector &Operands) {
SMLoc S = getLoc();
- if (Parser.getTok().isNot(AsmToken::Hash))
- return MatchOperand_NoMatch;
- Parser.Lex(); // Eat the '#'.
+ bool Hash = false;
+ if (Parser.getTok().is(AsmToken::Hash)) {
+ Parser.Lex(); // Eat '#'
+ Hash = true;
+ }
// Handle negation, as that still comes through as a separate token.
bool isNegative = false;
return MatchOperand_Success;
}
+ if (!Hash)
+ return MatchOperand_NoMatch;
+
TokError("invalid floating point immediate");
return MatchOperand_ParseFail;
}
Parser.Lex();
// We expect a number here.
- if (getLexer().isNot(AsmToken::Hash))
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer))
return TokError("immediate value expected for shifter operand");
- Parser.Lex(); // Eat the '#'.
+
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
SMLoc ExprLoc = getLoc();
const MCExpr *ImmVal;
return false;
}
- if (getLexer().isNot(AsmToken::Hash)) {
+ bool Hash = getLexer().is(AsmToken::Hash);
+ if (!Hash && getLexer().isNot(AsmToken::Integer)) {
SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
Operands.push_back(
ARM64Operand::CreateExtend(ExtOp, 0, S, E, getContext()));
return false;
}
- Parser.Lex(); // Eat the '#'.
+ if (Hash)
+ Parser.Lex(); // Eat the '#'.
const MCExpr *ImmVal;
if (getParser().parseExpression(ImmVal))
const AsmToken &Tok = Parser.getTok();
// Can be either a #imm style literal or an option name
- if (Tok.is(AsmToken::Hash)) {
+ bool Hash = Tok.is(AsmToken::Hash);
+ if (Hash || Tok.is(AsmToken::Integer)) {
// Immediate operand.
- Parser.Lex(); // Eat the '#'
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
const MCExpr *ImmVal;
SMLoc ExprLoc = getLoc();
if (getParser().parseExpression(ImmVal))
Parser.Lex(); // Eat the extend op.
+ // A 32-bit offset register is only valid for [SU]/XTW extend
+ // operators.
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) {
+ if (ExtOp != ARM64_AM::UXTW &&
+ ExtOp != ARM64_AM::SXTW)
+ return Error(ExtLoc, "32-bit general purpose offset register "
+ "requires sxtw or uxtw extend");
+ } else if (!ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Reg2))
+ return Error(OffsetLoc,
+ "64-bit general purpose offset register expected");
+
+ bool Hash = getLexer().is(AsmToken::Hash);
if (getLexer().is(AsmToken::RBrac)) {
// No immediate operand.
if (ExtOp == ARM64_AM::UXTX)
return Error(ExtLoc, "LSL extend requires immediate operand");
- } else if (getLexer().is(AsmToken::Hash)) {
+ } else if (Hash || getLexer().is(AsmToken::Integer)) {
// Immediate operand.
- Parser.Lex(); // Eat the '#'
+ if (Hash)
+ Parser.Lex(); // Eat the '#'
const MCExpr *ImmVal;
SMLoc ExprLoc = getLoc();
if (getParser().parseExpression(ImmVal))
return false;
// Immediate expressions.
- } else if (Parser.getTok().is(AsmToken::Hash)) {
- Parser.Lex(); // Eat hash token.
+ } else if (Parser.getTok().is(AsmToken::Hash) ||
+ Parser.getTok().is(AsmToken::Integer)) {
+ if (Parser.getTok().is(AsmToken::Hash))
+ Parser.Lex(); // Eat hash token.
if (parseSymbolicImmVal(OffsetExpr))
return true;
.Case("lo12", ARM64MCExpr::VK_LO12)
.Case("abs_g3", ARM64MCExpr::VK_ABS_G3)
.Case("abs_g2", ARM64MCExpr::VK_ABS_G2)
+ .Case("abs_g2_s", ARM64MCExpr::VK_ABS_G2_S)
.Case("abs_g2_nc", ARM64MCExpr::VK_ABS_G2_NC)
.Case("abs_g1", ARM64MCExpr::VK_ABS_G1)
+ .Case("abs_g1_s", ARM64MCExpr::VK_ABS_G1_S)
.Case("abs_g1_nc", ARM64MCExpr::VK_ABS_G1_NC)
.Case("abs_g0", ARM64MCExpr::VK_ABS_G0)
+ .Case("abs_g0_s", ARM64MCExpr::VK_ABS_G0_S)
.Case("abs_g0_nc", ARM64MCExpr::VK_ABS_G0_NC)
.Case("dtprel_g2", ARM64MCExpr::VK_DTPREL_G2)
.Case("dtprel_g1", ARM64MCExpr::VK_DTPREL_G1)
Operands.push_back(ARM64Operand::CreateImm(IdVal, S, E, getContext()));
return false;
}
+ case AsmToken::Integer:
+ case AsmToken::Real:
case AsmToken::Hash: {
// #42 -> immediate.
S = getLoc();
- Parser.Lex();
+ if (getLexer().is(AsmToken::Hash))
+ Parser.Lex();
// The only Real that should come through here is a literal #0.0 for
// the fcmp[e] r, #0.0 instructions. They expect raw token operands,
return false;
}
-/// isFPR32Register - Check if a register is in the FPR32 register class.
-/// (The parser does not have the target register info to check the register
-/// class directly.)
-static bool isFPR32Register(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- default:
- break;
- case S0: case S1: case S2: case S3: case S4: case S5: case S6:
- case S7: case S8: case S9: case S10: case S11: case S12: case S13:
- case S14: case S15: case S16: case S17: case S18: case S19: case S20:
- case S21: case S22: case S23: case S24: case S25: case S26: case S27:
- case S28: case S29: case S30: case S31:
- return true;
- }
- return false;
-}
-
-/// isGPR32Register - Check if a register is in the GPR32sp register class.
-/// (The parser does not have the target register info to check the register
-/// class directly.)
-static bool isGPR32Register(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- default:
- break;
- case W0: case W1: case W2: case W3: case W4: case W5: case W6:
- case W7: case W8: case W9: case W10: case W11: case W12: case W13:
- case W14: case W15: case W16: case W17: case W18: case W19: case W20:
- case W21: case W22: case W23: case W24: case W25: case W26: case W27:
- case W28: case W29: case W30: case WSP: case WZR:
- return true;
- }
- return false;
-}
-
-static bool isGPR64Reg(unsigned Reg) {
- using namespace ARM64;
- switch (Reg) {
- case X0: case X1: case X2: case X3: case X4: case X5: case X6:
- case X7: case X8: case X9: case X10: case X11: case X12: case X13:
- case X14: case X15: case X16: case X17: case X18: case X19: case X20:
- case X21: case X22: case X23: case X24: case X25: case X26: case X27:
- case X28: case FP: case LR: case SP: case XZR:
- return true;
- default:
- return false;
- }
-}
-
-
// FIXME: This entire function is a giant hack to provide us with decent
// operand range validation/diagnostics until TableGen/MC can be extended
// to support autogeneration of this kind of validation.
}
}
-static void rewriteMOV(ARM64AsmParser::OperandVector &Operands,
- StringRef mnemonic, uint64_t imm, unsigned shift,
- MCContext &Context) {
+static void rewriteMOVI(ARM64AsmParser::OperandVector &Operands,
+ StringRef mnemonic, uint64_t imm, unsigned shift,
+ MCContext &Context) {
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
Operands[0] =
delete Op;
}
+// Rewrite a register-to-register MOV involving SP/WSP into the canonical
+// "add Rd, Rn, #0, lsl #0" form: Operands[0] (the mnemonic token) becomes
+// "add", and a literal #0 immediate plus an LSL #0 shifter are appended.
+// The original mnemonic token is freed; the register operands are reused.
+static void rewriteMOVRSP(ARM64AsmParser::OperandVector &Operands,
+ MCContext &Context) {
+ ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
+ ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
+ Operands[0] =
+ ARM64Operand::CreateToken("add", false, Op->getStartLoc(), Context);
+
+ const MCExpr *Imm = MCConstantExpr::Create(0, Context);
+ Operands.push_back(ARM64Operand::CreateImm(Imm, Op2->getStartLoc(),
+ Op2->getEndLoc(), Context));
+ Operands.push_back(ARM64Operand::CreateShifter(
+ ARM64_AM::LSL, 0, Op2->getStartLoc(), Op2->getEndLoc(), Context));
+
+ delete Op;
+}
+
+// Rewrite a plain register-to-register MOV into "orr Rd, ZR, Rm":
+// Operands[0] becomes the "orr" token, the source register (Operands[2])
+// is moved to the third source slot, and WZR/XZR is inserted as the first
+// source, chosen by whether the source is a 32-bit GPR. The original
+// mnemonic token is freed.
+static void rewriteMOVR(ARM64AsmParser::OperandVector &Operands,
+ MCContext &Context) {
+ ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[0]);
+ ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
+ Operands[0] =
+ ARM64Operand::CreateToken("orr", false, Op->getStartLoc(), Context);
+
+ // Operands[2] becomes Operands[3].
+ Operands.push_back(Operands[2]);
+ // And Operands[2] becomes ZR.
+ unsigned ZeroReg = ARM64::XZR;
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Operands[2]->getReg()))
+ ZeroReg = ARM64::WZR;
+
+ Operands[2] =
+ ARM64Operand::CreateReg(ZeroReg, false, Op2->getStartLoc(),
+ Op2->getEndLoc(), Context);
+
+ delete Op;
+}
+
bool ARM64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode) {
switch (ErrCode) {
case Match_MissingFeature:
}
}
+static const char *getSubtargetFeatureName(unsigned Val);
+
bool ARM64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
OperandVector &Operands,
MCStreamer &Out,
// Insert WZR or XZR as destination operand.
ARM64Operand *RegOp = static_cast<ARM64Operand *>(Operands[1]);
unsigned ZeroReg;
- if (RegOp->isReg() && isGPR32Register(RegOp->getReg()))
+ if (RegOp->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ RegOp->getReg()))
ZeroReg = ARM64::WZR;
else
ZeroReg = ARM64::XZR;
// FIXME: Catching this here is a total hack, and we should use tblgen
// support to implement this instead as soon as it is available.
+ ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
ARM64Operand *Op2 = static_cast<ARM64Operand *>(Operands[2]);
if (Op2->isImm()) {
if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op2->getImm())) {
// set, clear the complemented upper 32-bits so the logic below works
// for 32-bit registers too.
ARM64Operand *Op1 = static_cast<ARM64Operand *>(Operands[1]);
- if (Op1->isReg() && isGPR32Register(Op1->getReg()) &&
+ if (Op1->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op1->getReg()) &&
(Val & 0xFFFFFFFFULL) == Val)
NVal &= 0x00000000FFFFFFFFULL;
// MOVK Rd, imm << 0
if ((Val & 0xFFFF) == Val)
- rewriteMOV(Operands, "movz", Val, 0, getContext());
+ rewriteMOVI(Operands, "movz", Val, 0, getContext());
// MOVK Rd, imm << 16
else if ((Val & 0xFFFF0000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 16, getContext());
+ rewriteMOVI(Operands, "movz", Val, 16, getContext());
// MOVK Rd, imm << 32
else if ((Val & 0xFFFF00000000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 32, getContext());
+ rewriteMOVI(Operands, "movz", Val, 32, getContext());
// MOVK Rd, imm << 48
else if ((Val & 0xFFFF000000000000ULL) == Val)
- rewriteMOV(Operands, "movz", Val, 48, getContext());
+ rewriteMOVI(Operands, "movz", Val, 48, getContext());
// MOVN Rd, (~imm << 0)
else if ((NVal & 0xFFFFULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 0, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 0, getContext());
// MOVN Rd, ~(imm << 16)
else if ((NVal & 0xFFFF0000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 16, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 16, getContext());
// MOVN Rd, ~(imm << 32)
else if ((NVal & 0xFFFF00000000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 32, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 32, getContext());
// MOVN Rd, ~(imm << 48)
else if ((NVal & 0xFFFF000000000000ULL) == NVal)
- rewriteMOV(Operands, "movn", NVal, 48, getContext());
+ rewriteMOVI(Operands, "movn", NVal, 48, getContext());
}
+ } else if (Op1->isReg() && Op2->isReg()) {
+ // reg->reg move.
+ unsigned Reg1 = Op1->getReg();
+ unsigned Reg2 = Op2->getReg();
+ if ((Reg1 == ARM64::SP &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg2)) ||
+ (Reg2 == ARM64::SP &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(Reg1)) ||
+ (Reg1 == ARM64::WSP &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg2)) ||
+ (Reg2 == ARM64::WSP &&
+ ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(Reg1)))
+ rewriteMOVRSP(Operands, getContext());
+ else
+ rewriteMOVR(Operands, getContext());
}
} else if (NumOperands == 4) {
if (Tok == "add" || Tok == "adds" || Tok == "sub" || Tok == "subs") {
uint64_t Op3Val = Op3CE->getValue();
uint64_t NewOp3Val = 0;
uint64_t NewOp4Val = 0;
- if (isGPR32Register(Op2->getReg())) {
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op2->getReg())) {
NewOp3Val = (32 - Op3Val) & 0x1f;
NewOp4Val = 31 - Op3Val;
} else {
uint64_t Op4Val = Op4CE->getValue();
uint64_t NewOp3Val = 0;
- if (isGPR32Register(Op1->getReg()))
+ if (ARM64MCRegisterClasses[ARM64::GPR32allRegClassID].contains(
+ Op1->getReg()))
NewOp3Val = (32 - Op3Val) & 0x1f;
else
NewOp3Val = (64 - Op3Val) & 0x3f;
else if (NumOperands == 3 &&
(Tok == "sxtb" || Tok == "uxtb" || Tok == "sxth" || Tok == "uxth")) {
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[1]);
- if (Op->isReg() && isGPR64Reg(Op->getReg())) {
+ if (Op->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR64allRegClassID].contains(
+ Op->getReg())) {
// The source register can be Wn here, but the matcher expects a
// GPR64. Twiddle it here if necessary.
ARM64Operand *Op = static_cast<ARM64Operand *>(Operands[2]);
ARM64Operand *ImmOp = static_cast<ARM64Operand *>(Operands[2]);
if (RegOp->isReg() && ImmOp->isFPImm() &&
ImmOp->getFPImm() == (unsigned)-1) {
- unsigned zreg =
- isFPR32Register(RegOp->getReg()) ? ARM64::WZR : ARM64::XZR;
+ unsigned zreg = ARM64MCRegisterClasses[ARM64::FPR32RegClassID].contains(
+ RegOp->getReg())
+ ? ARM64::WZR
+ : ARM64::XZR;
Operands[2] = ARM64Operand::CreateReg(zreg, false, Op->getStartLoc(),
Op->getEndLoc(), getContext());
delete ImmOp;
if (Op->isVectorIndexD() && Op->getVectorIndex() == 1) {
SMLoc Loc = Op->getStartLoc();
Operands.pop_back();
+ delete Op;
Operands.push_back(
ARM64Operand::CreateToken("[", false, Loc, getContext()));
Operands.push_back(
Operands.insert(
Operands.begin() + OpNo + 2,
ARM64Operand::CreateToken("]", false, Loc, getContext()));
+ delete Op;
}
}
}
Out.EmitInstruction(Inst, STI);
return false;
}
- case Match_MissingFeature:
+ case Match_MissingFeature: {
+ assert(ErrorInfo && "Unknown missing feature!");
+ // Special case the error message for the very common case where only
+ // a single subtarget feature is missing (neon, e.g.).
+ std::string Msg = "instruction requires:";
+ // Use a 64-bit mask: walking sizeof(ErrorInfo)*8-1 bit positions with a
+ // 32-bit 'unsigned' mask overflows (UB) once the index reaches 32 if
+ // ErrorInfo is a 64-bit feature set, silently dropping high feature bits.
+ uint64_t Mask = 1;
+ for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
+ if (ErrorInfo & Mask) {
+ Msg += " ";
+ Msg += getSubtargetFeatureName(ErrorInfo & Mask);
+ }
+ Mask <<= 1;
+ }
+ return Error(IDLoc, Msg);
+ }
case Match_MnemonicFail:
return showMatchError(IDLoc, MatchResult);
case Match_InvalidOperand: {
MatchResult = Match_InvalidMemoryIndexed64;
if (ErrorInfo) {
ARM64Operand *PrevOp = (ARM64Operand *)Operands[ErrorInfo - 1];
- if (PrevOp->isReg() && ARM64MCRegisterClasses[ARM64::GPR32RegClassID]
- .contains(PrevOp->getReg()))
+ if (PrevOp->isReg() &&
+ ARM64MCRegisterClasses[ARM64::GPR32RegClassID].contains(
+ PrevOp->getReg()))
MatchResult = Match_InvalidMemoryIndexed32;
}
}
/// Force static initialization.
extern "C" void LLVMInitializeARM64AsmParser() {
- RegisterMCAsmParser<ARM64AsmParser> X(TheARM64Target);
+ RegisterMCAsmParser<ARM64AsmParser> X(TheARM64leTarget);
+ RegisterMCAsmParser<ARM64AsmParser> Y(TheARM64beTarget);
}
#define GET_REGISTER_MATCHER
+#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#include "ARM64GenAsmMatcher.inc"