#include "llvm/Target/TargetAsmParser.h"
#include "X86.h"
#include "X86Subtarget.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/ADT/Twine.h"
+#include "llvm/Target/TargetRegistry.h"
+#include "llvm/Target/TargetAsmParser.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Target/TargetRegistry.h"
-#include "llvm/Target/TargetAsmParser.h"
using namespace llvm;
namespace {
/// @name Auto-generated Matcher Functions
/// {
-
- unsigned ComputeAvailableFeatures(const X86Subtarget *Subtarget) const;
-
- bool MatchInstructionImpl(
- const SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCInst &Inst);
-
+
+#define GET_ASSEMBLER_HEADER
+#include "X86GenAsmMatcher.inc"
+
/// }
public:
// validation later, so maybe there is no need for this here.
RegNo = MatchRegisterName(Tok.getString());
+ // If the match failed, try the register name as lowercase.
+ if (RegNo == 0)
+ RegNo = MatchRegisterName(LowercaseString(Tok.getString()));
+
// FIXME: This should be done using Requires<In32BitMode> and
// Requires<In64BitMode> so "eiz" usage in 64-bit instructions
// can be also checked.
if (RegNo == X86::RIZ && !Is64Bit)
return Error(Tok.getLoc(), "riz register in 64-bit mode only");
- // Parse %st(1) and "%st" as "%st(0)"
- if (RegNo == 0 && Tok.getString() == "st") {
+ // Parse "%st" as "%st(0)" and "%st(1)", which is multiple tokens.
+ if (RegNo == 0 && (Tok.getString() == "st" || Tok.getString() == "ST")) {
RegNo = X86::ST0;
EndLoc = Tok.getLoc();
Parser.Lex(); // Eat 'st'
bool X86ATTAsmParser::
ParseInstruction(StringRef Name, SMLoc NameLoc,
SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
- // The various flavors of pushf and popf use Requires<In32BitMode> and
- // Requires<In64BitMode>, but the assembler doesn't yet implement that.
- // For now, just do a manual check to prevent silent misencoding.
- if (Is64Bit) {
- if (Name == "popfl")
- return Error(NameLoc, "popfl cannot be encoded in 64-bit mode");
- else if (Name == "pushfl")
- return Error(NameLoc, "pushfl cannot be encoded in 64-bit mode");
- else if (Name == "pusha")
- return Error(NameLoc, "pusha cannot be encoded in 64-bit mode");
- } else {
- if (Name == "popfq")
- return Error(NameLoc, "popfq cannot be encoded in 32-bit mode");
- else if (Name == "pushfq")
- return Error(NameLoc, "pushfq cannot be encoded in 32-bit mode");
- }
-
- // The "Jump if rCX Zero" form jcxz is not allowed in 64-bit mode and
- // the form jrcxz is not allowed in 32-bit mode.
- if (Is64Bit) {
- if (Name == "jcxz")
- return Error(NameLoc, "jcxz cannot be encoded in 64-bit mode");
- } else {
- if (Name == "jrcxz")
- return Error(NameLoc, "jrcxz cannot be encoded in 32-bit mode");
- }
-
// FIXME: Hack to recognize "sal..." and "rep..." for now. We need a way to
// represent alternative syntaxes in the .td file, without requiring
// instruction duplication.
.Case("repe", "rep")
.Case("repz", "rep")
.Case("repnz", "repne")
+ .Case("iret", "iretl")
+ .Case("sysret", "sysretl")
+ .Case("push", Is64Bit ? "pushq" : "pushl")
+ .Case("pop", Is64Bit ? "popq" : "popl")
.Case("pushf", Is64Bit ? "pushfq" : "pushfl")
.Case("popf", Is64Bit ? "popfq" : "popfl")
+ .Case("pushfd", "pushfl")
+ .Case("popfd", "popfl")
.Case("retl", Is64Bit ? "retl" : "ret")
.Case("retq", Is64Bit ? "ret" : "retq")
- .Case("setz", "sete")
- .Case("setnz", "setne")
- .Case("jz", "je")
- .Case("jnz", "jne")
- .Case("jc", "jb")
- // FIXME: in 32-bit mode jcxz requires an AdSize prefix. In 64-bit mode
- // jecxz requires an AdSize prefix but jecxz does not have a prefix in
- // 32-bit mode.
- .Case("jecxz", "jcxz")
- .Case("jrcxz", "jcxz")
- .Case("jna", "jbe")
- .Case("jnae", "jb")
- .Case("jnb", "jae")
- .Case("jnbe", "ja")
- .Case("jnc", "jae")
- .Case("jng", "jle")
- .Case("jnge", "jl")
- .Case("jnl", "jge")
- .Case("jnle", "jg")
- .Case("jpe", "jp")
- .Case("jpo", "jnp")
- .Case("cmovcl", "cmovbl")
- .Case("cmovcl", "cmovbl")
- .Case("cmovnal", "cmovbel")
- .Case("cmovnbl", "cmovael")
- .Case("cmovnbel", "cmoval")
- .Case("cmovncl", "cmovael")
- .Case("cmovngl", "cmovlel")
- .Case("cmovnl", "cmovgel")
- .Case("cmovngl", "cmovlel")
- .Case("cmovngel", "cmovll")
- .Case("cmovnll", "cmovgel")
- .Case("cmovnlel", "cmovgl")
- .Case("cmovnzl", "cmovnel")
- .Case("cmovzl", "cmovel")
+ .Case("setz", "sete") .Case("setnz", "setne")
+ .Case("setc", "setb") .Case("setna", "setbe")
+ .Case("setnae", "setb").Case("setnb", "setae")
+ .Case("setnbe", "seta").Case("setnc", "setae")
+ .Case("setng", "setle").Case("setnge", "setl")
+ .Case("setnl", "setge").Case("setnle", "setg")
+ .Case("setpe", "setp") .Case("setpo", "setnp")
+ .Case("jz", "je") .Case("jnz", "jne")
+ .Case("jc", "jb") .Case("jna", "jbe")
+ .Case("jnae", "jb").Case("jnb", "jae")
+ .Case("jnbe", "ja").Case("jnc", "jae")
+ .Case("jng", "jle").Case("jnge", "jl")
+ .Case("jnl", "jge").Case("jnle", "jg")
+ .Case("jpe", "jp") .Case("jpo", "jnp")
+ // Condition code aliases for 16-bit, 32-bit, 64-bit and unspec operands.
+ .Case("cmovcw", "cmovbw") .Case("cmovcl", "cmovbl")
+ .Case("cmovcq", "cmovbq") .Case("cmovc", "cmovb")
+ .Case("cmovnaew","cmovbw") .Case("cmovnael","cmovbl")
+ .Case("cmovnaeq","cmovbq") .Case("cmovnae", "cmovb")
+ .Case("cmovnaw", "cmovbew").Case("cmovnal", "cmovbel")
+ .Case("cmovnaq", "cmovbeq").Case("cmovna", "cmovbe")
+ .Case("cmovnbw", "cmovaew").Case("cmovnbl", "cmovael")
+ .Case("cmovnbq", "cmovaeq").Case("cmovnb", "cmovae")
+ .Case("cmovnbew","cmovaw") .Case("cmovnbel","cmoval")
+ .Case("cmovnbeq","cmovaq") .Case("cmovnbe", "cmova")
+ .Case("cmovncw", "cmovaew").Case("cmovncl", "cmovael")
+ .Case("cmovncq", "cmovaeq").Case("cmovnc", "cmovae")
+ .Case("cmovngw", "cmovlew").Case("cmovngl", "cmovlel")
+ .Case("cmovngq", "cmovleq").Case("cmovng", "cmovle")
+ .Case("cmovnw", "cmovgew").Case("cmovnl", "cmovgel")
+ .Case("cmovnq", "cmovgeq").Case("cmovn", "cmovge")
+ .Case("cmovngw", "cmovlew").Case("cmovngl", "cmovlel")
+ .Case("cmovngq", "cmovleq").Case("cmovng", "cmovle")
+ .Case("cmovngew","cmovlw") .Case("cmovngel","cmovll")
+ .Case("cmovngeq","cmovlq") .Case("cmovnge", "cmovl")
+ .Case("cmovnlw", "cmovgew").Case("cmovnll", "cmovgel")
+ .Case("cmovnlq", "cmovgeq").Case("cmovnl", "cmovge")
+ .Case("cmovnlew","cmovgw") .Case("cmovnlel","cmovgl")
+ .Case("cmovnleq","cmovgq") .Case("cmovnle", "cmovg")
+ .Case("cmovnzw", "cmovnew").Case("cmovnzl", "cmovnel")
+ .Case("cmovnzq", "cmovneq").Case("cmovnz", "cmovne")
+ .Case("cmovzw", "cmovew") .Case("cmovzl", "cmovel")
+ .Case("cmovzq", "cmoveq") .Case("cmovz", "cmove")
.Case("fwait", "wait")
- .Case("movzx", "movzb")
+ .Case("movzx", "movzb") // FIXME: Not correct.
+ .Case("fildq", "fildll")
.Default(Name);
// FIXME: Hack to recognize cmp<comparison code>{ss,sd,ps,pd}.
PatchedName = "vpclmulqdq";
}
}
+
Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc));
if (ExtraImmOp)
Operands.push_back(X86Operand::CreateImm(ExtraImmOp, NameLoc, NameLoc));
-
- if (getLexer().isNot(AsmToken::EndOfStatement)) {
+
+
+ // Determine whether this is an instruction prefix.
+ bool isPrefix =
+ PatchedName == "lock" || PatchedName == "rep" ||
+ PatchedName == "repne";
+
+
+ // This does the actual operand parsing. Don't parse any more if we have a
+ // prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
+ // just want to parse the "lock" as the first instruction and the "incl" as
+ // the next one.
+ if (getLexer().isNot(AsmToken::EndOfStatement) && !isPrefix) {
// Parse '*' modifier.
if (getLexer().is(AsmToken::Star)) {
// Read the first operand.
if (X86Operand *Op = ParseOperand())
Operands.push_back(Op);
- else
+ else {
+ Parser.EatToEndOfStatement();
return true;
+ }
while (getLexer().is(AsmToken::Comma)) {
Parser.Lex(); // Eat the comma.
// Parse and remember the operand.
if (X86Operand *Op = ParseOperand())
Operands.push_back(Op);
- else
+ else {
+ Parser.EatToEndOfStatement();
return true;
+ }
+ }
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ Parser.EatToEndOfStatement();
+ return TokError("unexpected token in argument list");
}
}
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ Parser.Lex(); // Consume the EndOfStatement
- // FIXME: Hack to handle recognizing s{hr,ar,hl}? $1.
+  // FIXME: Hack to recognize s{hr,ar,hl} $1, <op>.  Canonicalize to
+ // "shift <op>".
if ((Name.startswith("shr") || Name.startswith("sar") ||
Name.startswith("shl")) &&
- Operands.size() == 3 &&
- static_cast<X86Operand*>(Operands[1])->isImm() &&
- isa<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm()) &&
- cast<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm())->getValue() == 1) {
- delete Operands[1];
- Operands.erase(Operands.begin() + 1);
+ Operands.size() == 3) {
+ X86Operand *Op1 = static_cast<X86Operand*>(Operands[1]);
+ if (Op1->isImm() && isa<MCConstantExpr>(Op1->getImm()) &&
+ cast<MCConstantExpr>(Op1->getImm())->getValue() == 1) {
+ delete Operands[1];
+ Operands.erase(Operands.begin() + 1);
+ }
}
-
+
+ // FIXME: Hack to handle recognize "rc[lr] <op>" -> "rcl $1, <op>".
+ if ((Name.startswith("rcl") || Name.startswith("rcr")) &&
+ Operands.size() == 2) {
+ const MCExpr *One = MCConstantExpr::Create(1, getParser().getContext());
+ Operands.push_back(X86Operand::CreateImm(One, NameLoc, NameLoc));
+ std::swap(Operands[1], Operands[2]);
+ }
+
+ // FIXME: Hack to handle recognize "sh[lr]d op,op" -> "shld $1, op,op".
+ if ((Name.startswith("shld") || Name.startswith("shrd")) &&
+ Operands.size() == 3) {
+ const MCExpr *One = MCConstantExpr::Create(1, getParser().getContext());
+ Operands.insert(Operands.begin()+1,
+ X86Operand::CreateImm(One, NameLoc, NameLoc));
+ }
+
+
+ // FIXME: Hack to handle recognize "in[bwl] <op>". Canonicalize it to
+ // "inb <op>, %al".
+ if ((Name == "inb" || Name == "inw" || Name == "inl") &&
+ Operands.size() == 2) {
+ unsigned Reg;
+ if (Name[2] == 'b')
+ Reg = MatchRegisterName("al");
+ else if (Name[2] == 'w')
+ Reg = MatchRegisterName("ax");
+ else
+ Reg = MatchRegisterName("eax");
+ SMLoc Loc = Operands.back()->getEndLoc();
+ Operands.push_back(X86Operand::CreateReg(Reg, Loc, Loc));
+ }
+
+ // FIXME: Hack to handle recognize "out[bwl] <op>". Canonicalize it to
+ // "outb %al, <op>".
+ if ((Name == "outb" || Name == "outw" || Name == "outl") &&
+ Operands.size() == 2) {
+ unsigned Reg;
+ if (Name[3] == 'b')
+ Reg = MatchRegisterName("al");
+ else if (Name[3] == 'w')
+ Reg = MatchRegisterName("ax");
+ else
+ Reg = MatchRegisterName("eax");
+ SMLoc Loc = Operands.back()->getEndLoc();
+ Operands.push_back(X86Operand::CreateReg(Reg, Loc, Loc));
+ std::swap(Operands[1], Operands[2]);
+ }
+
+ // FIXME: Hack to handle "out[bwl]? %al, (%dx)" -> "outb %al, %dx".
+ if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") &&
+ Operands.size() == 3) {
+ X86Operand &Op = *(X86Operand*)Operands.back();
+ if (Op.isMem() && Op.Mem.SegReg == 0 &&
+ isa<MCConstantExpr>(Op.Mem.Disp) &&
+ cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 &&
+ Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) {
+ SMLoc Loc = Op.getEndLoc();
+ Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc);
+ delete &Op;
+ }
+ }
+
// FIXME: Hack to handle "f{mul*,add*,sub*,div*} $op, st(0)" the same as
// "f{mul*,add*,sub*,div*} $op"
if ((Name.startswith("fmul") || Name.startswith("fadd") ||
Operands.erase(Operands.begin() + 2);
}
- // FIXME: Hack to handle "imul A, B" which is an alias for "imul A, B, B".
+ // FIXME: Hack to handle "imul <imm>, B" which is an alias for "imul <imm>, B,
+ // B".
if (Name.startswith("imul") && Operands.size() == 3 &&
+ static_cast<X86Operand*>(Operands[1])->isImm() &&
static_cast<X86Operand*>(Operands.back())->isReg()) {
X86Operand *Op = static_cast<X86Operand*>(Operands.back());
Operands.push_back(X86Operand::CreateReg(Op->getReg(), Op->getStartLoc(),
Op->getEndLoc()));
}
+
+ // 'sldt <mem>' can be encoded with either sldtw or sldtq with the same
+ // effect (both store to a 16-bit mem). Force to sldtw to avoid ambiguity
+ // errors, since its encoding is the most compact.
+ if (Name == "sldt" && Operands.size() == 2 &&
+ static_cast<X86Operand*>(Operands[1])->isMem()) {
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken("sldtw", NameLoc);
+ }
+
+ // The assembler accepts "xchgX <reg>, <mem>" and "xchgX <mem>, <reg>" as
+ // synonyms. Our tables only have the "<reg>, <mem>" form, so if we see the
+ // other operand order, swap them.
+ if (Name == "xchgb" || Name == "xchgw" || Name == "xchgl" || Name == "xchgq"||
+ Name == "xchg")
+ if (Operands.size() == 3 &&
+ static_cast<X86Operand*>(Operands[1])->isMem() &&
+ static_cast<X86Operand*>(Operands[2])->isReg()) {
+ std::swap(Operands[1], Operands[2]);
+ }
+
+ // The assembler accepts "testX <reg>, <mem>" and "testX <mem>, <reg>" as
+ // synonyms. Our tables only have the "<mem>, <reg>" form, so if we see the
+ // other operand order, swap them.
+ if (Name == "testb" || Name == "testw" || Name == "testl" || Name == "testq"||
+ Name == "test")
+ if (Operands.size() == 3 &&
+ static_cast<X86Operand*>(Operands[1])->isReg() &&
+ static_cast<X86Operand*>(Operands[2])->isMem()) {
+ std::swap(Operands[1], Operands[2]);
+ }
+
+ // The assembler accepts these instructions with no operand as a synonym for
+ // an instruction acting on st(1). e.g. "fxch" -> "fxch %st(1)".
+ if ((Name == "fxch" || Name == "fucom" || Name == "fucomp" ||
+ Name == "faddp" || Name == "fsubp" || Name == "fsubrp" ||
+ Name == "fmulp" || Name == "fdivp" || Name == "fdivrp") &&
+ Operands.size() == 1) {
+ Operands.push_back(X86Operand::CreateReg(MatchRegisterName("st(1)"),
+ NameLoc, NameLoc));
+ }
+
+  // The assembler accepts these instructions with too few operands as a synonym
+ // for taking %st(1),%st(0) or X, %st(0).
+ if ((Name == "fcomi" || Name == "fucomi") && Operands.size() < 3) {
+ if (Operands.size() == 1)
+ Operands.push_back(X86Operand::CreateReg(MatchRegisterName("st(1)"),
+ NameLoc, NameLoc));
+ Operands.push_back(X86Operand::CreateReg(MatchRegisterName("st(0)"),
+ NameLoc, NameLoc));
+ }
+
+ // The assembler accepts various amounts of brokenness for fnstsw.
+ if (Name == "fnstsw") {
+ if (Operands.size() == 2 &&
+ static_cast<X86Operand*>(Operands[1])->isReg()) {
+ // "fnstsw al" and "fnstsw eax" -> "fnstw"
+ unsigned Reg = static_cast<X86Operand*>(Operands[1])->Reg.RegNo;
+ if (Reg == MatchRegisterName("eax") ||
+ Reg == MatchRegisterName("al")) {
+ delete Operands[1];
+ Operands.pop_back();
+ }
+ }
+ // "fnstw" -> "fnstw %ax"
+ if (Operands.size() == 1)
+ Operands.push_back(X86Operand::CreateReg(MatchRegisterName("ax"),
+ NameLoc, NameLoc));
+ }
+
+ // jmp $42,$5 -> ljmp, similarly for call.
+ if ((Name.startswith("call") || Name.startswith("jmp")) &&
+ Operands.size() == 3 &&
+ static_cast<X86Operand*>(Operands[1])->isImm() &&
+ static_cast<X86Operand*>(Operands[2])->isImm()) {
+ const char *NewOpName = StringSwitch<const char *>(Name)
+ .Case("jmp", "ljmp")
+ .Case("jmpw", "ljmpw")
+ .Case("jmpl", "ljmpl")
+ .Case("jmpq", "ljmpq")
+ .Case("call", "lcall")
+ .Case("callw", "lcallw")
+ .Case("calll", "lcalll")
+ .Case("callq", "lcallq")
+ .Default(0);
+ if (NewOpName) {
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken(NewOpName, NameLoc);
+ Name = NewOpName;
+ }
+ }
+
+ // lcall and ljmp -> lcalll and ljmpl
+ if ((Name == "lcall" || Name == "ljmp") && Operands.size() == 3) {
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken(Name == "lcall" ? "lcalll" : "ljmpl",
+ NameLoc);
+ }
+
+ // movsd -> movsl (when no operands are specified).
+ if (Name == "movsd" && Operands.size() == 1) {
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken("movsl", NameLoc);
+ }
+
+ // fstp <mem> -> fstps <mem>. Without this, we'll default to fstpl due to
+ // suffix searching.
+ if (Name == "fstp" && Operands.size() == 2 &&
+ static_cast<X86Operand*>(Operands[1])->isMem()) {
+ delete Operands[0];
+ Operands[0] = X86Operand::CreateToken("fstps", NameLoc);
+ }
+
return false;
}
}
-bool
-X86ATTAsmParser::MatchInstruction(SMLoc IDLoc,
- const SmallVectorImpl<MCParsedAsmOperand*>
- &Operands,
- MCInst &Inst) {
+bool X86ATTAsmParser::
+MatchInstruction(SMLoc IDLoc,
+ const SmallVectorImpl<MCParsedAsmOperand*> &Operands,
+ MCInst &Inst) {
assert(!Operands.empty() && "Unexpect empty operand list!");
- X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
- assert(Op->isToken() && "Leading operand should always be a mnemonic!");
-
+ bool WasOriginallyInvalidOperand = false;
+ unsigned OrigErrorInfo;
+
// First, try a direct match.
- if (!MatchInstructionImpl(Operands, Inst))
+ switch (MatchInstructionImpl(Operands, Inst, OrigErrorInfo)) {
+ case Match_Success:
return false;
+ case Match_MissingFeature:
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ case Match_InvalidOperand:
+ WasOriginallyInvalidOperand = true;
+ break;
+ case Match_MnemonicFail:
+ break;
+ }
// FIXME: Ideally, we would only attempt suffix matches for things which are
// valid prefixes, and we could just infer the right unambiguous
// type. However, that requires substantially more matcher support than the
// following hack.
+ X86Operand *Op = static_cast<X86Operand*>(Operands[0]);
+ assert(Op->isToken() && "Leading operand should always be a mnemonic!");
+
// Change the operand to point to a temporary token.
StringRef Base = Op->getToken();
SmallString<16> Tmp;
// Check for the various suffix matches.
Tmp[Base.size()] = 'b';
- bool MatchB = MatchInstructionImpl(Operands, Inst);
+ unsigned BErrorInfo, WErrorInfo, LErrorInfo, QErrorInfo;
+ MatchResultTy MatchB = MatchInstructionImpl(Operands, Inst, BErrorInfo);
Tmp[Base.size()] = 'w';
- bool MatchW = MatchInstructionImpl(Operands, Inst);
+ MatchResultTy MatchW = MatchInstructionImpl(Operands, Inst, WErrorInfo);
Tmp[Base.size()] = 'l';
- bool MatchL = MatchInstructionImpl(Operands, Inst);
+ MatchResultTy MatchL = MatchInstructionImpl(Operands, Inst, LErrorInfo);
Tmp[Base.size()] = 'q';
- bool MatchQ = MatchInstructionImpl(Operands, Inst);
+ MatchResultTy MatchQ = MatchInstructionImpl(Operands, Inst, QErrorInfo);
// Restore the old token.
Op->setTokenValue(Base);
// If exactly one matched, then we treat that as a successful match (and the
// instruction will already have been filled in correctly, since the failing
// matches won't have modified it).
- if (MatchB + MatchW + MatchL + MatchQ == 3)
+ unsigned NumSuccessfulMatches =
+ (MatchB == Match_Success) + (MatchW == Match_Success) +
+ (MatchL == Match_Success) + (MatchQ == Match_Success);
+ if (NumSuccessfulMatches == 1)
return false;
- // Otherwise, the match failed.
+ // Otherwise, the match failed, try to produce a decent error message.
// If we had multiple suffix matches, then identify this as an ambiguous
// match.
- if (MatchB + MatchW + MatchL + MatchQ != 4) {
+ if (NumSuccessfulMatches > 1) {
char MatchChars[4];
unsigned NumMatches = 0;
- if (!MatchB)
+ if (MatchB == Match_Success)
MatchChars[NumMatches++] = 'b';
- if (!MatchW)
+ if (MatchW == Match_Success)
MatchChars[NumMatches++] = 'w';
- if (!MatchL)
+ if (MatchL == Match_Success)
MatchChars[NumMatches++] = 'l';
- if (!MatchQ)
+ if (MatchQ == Match_Success)
MatchChars[NumMatches++] = 'q';
SmallString<126> Msg;
}
OS << ")";
Error(IDLoc, OS.str());
- } else {
- // FIXME: We should give nicer diagnostics about the exact failure.
- Error(IDLoc, "unrecognized instruction");
+ return true;
}
+
+ // Okay, we know that none of the variants matched successfully.
+
+ // If all of the instructions reported an invalid mnemonic, then the original
+ // mnemonic was invalid.
+ if ((MatchB == Match_MnemonicFail) && (MatchW == Match_MnemonicFail) &&
+ (MatchL == Match_MnemonicFail) && (MatchQ == Match_MnemonicFail)) {
+ if (!WasOriginallyInvalidOperand) {
+ Error(IDLoc, "invalid instruction mnemonic '" + Base + "'");
+ return true;
+ }
+ // Recover location info for the operand if we know which was the problem.
+ SMLoc ErrorLoc = IDLoc;
+ if (OrigErrorInfo != ~0U) {
+ if (OrigErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((X86Operand*)Operands[OrigErrorInfo])->getStartLoc();
+ if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+
+ // If one instruction matched with a missing feature, report this as a
+ // missing feature.
+ if ((MatchB == Match_MissingFeature) + (MatchW == Match_MissingFeature) +
+ (MatchL == Match_MissingFeature) + (MatchQ == Match_MissingFeature) == 1){
+ Error(IDLoc, "instruction requires a CPU feature not currently enabled");
+ return true;
+ }
+
+ // If one instruction matched with an invalid operand, report this as an
+ // operand failure.
+ if ((MatchB == Match_InvalidOperand) + (MatchW == Match_InvalidOperand) +
+ (MatchL == Match_InvalidOperand) + (MatchQ == Match_InvalidOperand) == 1){
+ Error(IDLoc, "invalid operand for instruction");
+ return true;
+ }
+
+ // If all of these were an outright failure, report it in a useless way.
+ // FIXME: We should give nicer diagnostics about the exact failure.
+ Error(IDLoc, "unknown use of instruction mnemonic without a size suffix");
return true;
}
LLVMInitializeX86AsmLexer();
}
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
#include "X86GenAsmMatcher.inc"