-//===- ARMDisassemblerCore.cpp - ARM disassembler helpers ----*- C++ -*-===//
+//===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
//===----------------------------------------------------------------------===//
//
// This file is part of the ARM Disassembler.
-// It contains code to represent the core concepts of Builder, Builder Factory,
-// as well as the Algorithm to solve the problem of disassembling an ARM instr.
+// It contains code to represent the core concepts of Builder and DisassembleFP
+// to solve the problem of disassembling an ARM instr.
//
//===----------------------------------------------------------------------===//
-#include "ARMAddressingModes.h"
#include "ARMDisassemblerCore.h"
-#include <map>
+#include "ARMAddressingModes.h"
/// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
/// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
return ARMInsts[Opcode].Name;
}
-// There is a more efficient way than the following. It is fragile, though.
-// See the code snippet after this function.
+// Return the register enum based on RegClass and the raw register number.
+// For DRegPair, see comments below.
+// FIXME: Auto-gened?
static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister,
bool DRegPair = false) {
}
break;
}
- llvm_unreachable("Invalid (RegClassID, RawRegister) combination");
-}
-
-// This is efficient but fragile.
-/*
-// See ARMGenRegisterInfo.h.inc for more info.
-static const TargetRegisterClass* const ARMRegisterClasses[] = {
- NULL,
- &ARM::CCRRegClass, // CCRRegClassID = 1,
- &ARM::DPRRegClass, // DPRRegClassID = 2,
- &ARM::DPR_8RegClass, // DPR_8RegClassID = 3,
- &ARM::DPR_VFP2RegClass, // DPR_VFP2RegClassID = 4,
- &ARM::GPRRegClass, // GPRRegClassID = 5,
- &ARM::QPRRegClass, // QPRRegClassID = 6,
- &ARM::QPR_8RegClass, // QPR_8RegClassID = 7,
- &ARM::QPR_VFP2RegClass, // QPR_VFP2RegClassID = 8,
- &ARM::SPRRegClass, // SPRRegClassID = 9,
- &ARM::SPR_8RegClass, // SPR_8RegClassID = 10,
- &ARM::SPR_INVALIDRegClass, // SPR_INVALIDRegClassID = 11,
- &ARM::tGPRRegClass, // tGPRRegClassID = 12
-};
-
-// Return the register enum given register class id and raw register value.
-static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister) {
- assert(RegClassID < array_lengthof(ARMRegisterClasses) &&
- "Register Class ID out of range");
- return ARMRegisterClasses[RegClassID]->getRegister(RawRegister);
+ assert(0 && "Invalid (RegClassID, RawRegister) combination");
+ return 0;
}
-*/
-
-/// DisassembleFP - DisassembleFP points to a function that disassembles an insn
-/// and builds the MCOperand list upon disassembly. It returns false on failure
-/// or true on success. The number of operands added is updated upon success.
-typedef bool (*DisassembleFP)(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded);
///////////////////////////////
// //
return (insn >> ARMII::M_BitShift) & 1;
}
-namespace {
-// Sign extend 5 bit number x to r.
-// Usage: int r = signextend<signed int, 5>(x);
-template <typename T, unsigned B> inline T signextend(const T x) {
- struct {T x:B;} s;
- return s.x = x;
-}
-}
-
// See A8.4 Shifts applied to a register.
// A8.4.2 Register controlled shifts.
//
// getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
-// into llvm enums for shift opcode.
+// into llvm enums for shift opcode. The API clients should pass in the value
+// encoded with two bits, so the assert stays to signal a wrong API usage.
//
// A8-12: DecodeRegShift()
static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
switch (bits) {
- default: assert(0 && "No such value");
+ default: assert(0 && "No such value"); return ARM_AM::no_shift;
case 0: return ARM_AM::lsl;
case 1: return ARM_AM::lsr;
case 2: return ARM_AM::asr;
}
// getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
-// bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode.
+// bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
+// clients should pass in the value encoded with two bits, so the assert stays
+// to signal a wrong API usage.
static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
switch (bits) {
- default: assert(0 && "No such value");
+ default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
case 1: return ARM_AM::ia; // P=0 U=1
case 3: return ARM_AM::ib; // P=1 U=1
case 0: return ARM_AM::da; // P=0 U=0
/// followed by possible src(s).
///
/// The processing of the predicate, and the 'S' modifier bit, if MI modifies
-/// the CPSR, is factored into ARMBasicMCBuilder's class method named
+/// the CPSR, is factored into ARMBasicMCBuilder's method named
/// TryPredicateAndSBitModifier.
static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
if (Opcode == ARM::Int_MemBarrierV7 || Opcode == ARM::Int_SyncBarrierV7)
return true;
// Inst{3-0} => Rm
// Inst{11-8} => Rs
static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
assert(NumOps >= 3
&& OpInfo[0].RegClass == ARM::GPRRegClassID
&& OpInfo[1].RegClass == ARM::GPRRegClassID
- && OpInfo[2].RegClass == ARM::GPRRegClassID);
+ && OpInfo[2].RegClass == ARM::GPRRegClassID
+ && "Expect three register operands");
// Instructions with two destination registers have RdLo{15-12} first.
if (NumDefs == 2) {
- assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID);
+ assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
+ "Expect 4th register operand");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRd(insn))));
++OpIdx;
static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded) {
- assert(NumOps >= 5);
+ assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");
unsigned &OpIdx = NumOpsAdded;
bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
// SRSW/SRS: addrmode4:$addr mode_imm
// RFEW/RFE: addrmode4:$addr Rn
static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
if (CoprocessorOpcode(Opcode))
return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded);
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+ if (!OpInfo) return false;
// MRS and MRSsys take one GPR reg Rd.
if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
- assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
+ assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRd(insn))));
NumOpsAdded = 1;
}
// BXJ takes one GPR reg Rm.
if (Opcode == ARM::BXJ) {
- assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
+ assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRm(insn))));
NumOpsAdded = 1;
}
// MSR and MSRsys take one GPR reg Rm, followed by the mask.
if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
- assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
+ assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRm(insn))));
MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
// ARMInstPrinter::printAddrMode4Operand() prints special mode string
// if the base register is SP; so don't set ARM::SP.
MI.addOperand(MCOperand::CreateReg(0));
- bool WB = (Opcode == ARM::SRSW);
ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
+ MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
return true;
}
- assert(Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
- || Opcode == ARM::SMC || Opcode == ARM::SVC);
+ assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
+ || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
+ "Unexpected Opcode");
- assert(NumOps >= 1 && OpInfo[0].RegClass == 0);
+ assert(NumOps >= 1 && OpInfo[0].RegClass == 0 && "Reg operand expected");
int Imm32 = 0;
if (Opcode == ARM::SMC) {
} else {
// SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
unsigned Imm26 = slice(insn, 23, 0) << 2;
- Imm32 = signextend<signed int, 26>(Imm26);
+ //Imm32 = signextend<signed int, 26>(Imm26);
+ Imm32 = SignExtend32<26>(Imm26);
// When executing an ARM instruction, PC reads as the address of the current
// instruction plus 8. The assembler subtracts 8 from the difference
// BLXr9, BXr9
// BRIND, BX_RET
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+ if (!OpInfo) return false;
+
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
// BLXr9 and BRIND take one GPR reg.
if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
- assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRm(insn))));
OpIdx = 1;
if (Opcode == ARM::BR_JTadd) {
// InOperandList with GPR:$target and GPR:$idx regs.
- assert(NumOps == 4);
+ assert(NumOps == 4 && "Expect 4 operands");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
if (Opcode == ARM::BR_JTr) {
// InOperandList with GPR::$target reg.
- assert(NumOps == 3);
+ assert(NumOps == 3 && "Expect 3 operands");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRm(insn))));
// This is the reg/reg form, with base reg followed by +/- reg shop imm.
// See also ARMAddressingModes.h (Addressing Mode #2).
- assert(NumOps == 5 && getIBit(insn) == 1);
+ assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
case ARM::USAT16:
return slice(insn, 19, 16);
default:
- llvm_unreachable("Invalid opcode passed in");
+ assert(0 && "Invalid opcode passed in");
return 0;
}
}
// operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
// They are QADD, QDADD, QDSUB, and QSUB.
static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
// BinaryDP has an Rn operand.
if (!isUnary) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(
getRegisterEnum(ARM::GPRRegClassID,
RmRn ? decodeRm(insn) : decodeRn(insn))));
}
static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
unsigned short NumDefs = TID.getNumDefs();
// BinaryDP has an Rn operand.
if (!isUnary) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
(OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+2].RegClass == 0));
+ (OpInfo[OpIdx+2].RegClass == 0) &&
+ "Expect 3 reg operands");
// Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
unsigned Rs = slice(insn, 4, 4);
unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
- unsigned short NumDefs = TID.getNumDefs();
bool isPrePost = isPrePostLdSt(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
+
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
- assert((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)));
+ assert(((!isStore && TID.getNumDefs() > 0) ||
+ (isStore && (TID.getNumDefs() == 0 || isPrePost)))
+ && "Invalid arguments");
// Operand 0 of a pre- and post-indexed store is the address base writeback.
if (isPrePost && isStore) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
if (OpIdx >= NumOps)
return false;
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRd(insn))));
++OpIdx;
// After dst of a pre- and post-indexed load is the address base writeback.
if (isPrePost && !isStore) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
if (OpIdx >= NumOps)
return false;
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
- assert(!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1));
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
+ assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
+ && "Index mode or tied_to operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0));
+ (OpInfo[OpIdx+1].RegClass == 0) &&
+ "Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
if (getIBit(insn) == 0) {
}
static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
}
static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
}
unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
- unsigned short NumDefs = TID.getNumDefs();
bool isPrePost = isPrePostLdSt(TID.TSFlags);
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
+
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
- assert((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)));
+ assert(((!isStore && TID.getNumDefs() > 0) ||
+ (isStore && (TID.getNumDefs() == 0 || isPrePost)))
+ && "Invalid arguments");
// Operand 0 of a pre- and post-indexed store is the address base writeback.
if (isPrePost && isStore) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
if (OpIdx >= NumOps)
return false;
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRd(insn))));
++OpIdx;
// After dst of a pre- and post-indexed load is the address base writeback.
if (isPrePost && !isStore) {
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
if (OpIdx >= NumOps)
return false;
- assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
- assert(!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1));
+ assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
+ assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
+ && "Index mode or tied_to operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
return false;
assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
- (OpInfo[OpIdx+1].RegClass == 0));
+ (OpInfo[OpIdx+1].RegClass == 0) &&
+ "Expect 1 reg operand followed by 1 imm operand");
ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
if (getAM3IBit(insn) == 1) {
}
static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
}
static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
}
// and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
// reglist with each affected register encoded as an MCOperand.
static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- assert(NumOps == 5 && "LdStMulFrm expects NumOps of 5");
+ assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");
unsigned &OpIdx = NumOpsAdded;
+ OpIdx = 0;
+
unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
+
+ // Writeback to base, if necessary.
+ if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
+ MI.addOperand(MCOperand::CreateReg(Base));
+ ++OpIdx;
+ }
+
MI.addOperand(MCOperand::CreateReg(Base));
ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- bool WB = getWBit(insn) == 1;
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
+ MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
// Handling the two predicate operands before the reglist.
int64_t CondVal = insn >> ARMII::CondShift;
MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
- OpIdx = 4;
+ OpIdx += 4;
// Fill the variadic part of reglist.
unsigned RegListBits = insn & ((1 << 16) - 1);
//
// SWP, SWPB: Rd Rm Rn
static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+ if (!OpInfo) return false;
+
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
assert(NumOps >= 2
&& OpInfo[0].RegClass == ARM::GPRRegClassID
- && OpInfo[1].RegClass == ARM::GPRRegClassID);
+ && OpInfo[1].RegClass == ARM::GPRRegClassID
+ && "Expect 2 reg operands");
bool isStore = slice(insn, 20, 20) == 0;
bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
// PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
// RBIT, REV, REV16, REVSH: Rd Rm
static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
assert(NumOps >= 2
&& OpInfo[0].RegClass == ARM::GPRRegClassID
- && OpInfo[1].RegClass == ARM::GPRRegClassID);
+ && OpInfo[1].RegClass == ARM::GPRRegClassID
+ && "Expect 2 reg operands");
bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
++OpIdx;
if (ThreeReg) {
- assert(NumOps >= 4);
+ assert(NumOps >= 4 && "Expect >= 4 operands");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
decodeRn(insn))));
++OpIdx;
// The 2nd operand register is Rn and the 3rd operand regsiter is Rm for the
// three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
assert(NumOps >= 2
&& OpInfo[0].RegClass == ARM::GPRRegClassID
- && OpInfo[1].RegClass == ARM::GPRRegClassID);
+ && OpInfo[1].RegClass == ARM::GPRRegClassID
+ && "Expect 2 reg operands");
bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
// VCVTDS, VCVTSD: converts between double-precision and single-precision
// The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
OpIdx = 0;
unsigned RegClass = OpInfo[OpIdx].RegClass;
- assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
+ assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
+ "Reg operand expected");
bool isSP = (RegClass == ARM::SPRRegClassID);
MI.addOperand(MCOperand::CreateReg(
return true;
RegClass = OpInfo[OpIdx].RegClass;
- assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
+ assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
+ "Reg operand expected");
isSP = (RegClass == ARM::SPRRegClassID);
MI.addOperand(MCOperand::CreateReg(
// InOperandList to that of the dst. As far as asm printing is concerned, this
// tied_to operand is simply skipped.
static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
OpIdx = 0;
unsigned RegClass = OpInfo[OpIdx].RegClass;
- assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
+ assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
+ "Reg operand expected");
bool isSP = (RegClass == ARM::SPRRegClassID);
MI.addOperand(MCOperand::CreateReg(
// Skip tied_to operand constraint.
if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
- assert(NumOps >= 4);
+ assert(NumOps >= 4 && "Expect >=4 operands");
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
}
// A8.6.297 vcvt (floating-point and fixed-point)
// Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
if (fixed_point) {
// A8.6.297
- assert(NumOps >= 3);
+ assert(NumOps >= 3 && "Expect >= 3 operands");
int size = slice(insn, 7, 7) == 0 ? 16 : 32;
int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
MI.addOperand(MCOperand::CreateReg(
getRegisterEnum(RegClassID,
decodeVFPRd(insn, SP))));
- assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1);
+ assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
+ "Tied to operand expected");
MI.addOperand(MI.getOperand(0));
assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
- !OpInfo[2].isOptionalDef());
+ !OpInfo[2].isOptionalDef() && "Imm operand expected");
MI.addOperand(MCOperand::CreateImm(fbits));
NumOpsAdded = 3;
// VMOVRS - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
// VMOVSR - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
// VFP Load/Store Instructions.
// VLDRD, VLDRS, VSTRD, VSTRS
static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
// operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
// followed by a reglist of either DPR(s) or SPR(s).
//
-// VLDMD, VLDMS, VSTMD, VSTMS
+// VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- assert(NumOps == 5 && "VFPLdStMulFrm expects NumOps of 5");
+ assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");
unsigned &OpIdx = NumOpsAdded;
+ OpIdx = 0;
+
unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
+
+ // Writeback to base, if necessary.
+ if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
+ Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
+ MI.addOperand(MCOperand::CreateReg(Base));
+ ++OpIdx;
+ }
+
MI.addOperand(MCOperand::CreateReg(Base));
// Next comes the AM5 Opcode.
ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- bool WB = getWBit(insn) == 1;
unsigned char Imm8 = insn & 0xFF;
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, WB, Imm8)));
+ MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, Imm8)));
// Handling the two predicate operands before the reglist.
int64_t CondVal = insn >> ARMII::CondShift;
MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
- OpIdx = 4;
+ OpIdx += 4;
- bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VSTMS) ? true : false;
+ bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
+ Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD) ? true : false;
unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
// Extract Dd/Sd.
// FCONSTS (SPR and a VFPf32Imm operand)
// VMRS/VMSR (GPR operand)
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
unsigned &OpIdx = NumOpsAdded;
if (Opcode == ARM::FMSTAT)
return true;
- assert(NumOps >= 2);
+ assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
unsigned RegEnum = 0;
switch (OpInfo[0].RegClass) {
RegEnum = getRegisterEnum(ARM::GPRRegClassID, decodeRd(insn));
break;
default:
- llvm_unreachable("Invalid reg class id");
+ assert(0 && "Invalid reg class id");
+ return false;
}
MI.addOperand(MCOperand::CreateReg(RegEnum));
return true;
}
-// DisassembleThumbFrm() is defined in ThumbDisassemblerCore.cpp.inc file.
-#include "ThumbDisassemblerCore.cpp.inc"
+// DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
+#include "ThumbDisassemblerCore.h"
/////////////////////////////////////////////////////
// //
// D = Inst{22}, Vd = Inst{15-12}
static unsigned decodeNEONRd(uint32_t insn) {
return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
- | (insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask;
+ | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
}
// Extract/Decode NEON N/Vn:
// N = Inst{7}, Vn = Inst{19-16}
static unsigned decodeNEONRn(uint32_t insn) {
return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
- | (insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask;
+ | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
}
// Extract/Decode NEON M/Vm:
// M = Inst{5}, Vm = Inst{3-0}
static unsigned decodeNEONRm(uint32_t insn) {
return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
- | (insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask;
+ | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
}
namespace {
// Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
static unsigned decodeLaneIndex(uint32_t insn) {
unsigned size = insn >> 10 & 3;
- assert(size == 0 || size == 1 || size == 2);
+ assert((size == 0 || size == 1 || size == 2) &&
+ "Encoding error: size should be either 0, 1, or 2");
unsigned index_align = insn >> 4 & 0xF;
return (index_align >> 1) >> size;
// Imm6 = Inst{21-16}, L = Inst{7}
//
-// NormalShift == true (A8.6.376 VRSHR, A8.6.368 VQSHRN):
-// case L:imm6 of
-// '0001xxx' => esize = 8; shift_amount = 16 - imm6
-// '001xxxx' => esize = 16; shift_amount = 32 - imm6
-// '01xxxxx' => esize = 32; shift_amount = 64 - imm6
-// '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
-//
-// NormalShift == false (A8.6.367 VQSHL, A8.6.387 VSLI):
+// LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
// case L:imm6 of
// '0001xxx' => esize = 8; shift_amount = imm6 - 8
// '001xxxx' => esize = 16; shift_amount = imm6 - 16
// '01xxxxx' => esize = 32; shift_amount = imm6 - 32
// '1xxxxxx' => esize = 64; shift_amount = imm6
//
-static unsigned decodeNVSAmt(uint32_t insn, bool NormalShift) {
+// LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
+// case L:imm6 of
+// '0001xxx' => esize = 8; shift_amount = 16 - imm6
+// '001xxxx' => esize = 16; shift_amount = 32 - imm6
+// '01xxxxx' => esize = 32; shift_amount = 64 - imm6
+// '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
+//
+static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
ElemSize esize = ESizeNA;
unsigned L = (insn >> 7) & 1;
unsigned imm6 = (insn >> 16) & 0x3F;
} else
esize = ESize64;
- if (NormalShift)
- return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
- else
+ if (LeftShift)
return esize == ESize64 ? imm6 : (imm6 - esize);
+ else
+ return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
}
// A8.6.305 VEXT
return (insn >> 8) & 0xF;
}
-static bool DisassembleNSFormatNone(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
- assert(0 && "Unexpected NEON Sub-Format of NSFormatNone");
- return false;
-}
-
// VLD*
-// D[d] D[d2] ... R[addr] [TIED_TO] R[update] AM6 align(ignored)
+// D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
// VLD*LN*
-// D[d] D[d2] ... R[addr] R[update] AM6 align(ignored) TIED_TO ... imm(idx)
+// D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
// VST*
-// R[addr] [TIED_TO] R[update] AM6 align(ignored) D[d] D[d2] ...
+// Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
// VST*LN*
-// R[addr] R[update] AM6 align(ignored) D[d] D[d2] ... [imm(idx)]
+// Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
//
// Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
-static bool DisassembleVLDSTLane0(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
// At least one DPR register plus addressing mode #6.
- assert(NumOps >= 5);
+ assert(NumOps >= 3 && "Expect >= 3 operands");
unsigned &OpIdx = NumOpsAdded;
RmEnum = getRegisterEnum(ARM::GPRRegClassID, Rm);
if (Store) {
- // Consume AddrMode6 (possible TIED_TO Rn), the DPR/QPR's, then possible
- // lane index.
- assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID);
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
- Rn)));
- ++OpIdx;
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
- // TIED_TO operand.
+ // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
+ // then possible lane index.
+ assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
+ "Reg operand expected");
+
+ if (WB) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
Rn)));
++OpIdx;
}
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
- MI.addOperand(MCOperand::CreateReg(RmEnum));
- ++OpIdx;
- assert(OpIdx < NumOps &&
- OpInfo[OpIdx].RegClass == 0 && OpInfo[OpIdx+1].RegClass == 0);
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM6Opc(WB)));
+ assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
+ Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
OpIdx += 2;
+ if (WB) {
+ MI.addOperand(MCOperand::CreateReg(RmEnum));
+ ++OpIdx;
+ }
+
assert(OpIdx < NumOps &&
(OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
- OpInfo[OpIdx].RegClass == ARM::QPRRegClassID));
+ OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
+ "Reg operand expected");
RegClass = OpInfo[OpIdx].RegClass;
while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
}
} else {
- // Consume the DPR/QPR's, AddrMode6 (possible TIED_TO Rn), possible TIED_TO
- // DPR/QPR's (ignored), then possible lane index.
+ // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
+ // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
RegClass = OpInfo[0].RegClass;
while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
++OpIdx;
}
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
- Rn)));
- ++OpIdx;
- if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
- // TIED_TO operand.
+ if (WB) {
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
Rn)));
++OpIdx;
}
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
- MI.addOperand(MCOperand::CreateReg(RmEnum));
- ++OpIdx;
- assert(OpIdx < NumOps &&
- OpInfo[OpIdx].RegClass == 0 && OpInfo[OpIdx+1].RegClass == 0);
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM6Opc(WB)));
+ assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
+ OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
+ MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
+ Rn)));
MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
OpIdx += 2;
+ if (WB) {
+ MI.addOperand(MCOperand::CreateReg(RmEnum));
+ ++OpIdx;
+ }
+
while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
- assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1);
+ assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
+ "Tied to operand expected");
MI.addOperand(MCOperand::CreateReg(0));
++OpIdx;
}
// A7.7
// If L (Inst{21}) == 0, store instructions.
-// DblSpaced = false.
-static bool DisassembleVLDSTLane(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
-
- return DisassembleVLDSTLane0(MI, Opcode, insn, NumOps, NumOpsAdded,
- slice(insn, 21, 21) == 0, false);
-}
-// A7.7
-// If L (Inst{21}) == 0, store instructions.
-// DblSpaced = true.
-static bool DisassembleVLDSTLaneDbl(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+// Find out about double-spaced-ness of the Opcode and pass it on to
+// DisassembleNLdSt0().
+static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleVLDSTLane0(MI, Opcode, insn, NumOps, NumOpsAdded,
- slice(insn, 21, 21) == 0, true);
-}
+ const StringRef Name = ARMInsts[Opcode].Name;
+ bool DblSpaced = false;
-// VLDRQ (vldmia), VSTRQ (vstmia)
-// Qd Rn imm (AM4)
-static bool DisassembleVLDSTRQ(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ if (Name.find("LN") != std::string::npos) {
+ // To one lane instructions.
+ // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
- const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
+ // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
+ if (Name.endswith("16") || Name.endswith("16_UPD"))
+ DblSpaced = slice(insn, 5, 5) == 1;
- assert(NumOps >= 3 &&
- OpInfo[0].RegClass == ARM::QPRRegClassID &&
- OpInfo[1].RegClass == ARM::GPRRegClassID &&
- OpInfo[2].RegClass == 0);
+ // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
+ if (Name.endswith("32") || Name.endswith("32_UPD"))
+ DblSpaced = slice(insn, 6, 6) == 1;
- // Qd = Inst{22:15-12} => NEON Rd
- MI.addOperand(MCOperand::CreateReg(
- getRegisterEnum(ARM::QPRRegClassID,
- decodeNEONRd(insn), true)));
-
- // Rn = Inst{19-16} => ARM Rn
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
- decodeRn(insn))));
-
- // Next comes the AM4 Opcode.
- assert(Opcode == ARM::VLDRQ || Opcode == ARM::VSTRQ);
- ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
- bool WB = getWBit(insn) == 1;
- MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
+ } else {
+ // Multiple n-element structures with type encoded as Inst{11-8}.
+ // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
- NumOpsAdded = 3;
- return true;
+ // n == 2 && type == 0b1001 -> DblSpaced = true
+ if (Name.startswith("VST2") || Name.startswith("VLD2"))
+ DblSpaced = slice(insn, 11, 8) == 9;
+
+ // n == 3 && type == 0b0101 -> DblSpaced = true
+ if (Name.startswith("VST3") || Name.startswith("VLD3"))
+ DblSpaced = slice(insn, 11, 8) == 5;
+
+ // n == 4 && type == 0b0001 -> DblSpaced = true
+ if (Name.startswith("VST4") || Name.startswith("VLD4"))
+ DblSpaced = slice(insn, 11, 8) == 1;
+
+ }
+ return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
+ slice(insn, 21, 21) == 0, DblSpaced);
}
// VMOV (immediate)
// Qd/Dd imm
-static bool DisassembleNVdImm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[1].RegClass == 0));
+ (OpInfo[1].RegClass == 0) &&
+ "Expect 1 reg operand followed by 1 imm operand");
// Qd/Dd = Inst{22:15-12} => NEON Rd
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[0].RegClass,
enum N2VFlag {
N2V_None,
N2V_VectorDupLane,
- N2V_VectorShiftLeftLong,
N2V_VectorConvert_Between_Float_Fixed
};
} // End of unnamed namespace
// VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
// Qd/Dd Dm index
//
-// Vector Shift Left Long (with maximum shift count) Instructions.
-// VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
-//
// Vector Move Long:
// Qd Dm
//
// Dd Qm
//
// Others
-static bool DisassembleNVdVmImm0(MCInst &MI, unsigned Opc, uint32_t insn,
+static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag = N2V_None) {
const TargetInstrDesc &TID = ARMInsts[Opc];
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
(OpInfo[1].RegClass == ARM::DPRRegClassID ||
- OpInfo[1].RegClass == ARM::QPRRegClassID));
+ OpInfo[1].RegClass == ARM::QPRRegClassID) &&
+ "Expect >= 2 operands and first 2 as reg operands");
unsigned &OpIdx = NumOpsAdded;
OpIdx = 0;
ElemSize esize = ESizeNA;
- if (Flag == N2V_VectorShiftLeftLong) {
- // VSHLL has maximum shift count as the imm, inferred from its size.
- assert(Opc == ARM::VSHLLi16 || Opc == ARM::VSHLLi32 || Opc == ARM::VSHLLi8);
- esize = Opc == ARM::VSHLLi8 ? ESize8
- : (Opc == ARM::VSHLLi16 ? ESize16
- : ESize32);
- }
if (Flag == N2V_VectorDupLane) {
// VDUPLN has its index embedded. Its size can be inferred from the Opcode.
- assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q);
+ assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
+ "Unexpected Opcode");
esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
: ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
: ESize32);
decodeNEONRm(insn))));
++OpIdx;
+ // VZIP and others have two TIED_TO reg operands.
+ int Idx;
+ while (OpIdx < NumOps &&
+ (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
+ // Add TIED_TO operand.
+ MI.addOperand(MI.getOperand(Idx));
+ ++OpIdx;
+ }
+
// Add the imm operand, if required.
if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
&& !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
unsigned imm = 0xFFFFFFFF;
- if (Flag == N2V_VectorShiftLeftLong)
- imm = static_cast<unsigned>(esize);
if (Flag == N2V_VectorDupLane)
imm = decodeNVLaneDupIndex(insn, esize);
if (Flag == N2V_VectorConvert_Between_Float_Fixed)
imm = decodeVCVTFractionBits(insn);
- assert(imm != 0xFFFFFFFF);
+ assert(imm != 0xFFFFFFFF && "Internal error");
MI.addOperand(MCOperand::CreateImm(imm));
++OpIdx;
}
return true;
}
-static bool DisassembleNVdVmImm(MCInst &MI, unsigned Opc, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
-
- return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded);
-}
-static bool DisassembleNVdVmImmVCVT(MCInst &MI, unsigned Opc, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
-
- return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
- N2V_VectorConvert_Between_Float_Fixed);
-}
-static bool DisassembleNVdVmImmVDupLane(MCInst &MI, unsigned Opc, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
- N2V_VectorDupLane);
+ return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded);
}
-static bool DisassembleNVdVmImmVSHLL(MCInst &MI, unsigned Opc, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
- N2V_VectorShiftLeftLong);
+ return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
+ N2V_VectorConvert_Between_Float_Fixed);
}
+static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
-// Vector Transpose/Unzip/Zip Instructions
-// Qd/Dd Qm/Dm [Qd/Dd (TIED_TO)] [Qm/Dm (TIED_TO)]
-static bool DisassembleNVectorShuffle(MCInst &MI,unsigned Opcode,uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
-
- const TargetInstrDesc &TID = ARMInsts[Opcode];
- const TargetOperandInfo *OpInfo = TID.OpInfo;
-
- assert(NumOps >= 4 &&
- (OpInfo[0].RegClass == ARM::DPRRegClassID ||
- OpInfo[0].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[1].RegClass == ARM::DPRRegClassID ||
- OpInfo[1].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[2].RegClass == ARM::DPRRegClassID ||
- OpInfo[2].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[3].RegClass == ARM::DPRRegClassID ||
- OpInfo[3].RegClass == ARM::QPRRegClassID));
-
- unsigned &OpIdx = NumOpsAdded;
-
- OpIdx = 0;
-
- // Qd/Dd = Inst{22:15-12} => NEON Rd
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
- decodeNEONRd(insn))));
- ++OpIdx;
-
- // Dm = Inst{5:3-0} => NEON Rm
- MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
- decodeNEONRm(insn))));
- ++OpIdx;
-
- assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
- TID.getOperandConstraint(OpIdx+1, TOI::TIED_TO) != -1);
-
- MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx;
- MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx;
-
- return true;
+ return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
+ N2V_VectorDupLane);
}
// Vector Shift [Accumulate] Instructions.
// Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
-static bool DisassembleNVectorShift0(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded, bool NormalShift = true) {
+//
+// Vector Shift Left Long (with maximum shift count) Instructions.
+// VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
+//
+static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
(OpInfo[1].RegClass == ARM::DPRRegClassID ||
- OpInfo[1].RegClass == ARM::QPRRegClassID));
+ OpInfo[1].RegClass == ARM::QPRRegClassID) &&
+ "Expect >= 3 operands and first 2 as reg operands");
unsigned &OpIdx = NumOpsAdded;
++OpIdx;
}
- assert(OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
- OpInfo[OpIdx].RegClass == ARM::QPRRegClassID);
+ assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
+ OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
+ "Reg operand expected");
// Qm/Dm = Inst{5:3-0} => NEON Rm
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
decodeNEONRm(insn))));
++OpIdx;
- assert(OpInfo[OpIdx].RegClass == 0);
+ assert(OpInfo[OpIdx].RegClass == 0 && "Imm operand expected");
// Add the imm operand.
- MI.addOperand(MCOperand::CreateImm(decodeNVSAmt(insn, NormalShift)));
+
+ // VSHLL has maximum shift count as the imm, inferred from its size.
+ unsigned Imm;
+ switch (Opcode) {
+ default:
+ Imm = decodeNVSAmt(insn, LeftShift);
+ break;
+ case ARM::VSHLLi8:
+ Imm = 8;
+ break;
+ case ARM::VSHLLi16:
+ Imm = 16;
+ break;
+ case ARM::VSHLLi32:
+ Imm = 32;
+ break;
+ }
+ MI.addOperand(MCOperand::CreateImm(Imm));
++OpIdx;
return true;
}
-// Normal shift amount interpretation.
-static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+// Left shift instructions.
+static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVectorShift0(MI, Opcode, insn, NumOps, NumOpsAdded, true);
+ return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true);
}
-// Different shift amount interpretation.
-static bool DisassembleNVectorShift2(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+// Right shift instructions have different shift amount interpretation.
+static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVectorShift0(MI, Opcode, insn, NumOps, NumOpsAdded, false);
+ return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false);
}
namespace {
// Qd/Dd Qn/Dn RestrictedDm index
//
// Others
-static bool DisassembleNVdVnVmImm0(MCInst &MI, unsigned Opcode, uint32_t insn,
+static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag = N3V_None) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
assert(NumOps >= 3 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
(OpInfo[1].RegClass == ARM::DPRRegClassID ||
OpInfo[1].RegClass == ARM::QPRRegClassID) &&
- (OpInfo[2].RegClass != 0));
+ "Expect >= 3 operands and first 2 as reg operands");
unsigned &OpIdx = NumOpsAdded;
: decodeNEONRm(insn))));
++OpIdx;
+ // Special case handling for VMOVDneon and VMOVQ because they are marked as
+ // N3RegFrm.
+ if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
+ return true;
+
// Dm = Inst{5:3-0} => NEON Rm
// or
// Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
Imm = decodeN3VImm(insn);
else if (IsDmRestricted)
Imm = decodeRestrictedDmIndex(insn, esize);
- else
+ else {
assert(0 && "Internal error: unreachable code!");
+ return false;
+ }
MI.addOperand(MCOperand::CreateImm(Imm));
++OpIdx;
return true;
}
-static bool DisassembleNVdVnVmImm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded);
+ return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded);
}
-static bool DisassembleNVdVnVmImmVectorShift(MCInst &MI, unsigned Opcode,
- uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
- N3V_VectorShift);
+ return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
+ N3V_VectorShift);
}
-static bool DisassembleNVdVnVmImmVectorExtract(MCInst &MI, unsigned Opcode,
- uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
- N3V_VectorExtract);
+ return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
+ N3V_VectorExtract);
}
-static bool DisassembleNVdVnVmImmMulScalar(MCInst &MI, unsigned Opcode,
- uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
+ uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
- return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
- N3V_Multiply_By_Scalar);
+ return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
+ N3V_Multiply_By_Scalar);
}
// Vector Table Lookup
// VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
// VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
// VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
-static bool DisassembleVTBL(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
assert(NumOps >= 3 &&
OpInfo[0].RegClass == ARM::DPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
- OpInfo[2].RegClass == ARM::DPRRegClassID);
+ OpInfo[2].RegClass == ARM::DPRRegClassID &&
+ "Expect >= 3 operands and first 3 as reg operands");
unsigned &OpIdx = NumOpsAdded;
// Do the <list> now.
for (unsigned i = 0; i < Len; ++i) {
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID);
+ assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
+ "Reg operand expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
Rn + i)));
++OpIdx;
}
// Dm (the index vector)
- assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID);
+ assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
+ "Reg operand (index vector) expected");
MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
decodeNEONRm(insn))));
++OpIdx;
return true;
}
-/// NEONFuncPtrs - NEONFuncPtrs maps NSFormat to corresponding DisassembleFP.
-/// We divide the disassembly task into different categories, with each one
-/// corresponding to a specific instruction encoding format. There could be
-/// exceptions when handling a specific format, and that is why the Opcode is
-/// also present in the function prototype.
-static const DisassembleFP NEONFuncPtrs[] = {
- // This will assert().
- &DisassembleNSFormatNone,
-
- // VLD and VST (including one lane) Instructions.
- &DisassembleVLDSTLane,
-
- // VLD and VST (including one lane) Double-Spaced Instructions.
- &DisassembleVLDSTLaneDbl,
-
- // A8.6.319 VLDM & A8.6.399 VSTM
- // LLVM defines VLDRQ/VSTRQ to load/store a Q register as a D register pair.
- &DisassembleVLDSTRQ,
-
- // A7.4.6 One register and a modified immediate value
- // 1-Register Instructions with imm.
- // LLVM only defines VMOVv instructions.
- &DisassembleNVdImm,
-
- // 2-Register Instructions with no imm.
- &DisassembleNVdVmImm,
-
- // 2-Register Instructions with imm (vector convert float/fixed point).
- &DisassembleNVdVmImmVCVT,
-
- // 2-Register Instructions with imm (vector dup lane).
- &DisassembleNVdVmImmVDupLane,
-
- // 2-Register Instructions with imm (vector shift left long).
- &DisassembleNVdVmImmVSHLL,
-
- // Vector Transpose/Unzip/Zip Instructions.
- &DisassembleNVectorShuffle,
-
- // Vector Shift [Narrow Accumulate] Instructions.
- &DisassembleNVectorShift,
-
- // Vector Shift Instructions with different interpretation of shift amount.
- &DisassembleNVectorShift2,
-
- // 3-Register Data-Processing Instructions.
- &DisassembleNVdVnVmImm,
-
- // Vector Shift (Register) Instructions.
- // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
- &DisassembleNVdVnVmImmVectorShift,
-
- // Vector Extract Instructions.
- &DisassembleNVdVnVmImmVectorExtract,
-
- // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
- // By Scalar Instructions.
- &DisassembleNVdVnVmImmMulScalar,
-
- // Vector Table Lookup uses byte indexes in a control vector to look up byte
- // values in a table and generate a new vector.
- &DisassembleVTBL,
- NULL,
-};
-
static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
- assert(0 && "Code is not reachable");
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
+ assert(0 && "Unreachable code!");
return false;
}
// Vector Get Lane (move scalar to ARM core register) Instructions.
// VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
- unsigned short NumDefs = TID.getNumDefs();
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
- assert(NumDefs == 1 && NumOps >= 3 &&
+ assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::GPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
- OpInfo[2].RegClass == 0);
+ OpInfo[2].RegClass == 0 &&
+ "Expect >= 3 operands with one dst operand");
ElemSize esize =
Opcode == ARM::VGETLNi32 ? ESize32
// Vector Set Lane (move ARM core register to scalar) Instructions.
// VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetInstrDesc &TID = ARMInsts[Opcode];
- unsigned short NumDefs = TID.getNumDefs();
const TargetOperandInfo *OpInfo = TID.OpInfo;
+ if (!OpInfo) return false;
- assert(NumDefs == 1 && NumOps >= 3 &&
+ assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
OpInfo[0].RegClass == ARM::DPRRegClassID &&
OpInfo[1].RegClass == ARM::DPRRegClassID &&
TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
OpInfo[2].RegClass == ARM::GPRRegClassID &&
- OpInfo[3].RegClass == 0);
+ OpInfo[3].RegClass == 0 &&
+ "Expect >= 3 operands with one dst operand");
ElemSize esize =
Opcode == ARM::VSETLNi8 ? ESize8
// Vector Duplicate Instructions (from ARM core register to all elements).
// VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
assert(NumOps >= 2 &&
(OpInfo[0].RegClass == ARM::DPRRegClassID ||
OpInfo[0].RegClass == ARM::QPRRegClassID) &&
- OpInfo[1].RegClass == ARM::GPRRegClassID);
+ OpInfo[1].RegClass == ARM::GPRRegClassID &&
+ "Expect >= 2 operands and first 2 as reg operand");
unsigned RegClass = OpInfo[0].RegClass;
}
static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
if (MemBarrierInstr(insn))
return true;
}
static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) {
+ unsigned short NumOps, unsigned &NumOpsAdded, BO) {
assert(0 && "Unexpected thumb misc. instruction!");
return false;
&DisassembleLdMiscFrm,
&DisassembleStMiscFrm,
&DisassembleLdStMulFrm,
+ &DisassembleLdStExFrm,
&DisassembleArithMiscFrm,
&DisassembleExtFrm,
&DisassembleVFPUnaryFrm,
&DisassembleNEONGetLnFrm,
&DisassembleNEONSetLnFrm,
&DisassembleNEONDupFrm,
- &DisassembleLdStExFrm,
&DisassembleMiscFrm,
&DisassembleThumbMiscFrm,
- NULL,
-};
-/// ARMAlgorithm - ARMAlgorithm implements ARMDisassemblyAlgorithm for solving
-/// the problem of building the MCOperands of an MCInst. Construction of
-/// ARMAlgorithm requires passing in a function pointer with the DisassembleFP
-/// data type.
-class ARMAlgorithm : public ARMDisassemblyAlgorithm {
- /// Algorithms - Algorithms stores a map from Format to ARMAlgorithm*.
- static std::vector<ARMAlgorithm*> Algorithms;
- /// NSAlgorithms - NSAlgorithms stores a map from NSFormat to ARMAlgorithm*.
- static std::vector<ARMAlgorithm*> NSAlgorithms;
-
- DisassembleFP Disassemble;
-
-public:
- /// GetInstance - GetInstance returns an instance of ARMAlgorithm given the
- /// encoding Format. API clients should not free up the returned instance.
- static ARMAlgorithm *GetInstance(ARMFormat Format, NSFormat NSF) {
- /// Init the first time.
- if (Algorithms.size() == 0) {
- Algorithms.resize(array_lengthof(FuncPtrs));
- for (unsigned i = 0, num = array_lengthof(FuncPtrs); i < num; ++i)
- if (FuncPtrs[i])
- Algorithms[i] = new ARMAlgorithm(FuncPtrs[i]);
- else
- Algorithms[i] = NULL;
- }
- if (NSAlgorithms.size() == 0) {
- NSAlgorithms.resize(array_lengthof(NEONFuncPtrs));
- for (unsigned i = 0, num = array_lengthof(NEONFuncPtrs); i < num; ++i)
- if (NEONFuncPtrs[i])
- NSAlgorithms[i] = new ARMAlgorithm(NEONFuncPtrs[i]);
- else
- NSAlgorithms[i] = NULL;
- }
-
- if (Format != ARM_FORMAT_NEONFRM)
- return Algorithms[Format];
- else
- return NSAlgorithms[NSF];
- }
+ // VLD and VST (including one lane) Instructions.
+ &DisassembleNLdSt,
- virtual bool Solve(MCInst &MI, unsigned Opcode, uint32_t insn,
- unsigned short NumOps, unsigned &NumOpsAdded) const {
- if (Disassemble == NULL)
- return false;
+ // A7.4.6 One register and a modified immediate value
+ // 1-Register Instructions with imm.
+ // LLVM only defines VMOVv instructions.
+ &DisassembleN1RegModImmFrm,
- return (*Disassemble)(MI, Opcode, insn, NumOps, NumOpsAdded);
- }
+ // 2-Register Instructions with no imm.
+ &DisassembleN2RegFrm,
+
+ // 2-Register Instructions with imm (vector convert float/fixed point).
+ &DisassembleNVCVTFrm,
-private:
- ARMAlgorithm(DisassembleFP fp) :
- ARMDisassemblyAlgorithm(), Disassemble(fp) {}
+ // 2-Register Instructions with imm (vector dup lane).
+ &DisassembleNVecDupLnFrm,
- ARMAlgorithm(ARMAlgorithm &AA) :
- ARMDisassemblyAlgorithm(), Disassemble(AA.Disassemble) {}
+ // Vector Shift Left Instructions.
+ &DisassembleN2RegVecShLFrm,
- virtual ~ARMAlgorithm() {}
-};
+ // Vector Shift Right Instructions, which have a different interpretation of the
+ // shift amount from the imm6 field.
+ &DisassembleN2RegVecShRFrm,
-// Define the symbol here.
-std::vector<ARMAlgorithm*> ARMAlgorithm::Algorithms;
+ // 3-Register Data-Processing Instructions.
+ &DisassembleN3RegFrm,
-// Define the symbol here.
-std::vector<ARMAlgorithm*> ARMAlgorithm::NSAlgorithms;
+ // Vector Shift (Register) Instructions.
+ // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
+ &DisassembleN3RegVecShFrm,
-// Define the symbol here.
-unsigned ARMBasicMCBuilder::ITCounter = 0;
+ // Vector Extract Instructions.
+ &DisassembleNVecExtractFrm,
-// Define the symbol here.
-unsigned ARMBasicMCBuilder::ITState = 0;
+ // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
+ // By Scalar Instructions.
+ &DisassembleNVecMulScalarFrm,
-// A8.6.50
-static unsigned short CountITSize(unsigned ITMask) {
- // First count the trailing zeros of the IT mask.
- unsigned TZ = CountTrailingZeros_32(ITMask);
- assert(TZ <= 3);
- return (4 - TZ);
-}
+ // Vector Table Lookup uses byte indexes in a control vector to look up byte
+ // values in a table and generate a new vector.
+ &DisassembleNVTBLFrm,
+
+ NULL
+};
/// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
/// The general idea is to set the Opcode for the MCInst, followed by adding
/// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
-/// to the Algo (ARM Disassemble Algorithm) object to perform Format-specific
-/// disassembly, followed by class method TryPredicateAndSBitModifier() to do
-/// PredicateOperand and OptionalDefOperand which follow the Dst/Src Operands.
+/// to the Format-specific disassemble function for disassembly, followed by
+/// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
+/// which follow the Dst/Src Operands.
bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
// Stage 1 sets the Opcode.
MI.setOpcode(Opcode);
if (NumOps == 0)
return true;
- // Stage 2 calls the ARM Disassembly Algorithm to build the operand list.
+ // Stage 2 calls the format-specific disassemble function to build the operand
+ // list.
+ if (Disasm == NULL)
+ return false;
unsigned NumOpsAdded = 0;
- bool OK = Algo.Solve(MI, Opcode, insn, NumOps, NumOpsAdded);
+ bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
if (!OK) return false;
if (NumOpsAdded >= NumOps)
bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
uint32_t insn, unsigned short NumOpsRemaining) {
- assert(NumOpsRemaining > 0);
+ assert(NumOpsRemaining > 0 && "Invalid argument");
const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
const std::string &Name = ARMInsts[Opcode].Name;
return true;
}
- assert(NumOpsRemaining > 0);
-
// Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
uint32_t insn) {
- if (Opcode == ARM::t2IT) {
- ARMBasicMCBuilder::ITCounter = CountITSize(slice(insn, 3, 0));
- ARMBasicMCBuilder::InitITState(slice(insn, 7, 0));
- } else if (InITBlock())
- ARMBasicMCBuilder::UpdateITState();
+ if (!SP) return Status;
+
+ if (Opcode == ARM::t2IT)
+ SP->InitIT(slice(insn, 7, 0));
+ else if (InITBlock())
+ SP->UpdateIT();
return Status;
}
-AbstractARMMCBuilder *ARMMCBuilderFactory::CreateMCBuilder(unsigned Opcode,
- ARMFormat Format, NSFormat NSF) {
+/// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
+ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
+ unsigned short num)
+ : Opcode(opc), Format(format), NumOps(num), SP(0) {
+ unsigned Idx = (unsigned)format;
+ assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
+ Disasm = FuncPtrs[Idx];
+}
- ARMAlgorithm *Algo = ARMAlgorithm::GetInstance(Format, NSF);
- if (!Algo)
- return NULL;
+/// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
+/// infrastructure of an MCInst given the Opcode and Format of the instr.
+/// Return NULL if it fails to create/return a proper builder. API clients
+/// are responsible for freeing the allocated memory. Caching can be
+/// performed by the API clients to improve performance.
+ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
- return new ARMBasicMCBuilder(Opcode, Format, NSF,
- ARMInsts[Opcode].getNumOperands(), *Algo);
+ return new ARMBasicMCBuilder(Opcode, Format,
+ ARMInsts[Opcode].getNumOperands());
}