1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "llvm/Support/Debug.h"
21 #include "llvm/Support/raw_ostream.h"
23 //#define DEBUG(X) do { X; } while (0)
25 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
26 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
27 /// describing the operand info for each ARMInsts[i].
29 /// Together with an instruction's encoding format, we can take advantage of the
30 /// NumOperands and the OpInfo fields of the target instruction description in
31 /// the quest to build out the MCOperand list for an MCInst.
33 /// The general guideline is that with a known format, the number of dst and src
34 /// operands are well-known. The dst is built first, followed by the src
35 /// operand(s). The operands not yet used at this point are for the Implicit
36 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
37 /// defined with two components:
39 /// def pred { // Operand PredicateOperand
40 /// ValueType Type = OtherVT;
41 /// string PrintMethod = "printPredicateOperand";
42 /// string AsmOperandLowerMethod = ?;
43 /// dag MIOperandInfo = (ops i32imm, CCR);
44 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
45 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
48 /// which is manifested by the TargetOperandInfo[] of:
50 /// { 0, 0|(1<<TOI::Predicate), 0 },
51 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
53 /// So the first predicate MCOperand corresponds to the immediate part of the
54 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
55 /// corresponds to a register kind of ARM::CPSR.
57 /// For the Defs part, in the simple case of only cc_out:$s, we have:
59 /// def cc_out { // Operand OptionalDefOperand
60 /// ValueType Type = OtherVT;
61 /// string PrintMethod = "printSBitModifierOperand";
62 /// string AsmOperandLowerMethod = ?;
63 /// dag MIOperandInfo = (ops CCR);
64 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
65 /// dag DefaultOps = (ops (i32 zero_reg));
68 /// which is manifested by the one TargetOperandInfo of:
70 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
72 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
73 #include "ARMGenInstrInfo.inc"
77 const char *ARMUtils::OpcodeName(unsigned Opcode) {
78 return ARMInsts[Opcode].Name;
81 // Return the register enum Based on RegClass and the raw register number.
84 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
85 // For this purpose, we can treat rGPR as if it were GPR.
86 if (RegClassID == ARM::rGPRRegClassID) RegClassID = ARM::GPRRegClassID;
88 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
90 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
97 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
98 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
99 case ARM::DPR_VFP2RegClassID:
101 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
102 case ARM::QPR_VFP2RegClassID:
104 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
108 switch (RegClassID) {
109 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
110 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
111 case ARM::DPR_VFP2RegClassID:
113 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
114 case ARM::QPR_VFP2RegClassID:
116 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
120 switch (RegClassID) {
121 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
122 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
123 case ARM::DPR_VFP2RegClassID:
125 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
126 case ARM::QPR_VFP2RegClassID:
128 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
132 switch (RegClassID) {
133 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
134 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
135 case ARM::DPR_VFP2RegClassID:
137 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
138 case ARM::QPR_VFP2RegClassID:
140 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
144 switch (RegClassID) {
145 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
146 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
147 case ARM::DPR_VFP2RegClassID:
149 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
150 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
154 switch (RegClassID) {
155 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
156 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
157 case ARM::DPR_VFP2RegClassID:
159 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
160 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
164 switch (RegClassID) {
165 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
166 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
167 case ARM::DPR_VFP2RegClassID:
169 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
170 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
174 switch (RegClassID) {
175 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
176 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
177 case ARM::DPR_VFP2RegClassID:
179 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
180 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
184 switch (RegClassID) {
185 case ARM::GPRRegClassID: return ARM::R8;
186 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
187 case ARM::QPRRegClassID: return ARM::Q8;
188 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
192 switch (RegClassID) {
193 case ARM::GPRRegClassID: return ARM::R9;
194 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
195 case ARM::QPRRegClassID: return ARM::Q9;
196 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
200 switch (RegClassID) {
201 case ARM::GPRRegClassID: return ARM::R10;
202 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
203 case ARM::QPRRegClassID: return ARM::Q10;
204 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
208 switch (RegClassID) {
209 case ARM::GPRRegClassID: return ARM::R11;
210 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
211 case ARM::QPRRegClassID: return ARM::Q11;
212 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
216 switch (RegClassID) {
217 case ARM::GPRRegClassID: return ARM::R12;
218 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
219 case ARM::QPRRegClassID: return ARM::Q12;
220 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
224 switch (RegClassID) {
225 case ARM::GPRRegClassID: return ARM::SP;
226 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
227 case ARM::QPRRegClassID: return ARM::Q13;
228 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
232 switch (RegClassID) {
233 case ARM::GPRRegClassID: return ARM::LR;
234 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
235 case ARM::QPRRegClassID: return ARM::Q14;
236 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
240 switch (RegClassID) {
241 case ARM::GPRRegClassID: return ARM::PC;
242 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
243 case ARM::QPRRegClassID: return ARM::Q15;
244 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
248 switch (RegClassID) {
249 case ARM::DPRRegClassID: return ARM::D16;
250 case ARM::SPRRegClassID: return ARM::S16;
254 switch (RegClassID) {
255 case ARM::DPRRegClassID: return ARM::D17;
256 case ARM::SPRRegClassID: return ARM::S17;
260 switch (RegClassID) {
261 case ARM::DPRRegClassID: return ARM::D18;
262 case ARM::SPRRegClassID: return ARM::S18;
266 switch (RegClassID) {
267 case ARM::DPRRegClassID: return ARM::D19;
268 case ARM::SPRRegClassID: return ARM::S19;
272 switch (RegClassID) {
273 case ARM::DPRRegClassID: return ARM::D20;
274 case ARM::SPRRegClassID: return ARM::S20;
278 switch (RegClassID) {
279 case ARM::DPRRegClassID: return ARM::D21;
280 case ARM::SPRRegClassID: return ARM::S21;
284 switch (RegClassID) {
285 case ARM::DPRRegClassID: return ARM::D22;
286 case ARM::SPRRegClassID: return ARM::S22;
290 switch (RegClassID) {
291 case ARM::DPRRegClassID: return ARM::D23;
292 case ARM::SPRRegClassID: return ARM::S23;
296 switch (RegClassID) {
297 case ARM::DPRRegClassID: return ARM::D24;
298 case ARM::SPRRegClassID: return ARM::S24;
302 switch (RegClassID) {
303 case ARM::DPRRegClassID: return ARM::D25;
304 case ARM::SPRRegClassID: return ARM::S25;
308 switch (RegClassID) {
309 case ARM::DPRRegClassID: return ARM::D26;
310 case ARM::SPRRegClassID: return ARM::S26;
314 switch (RegClassID) {
315 case ARM::DPRRegClassID: return ARM::D27;
316 case ARM::SPRRegClassID: return ARM::S27;
320 switch (RegClassID) {
321 case ARM::DPRRegClassID: return ARM::D28;
322 case ARM::SPRRegClassID: return ARM::S28;
326 switch (RegClassID) {
327 case ARM::DPRRegClassID: return ARM::D29;
328 case ARM::SPRRegClassID: return ARM::S29;
332 switch (RegClassID) {
333 case ARM::DPRRegClassID: return ARM::D30;
334 case ARM::SPRRegClassID: return ARM::S30;
338 switch (RegClassID) {
339 case ARM::DPRRegClassID: return ARM::D31;
340 case ARM::SPRRegClassID: return ARM::S31;
344 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
345 // Encoding error. Mark the builder with error code != 0.
350 ///////////////////////////////
352 // Utility Functions //
354 ///////////////////////////////
356 // Extract/Decode Rd: Inst{15-12}.
357 static inline unsigned decodeRd(uint32_t insn) {
358 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
361 // Extract/Decode Rn: Inst{19-16}.
362 static inline unsigned decodeRn(uint32_t insn) {
363 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
366 // Extract/Decode Rm: Inst{3-0}.
367 static inline unsigned decodeRm(uint32_t insn) {
368 return (insn & ARMII::GPRRegMask);
371 // Extract/Decode Rs: Inst{11-8}.
372 static inline unsigned decodeRs(uint32_t insn) {
373 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
376 static inline unsigned getCondField(uint32_t insn) {
377 return (insn >> ARMII::CondShift);
380 static inline unsigned getIBit(uint32_t insn) {
381 return (insn >> ARMII::I_BitShift) & 1;
384 static inline unsigned getAM3IBit(uint32_t insn) {
385 return (insn >> ARMII::AM3_I_BitShift) & 1;
388 static inline unsigned getPBit(uint32_t insn) {
389 return (insn >> ARMII::P_BitShift) & 1;
392 static inline unsigned getUBit(uint32_t insn) {
393 return (insn >> ARMII::U_BitShift) & 1;
396 static inline unsigned getPUBits(uint32_t insn) {
397 return (insn >> ARMII::U_BitShift) & 3;
400 static inline unsigned getSBit(uint32_t insn) {
401 return (insn >> ARMII::S_BitShift) & 1;
404 static inline unsigned getWBit(uint32_t insn) {
405 return (insn >> ARMII::W_BitShift) & 1;
408 static inline unsigned getDBit(uint32_t insn) {
409 return (insn >> ARMII::D_BitShift) & 1;
412 static inline unsigned getNBit(uint32_t insn) {
413 return (insn >> ARMII::N_BitShift) & 1;
416 static inline unsigned getMBit(uint32_t insn) {
417 return (insn >> ARMII::M_BitShift) & 1;
420 // See A8.4 Shifts applied to a register.
421 // A8.4.2 Register controlled shifts.
423 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
424 // into llvm enums for shift opcode. The API clients should pass in the value
425 // encoded with two bits, so the assert stays to signal a wrong API usage.
427 // A8-12: DecodeRegShift()
428 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
430 default: assert(0 && "No such value"); return ARM_AM::no_shift;
431 case 0: return ARM_AM::lsl;
432 case 1: return ARM_AM::lsr;
433 case 2: return ARM_AM::asr;
434 case 3: return ARM_AM::ror;
438 // See A8.4 Shifts applied to a register.
439 // A8.4.1 Constant shifts.
441 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
442 // encodings into the intended ShiftOpc and shift amount.
444 // A8-11: DecodeImmShift()
// NOTE(review): several interior lines of this routine are absent from this
// listing.  Per the A8-11 DecodeImmShift pseudocode it presumably also maps
// (lsr/asr, imm5 == 0) to a shift amount of 32 and (ror, imm5 == 0) to rrx —
// confirm against the upstream source before editing.
445 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
449 case ARM_AM::no_shift:
// Appears to normalize this case to no_shift (surrounding switch lines are
// missing from the dump).
453 ShOp = ARM_AM::no_shift;
465 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
466 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
467 // clients should pass in the value encoded with two bits, so the assert stays
468 // to signal a wrong API usage.
469 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
471 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
472 case 1: return ARM_AM::ia; // P=0 U=1
473 case 3: return ARM_AM::ib; // P=1 U=1
474 case 0: return ARM_AM::da; // P=0 U=0
475 case 2: return ARM_AM::db; // P=1 U=0
479 ////////////////////////////////////////////
481 // Disassemble function definitions //
483 ////////////////////////////////////////////
485 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
486 /// instr into a list of MCOperands in the appropriate order, with possible dst,
487 /// followed by possible src(s).
489 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
490 /// the CPSR, is factored into ARMBasicMCBuilder's method named
491 /// TryPredicateAndSBitModifier.
493 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
494 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
496 assert(0 && "Unexpected pseudo instruction!");
500 // Multiply Instructions.
501 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
502 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
504 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
505 // Rd{19-16} Rn{3-0} Rm{11-8}
507 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
508 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
510 // The mapping of the multiply registers to the "regular" ARM registers, where
511 // there are convenience decoder functions, is:
517 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
518 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
520 const TargetInstrDesc &TID = ARMInsts[Opcode];
521 unsigned short NumDefs = TID.getNumDefs();
522 const TargetOperandInfo *OpInfo = TID.OpInfo;
523 unsigned &OpIdx = NumOpsAdded;
527 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
529 && OpInfo[0].RegClass == ARM::GPRRegClassID
530 && OpInfo[1].RegClass == ARM::GPRRegClassID
531 && OpInfo[2].RegClass == ARM::GPRRegClassID
532 && "Expect three register operands");
534 // Instructions with two destination registers have RdLo{15-12} first.
536 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
537 "Expect 4th register operand");
538 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
543 // The destination register: RdHi{19-16} or Rd{19-16}.
544 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
547 // The two src regsiters: Rn{3-0}, then Rm{11-8}.
548 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
550 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
554 // Many multiply instructions (e.g., MLA) have three src registers.
555 // The third register operand is Ra{15-12}.
556 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
557 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
565 // Helper routines for disassembly of coprocessor instructions.
567 static bool LdStCopOpcode(unsigned Opcode) {
568 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
569 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
573 static bool CoprocessorOpcode(unsigned Opcode) {
574 if (LdStCopOpcode(Opcode))
580 case ARM::CDP: case ARM::CDP2:
581 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
582 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
586 static inline unsigned GetCoprocessor(uint32_t insn) {
587 return slice(insn, 11, 8);
589 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
590 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
592 static inline unsigned GetCopOpc2(uint32_t insn) {
593 return slice(insn, 7, 5);
595 static inline unsigned GetCopOpc(uint32_t insn) {
596 return slice(insn, 7, 4);
598 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
601 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
603 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
605 // MCRR, MCRR2, MRRC, MRRc2: cop opc Rd Rn CRm
607 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
609 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
613 // LDC_OPTION: cop CRd Rn imm8
615 // STC_OPTION: cop CRd Rn imm8
// DisassembleCoprocessor - Build the MCOperand list for the coprocessor
// instruction forms listed above: coprocessor number first, then the
// form-specific operands.
// NOTE(review): interior lines (branch structure, OpIdx updates, returns) are
// missing from this listing; consult the upstream file before editing.
618 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
619 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
621 assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");
623 unsigned &OpIdx = NumOpsAdded;
// MCRR/MRRC and their *2 variants carry one combined opc field (Inst{7-4}).
624 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
625 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
626 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
627 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
628 bool LdStCop = LdStCopOpcode(Opcode);
// The coprocessor number is the first MCOperand for every form.
632 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
635 // Unindex if P:W = 0b00 --> _OPTION variant
636 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
638 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
640 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
644 MI.addOperand(MCOperand::CreateReg(0));
645 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
646 const TargetInstrDesc &TID = ARMInsts[Opcode];
648 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// The 8-bit immediate is scaled by 4 (imm8:00) and packed as an AM2 opcode.
649 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
650 ARM_AM::no_shift, IndexMode);
651 MI.addOperand(MCOperand::CreateImm(Offset));
// _OPTION variants carry the raw 8-bit immediate instead.
654 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
658 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
659 : GetCopOpc1(insn, NoGPR)));
// CDP's CRd is an immediate (coprocessor register number); the others take a
// real GPR here.
661 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
662 : MCOperand::CreateReg(
663 getRegisterEnum(B, ARM::GPRRegClassID,
666 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
667 getRegisterEnum(B, ARM::GPRRegClassID,
669 : MCOperand::CreateImm(decodeRn(insn)));
671 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
676 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
684 // Branch Instructions.
685 // BL: SignExtend(Imm24:'00', 32)
686 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
687 // SMC: ZeroExtend(imm4, 32)
688 // SVC: ZeroExtend(Imm24, 32)
690 // Various coprocessor instructions are assigned BrFrm arbitrarily.
691 // Delegates to DisassembleCoprocessor() helper function.
694 // MSR/MSRsys: Rm mask=Inst{19-16}
696 // MSRi/MSRsysi: so_imm
697 // SRSW/SRS: ldstm_mode:$amode mode_imm
698 // RFEW/RFE: ldstm_mode:$amode Rn
// DisassembleBrFrm - Build the MCOperand list for instructions routed to the
// BrFrm format: branches, plus the coprocessor and status-register opcodes
// arbitrarily assigned to this format (see above).
// NOTE(review): interior lines (early returns, closing braces, NumOpsAdded
// updates) are missing from this listing; consult the upstream file before
// editing.
699 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
700 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
// Coprocessor opcodes are fully delegated to the helper.
702 if (CoprocessorOpcode(Opcode))
703 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
705 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
706 if (!OpInfo) return false;
708 // MRS and MRSsys take one GPR reg Rd.
709 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
710 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
711 "Reg operand expected");
712 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
717 // BXJ takes one GPR reg Rm.
718 if (Opcode == ARM::BXJ) {
719 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
720 "Reg operand expected");
721 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
726 // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
727 // bit 4, and the special register fields in bits 3-0.
728 if (Opcode == ARM::MSR) {
729 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
730 "Reg operand expected");
731 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
732 slice(insn, 19, 16) /* Special Reg */ ));
733 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
738 // MSRi take a mask, followed by one so_imm operand. The mask contains the
739 // R Bit in bit 4, and the special register fields in bits 3-0.
740 if (Opcode == ARM::MSRi) {
741 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
742 slice(insn, 19, 16) /* Special Reg */ ));
743 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
744 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
745 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
746 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
747 unsigned Imm = insn & 0xFF;
748 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// SRS/RFE carry the load/store-multiple addressing sub-mode first.
752 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
753 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
754 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
755 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
757 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
758 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
760 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// What remains must be one of the true branch/system-call opcodes.
766 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
767 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
768 "Unexpected Opcode");
770 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
773 if (Opcode == ARM::SMC) {
774 // ZeroExtend(imm4, 32) where imm24 = Inst{3-0}.
775 Imm32 = slice(insn, 3, 0);
776 } else if (Opcode == ARM::SVC) {
777 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
778 Imm32 = slice(insn, 23, 0);
780 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
781 unsigned Imm26 = slice(insn, 23, 0) << 2;
782 //Imm32 = signextend<signed int, 26>(Imm26);
783 Imm32 = SignExtend32<26>(Imm26);
786 MI.addOperand(MCOperand::CreateImm(Imm32));
792 // Misc. Branch Instructions.
// DisassembleBrMiscFrm - Build the MCOperand list for the miscellaneous
// branch opcodes: BX_RET/MOVPCLR (no explicit operands), register-target
// BLX/BLX_pred/BX, and immediate-target BLXi.
// NOTE(review): interior lines (early returns, closing braces, NumOpsAdded
// updates) are missing from this listing; consult the upstream file before
// editing.
795 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
796 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
798 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
799 if (!OpInfo) return false;
801 unsigned &OpIdx = NumOpsAdded;
805 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
806 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
809 // BLX and BX take one GPR reg.
810 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
812 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
813 "Reg operand expected");
814 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
820 // BLXi takes imm32 (the PC offset).
821 if (Opcode == ARM::BLXi) {
822 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
823 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
824 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
825 int Imm32 = SignExtend32<26>(Imm26);
826 MI.addOperand(MCOperand::CreateImm(Imm32));
834 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
835 uint32_t lsb = slice(insn, 11, 7);
836 uint32_t msb = slice(insn, 20, 16);
839 DEBUG(errs() << "Encoding error: msb < lsb\n");
843 for (uint32_t i = lsb; i <= msb; ++i)
849 // A major complication is the fact that some of the saturating add/subtract
850 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
851 // They are QADD, QDADD, QDSUB, and QSUB.
// DisassembleDPFrm - Build the MCOperand list for data-processing
// instructions: optional Rd def, Rn (unless unary), then operand 2 as either
// a register (reg/reg form), an imm16 (MOVi16/MOVTi16), or a rotated so_imm.
// BFC/BFI/SBFX/UBFX are special-cased first.
// NOTE(review): interior lines (OpIdx updates, closing braces, returns) are
// missing from this listing; consult the upstream file before editing.
852 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
853 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
855 const TargetInstrDesc &TID = ARMInsts[Opcode];
856 unsigned short NumDefs = TID.getNumDefs();
857 bool isUnary = isUnaryDP(TID.TSFlags);
858 const TargetOperandInfo *OpInfo = TID.OpInfo;
859 unsigned &OpIdx = NumOpsAdded;
863 // Disassemble register def if there is one.
864 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
865 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
870 // Now disassemble the src operands.
874 // Special-case handling of BFC/BFI/SBFX/UBFX.
875 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
876 MI.addOperand(MCOperand::CreateReg(0));
877 if (Opcode == ARM::BFI) {
878 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Bail out if the lsb/msb encoding is invalid.
883 if (!getBFCInvMask(insn, mask))
886 MI.addOperand(MCOperand::CreateImm(mask));
890 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
891 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// lsb = Inst{11-7}; width = msb - lsb + 1, hence the "+ 1" below.
893 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
894 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
// These saturating ops swap the roles of Rn/Rm (see comment above).
899 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
900 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
902 // BinaryDP has an Rn operand.
904 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
905 "Reg operand expected");
906 MI.addOperand(MCOperand::CreateReg(
907 getRegisterEnum(B, ARM::GPRRegClassID,
908 RmRn ? decodeRm(insn) : decodeRn(insn))));
912 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
913 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
914 MI.addOperand(MCOperand::CreateReg(0));
918 // Now disassemble operand 2.
922 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
923 // We have a reg/reg form.
924 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
925 // routed here as well.
926 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
927 MI.addOperand(MCOperand::CreateReg(
928 getRegisterEnum(B, ARM::GPRRegClassID,
929 RmRn? decodeRn(insn) : decodeRm(insn))));
931 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
932 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
933 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
934 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
935 MI.addOperand(MCOperand::CreateImm(Imm16));
938 // We have a reg/imm form.
939 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
940 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
941 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
942 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
943 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
944 unsigned Imm = insn & 0xFF;
945 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// DisassembleDPSoRegFrm - Build the MCOperand list for data-processing
// instructions whose operand 2 is a shifted register (so_reg): optional Rd
// def, Rn (unless unary), then the three-component so_reg operand
// [Rm, Rs-or-reg0, shift opcode+amount].
// NOTE(review): interior lines (OpIdx updates, if/else structure, returns)
// are missing from this listing; consult the upstream file before editing.
952 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
953 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
955 const TargetInstrDesc &TID = ARMInsts[Opcode];
956 unsigned short NumDefs = TID.getNumDefs();
957 bool isUnary = isUnaryDP(TID.TSFlags);
958 const TargetOperandInfo *OpInfo = TID.OpInfo;
959 unsigned &OpIdx = NumOpsAdded;
963 // Disassemble register def if there is one.
964 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
965 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
970 // Disassemble the src operands.
974 // BinaryDP has an Rn operand.
976 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
977 "Reg operand expected");
978 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
983 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
984 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
985 MI.addOperand(MCOperand::CreateReg(0));
989 // Disassemble operand 2, which consists of three components.
990 if (OpIdx + 2 >= NumOps)
993 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
994 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
995 (OpInfo[OpIdx+2].RegClass < 0) &&
996 "Expect 3 reg operands");
998 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
999 unsigned Rs = slice(insn, 4, 4);
// First component: the shifted register Rm.
1001 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1004 // Register-controlled shifts: [Rm, Rs, shift].
1005 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1007 // Inst{6-5} encodes the shift opcode.
1008 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1009 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1011 // Constant shifts: [Rm, reg0, shift_imm].
1012 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1013 // Inst{6-5} encodes the shift opcode.
1014 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1015 // Inst{11-7} encodes the imm5 shift amount.
1016 unsigned ShImm = slice(insn, 11, 7);
1018 // A8.4.1. Possible rrx or shift amount of 32...
1019 getImmShiftSE(ShOp, ShImm);
1020 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// DisassembleLdStFrm - Common worker for load/store (addressing mode #2)
// instructions: optional writeback operand, dst/src register, base register,
// then either a +/- imm12 offset or a +/- shifted register offset.
// isStore selects between the load and store operand orderings.
// NOTE(review): interior lines (OpIdx updates, early returns, closing braces)
// are missing from this listing; consult the upstream file before editing.
1027 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1028 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1030 const TargetInstrDesc &TID = ARMInsts[Opcode];
1031 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1032 const TargetOperandInfo *OpInfo = TID.OpInfo;
1033 if (!OpInfo) return false;
1035 unsigned &OpIdx = NumOpsAdded;
// Loads define a register; stores define one only when pre/post-indexed
// (the writeback).
1039 assert(((!isStore && TID.getNumDefs() > 0) ||
1040 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1041 && "Invalid arguments");
1043 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1044 if (isPrePost && isStore) {
1045 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1046 "Reg operand expected");
1047 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1052 // Disassemble the dst/src operand.
1053 if (OpIdx >= NumOps)
1056 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1057 "Reg operand expected");
1058 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1062 // After dst of a pre- and post-indexed load is the address base writeback.
1063 if (isPrePost && !isStore) {
1064 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1065 "Reg operand expected");
1066 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1071 // Disassemble the base operand.
1072 if (OpIdx >= NumOps)
1075 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1076 "Reg operand expected");
1077 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1078 && "Index mode or tied_to operand expected");
1079 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1083 // For reg/reg form, base reg is followed by +/- reg shop imm.
1084 // For immediate form, it is followed by +/- imm12.
1085 // See also ARMAddressingModes.h (Addressing Mode #2).
1086 if (OpIdx + 1 >= NumOps)
1089 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1090 unsigned IndexMode =
1091 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// I bit == 0 selects the immediate-offset form.
1092 if (getIBit(insn) == 0) {
1093 // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1094 // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1097 MI.addOperand(MCOperand::CreateReg(0));
1101 unsigned Imm12 = slice(insn, 11, 0);
1102 if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1103 Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1104 // Disassemble the 12-bit immediate offset, which is the second operand in
1105 // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
1106 int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1107 MI.addOperand(MCOperand::CreateImm(Offset));
1109 // Disassemble the 12-bit immediate offset, which is the second operand in
1110 // $am2offset => (ops GPR, i32imm).
1111 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1113 MI.addOperand(MCOperand::CreateImm(Offset));
1117 // The opcode ARM::LDRT actually corresponds to both Encoding A1 and A2 of
1118 // A8.6.86 LDRT. So if Inst{4} != 0 while Inst{25} (getIBit(insn)) == 1,
1119 // we should reject this insn as invalid.
1122 if ((Opcode == ARM::LDRT || Opcode == ARM::LDRBT) && (slice(insn,4,4) == 1))
1125 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1126 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1128 // Inst{6-5} encodes the shift opcode.
1129 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1130 // Inst{11-7} encodes the imm5 shift amount.
1131 unsigned ShImm = slice(insn, 11, 7);
1133 // A8.4.1. Possible rrx or shift amount of 32...
1134 getImmShiftSE(ShOp, ShImm);
1135 MI.addOperand(MCOperand::CreateImm(
1136 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
1143 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1144 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1145 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
1148 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1149 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1150 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1153 static bool HasDualReg(unsigned Opcode) {
1157 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1158 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// DisassembleLdStMiscFrm - Disassembles the "misc" load/store forms (halfword,
// signed byte, and doubleword variants) that use Addressing Mode #3:
// base register followed by +/- Rm or a +/- imm8 split as imm4H:imm4L.
// Builds, in order: optional base-writeback reg (pre/post store), dst/src
// reg (plus Rd+1 for LDRD/STRD), optional writeback reg (pre/post load),
// base reg, then the AM3 offset reg + offset imm pair.
// NOTE(review): this numbered listing has interior lines elided (continuation
// lines, OpIdx increments, early returns, braces); code kept verbatim.
1163 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1164 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1166 const TargetInstrDesc &TID = ARMInsts[Opcode];
1167 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1168 const TargetOperandInfo *OpInfo = TID.OpInfo;
1169 if (!OpInfo) return false;
// NumOpsAdded is aliased so every operand append below advances the count.
1171 unsigned &OpIdx = NumOpsAdded;
1175 assert(((!isStore && TID.getNumDefs() > 0) ||
1176 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1177 && "Invalid arguments");
1179 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1180 if (isPrePost && isStore) {
1181 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1182 "Reg operand expected");
1183 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1188 bool DualReg = HasDualReg(Opcode);
1190 // Disassemble the dst/src operand.
1191 if (OpIdx >= NumOps)
1194 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1195 "Reg operand expected");
1196 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1200 // Fill in LDRD and STRD's second operand, but only if it's offset mode OR we
1201 // have a pre-or-post-indexed store operation.
1202 if (DualReg && (!isPrePost || isStore)) {
1203 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1204 decodeRd(insn) + 1)));
1208 // After dst of a pre- and post-indexed load is the address base writeback.
1209 if (isPrePost && !isStore) {
1210 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1211 "Reg operand expected");
1212 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1217 // Disassemble the base operand.
1218 if (OpIdx >= NumOps)
1221 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1222 "Reg operand expected");
1223 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1224 && "Offset mode or tied_to operand expected");
1225 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1229 // For reg/reg form, base reg is followed by +/- reg.
1230 // For immediate form, it is followed by +/- imm8.
1231 // See also ARMAddressingModes.h (Addressing Mode #3).
1232 if (OpIdx + 1 >= NumOps)
1235 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1236 (OpInfo[OpIdx+1].RegClass < 0) &&
1237 "Expect 1 reg operand followed by 1 imm operand");
// U bit selects add vs. subtract of the offset; index mode comes from TSFlags.
1239 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1240 unsigned IndexMode =
1241 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// AM3 I bit == 1 selects the immediate form: reg0 placeholder + imm4H:imm4L.
1242 if (getAM3IBit(insn) == 1) {
1243 MI.addOperand(MCOperand::CreateReg(0));
1245 // Disassemble the 8-bit immediate offset.
1246 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1247 unsigned Imm4L = insn & 0xF;
1248 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1250 MI.addOperand(MCOperand::CreateImm(Offset));
1252 // Disassemble the offset reg (Rm).
1253 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1255 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
1256 MI.addOperand(MCOperand::CreateImm(Offset));
1263 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1264 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1265 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
1269 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1270 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1271 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1274 // The algorithm for disassembly of LdStMulFrm is different from others because
1275 // it explicitly populates the two predicate operands after the base register.
1276 // After that, we need to populate the reglist with each affected register
1277 // encoded as an MCOperand.
// NOTE(review): this numbered listing has interior lines elided (OpIdx
// bookkeeping, closing braces, return); code kept verbatim.
1278 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1279 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1281 assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1284 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1286 // Writeback to base, if necessary.
// The *_UPD opcodes write the updated address back to the base register, so
// the base appears once more as a def before the ordinary base operand.
1287 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1288 Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1289 Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1290 Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1291 MI.addOperand(MCOperand::CreateReg(Base));
1295 // Add the base register operand.
1296 MI.addOperand(MCOperand::CreateReg(Base));
1298 // Handling the two predicate operands before the reglist.
// Cond 0b1111 is the unconditional encoding; it is normalized to AL (0xE).
1299 int64_t CondVal = insn >> ARMII::CondShift;
1300 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1301 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1305 // Fill the variadic part of reglist.
// Inst{15-0} is a bitmask: bit i set means register Ri is in the list.
1306 unsigned RegListBits = insn & ((1 << 16) - 1);
1307 for (unsigned i = 0; i < 16; ++i) {
1308 if ((RegListBits >> i) & 1) {
1309 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// DisassembleLdStExFrm - Disassembles the load/store exclusive and swap
// instructions; operand layouts per opcode are listed below.
1318 // LDREX, LDREXB, LDREXH: Rd Rn
1319 // LDREXD: Rd Rd+1 Rn
1320 // STREX, STREXB, STREXH: Rd Rm Rn
1321 // STREXD: Rd Rm Rm+1 Rn
1323 // SWP, SWPB: Rd Rm Rn
// NOTE(review): this numbered listing has interior lines elided (assert head,
// if/else structure, OpIdx updates, epilogue); code kept verbatim.
1324 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1325 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1327 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1328 if (!OpInfo) return false;
1330 unsigned &OpIdx = NumOpsAdded;
1335 && OpInfo[0].RegClass == ARM::GPRRegClassID
1336 && OpInfo[1].RegClass == ARM::GPRRegClassID
1337 && "Expect 2 reg operands");
// Inst{20} == 0 distinguishes the store flavors; LDREXD/STREXD move a pair.
1339 bool isStore = slice(insn, 20, 20) == 0;
1340 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1342 // Add the destination operand.
1343 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1347 // Store register Exclusive needs a source operand.
1349 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Doubleword variants add the second register of the pair (Rm+1 / Rd+1).
1354 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1355 decodeRm(insn)+1)));
1359 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1360 decodeRd(insn)+1)));
1364 // Finally add the pointer operand.
1365 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1372 // Misc. Arithmetic Instructions.
1374 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1375 // RBIT, REV, REV16, REVSH: Rd Rm
1376 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1377 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1379 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1380 unsigned &OpIdx = NumOpsAdded;
1385 && OpInfo[0].RegClass == ARM::GPRRegClassID
1386 && OpInfo[1].RegClass == ARM::GPRRegClassID
1387 && "Expect 2 reg operands");
1389 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1391 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1396 assert(NumOps >= 4 && "Expect >= 4 operands");
1397 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1402 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1406 // If there is still an operand info left which is an immediate operand, add
1407 // an additional imm5 LSL/ASR operand.
1408 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1409 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1410 // Extract the 5-bit immediate field Inst{11-7}.
1411 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1412 ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
1413 if (Opcode == ARM::PKHBT)
1415 else if (Opcode == ARM::PKHBT)
1417 getImmShiftSE(Opc, ShiftAmt);
1418 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
1425 /// DisassembleSatFrm - Disassemble saturate instructions:
1426 /// SSAT, SSAT16, USAT, and USAT16.
// Builds Rd, the saturate position, Rn, and (for SSAT/USAT) an LSL/ASR
// shift-amount operand.
// NOTE(review): this numbered listing has interior lines elided (register
// continuations, the signed "Pos" adjustment body, shift fixups, epilogue);
// code kept verbatim.
1427 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1428 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1430 const TargetInstrDesc &TID = ARMInsts[Opcode];
1431 NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1433 // Disassemble register def.
1434 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Inst{20-16} is the saturate bit position; the signed variants adjust it
// (adjustment line elided in this listing).
1437 unsigned Pos = slice(insn, 20, 16);
1438 if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1440 MI.addOperand(MCOperand::CreateImm(Pos));
1442 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1445 if (NumOpsAdded == 4) {
// Inst{6} selects ASR vs. LSL for the shift operand.
1446 ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1447 // Inst{11-7} encodes the imm5 shift amount.
1448 unsigned ShAmt = slice(insn, 11, 7);
1450 // A8.6.183. Possible ASR shift amount of 32...
1451 if (Opc == ARM_AM::asr)
1454 Opc = ARM_AM::no_shift;
1456 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1461 // Extend instructions.
1462 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1463 // The 2nd operand register is Rn and the 3rd operand regsiter is Rm for the
1464 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// NOTE(review): this numbered listing has interior lines elided (assert head,
// register continuations, OpIdx updates, epilogue); code kept verbatim.
1465 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1466 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1468 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1469 unsigned &OpIdx = NumOpsAdded;
1474 && OpInfo[0].RegClass == ARM::GPRRegClassID
1475 && OpInfo[1].RegClass == ARM::GPRRegClassID
1476 && "Expect 2 reg operands");
// A third GPR operand info marks the SXTA*/UXTA* (accumulate) forms.
1478 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1480 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1485 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1490 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1494 // If there is still an operand info left which is an immediate operand, add
1495 // an additional rotate immediate operand.
1496 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1497 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1498 // Extract the 2-bit rotate field Inst{11-10}.
1499 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1500 // Rotation by 8, 16, or 24 bits.
1501 MI.addOperand(MCOperand::CreateImm(rot << 3));
1508 /////////////////////////////////////
1510 // Utility Functions For VFP //
1512 /////////////////////////////////////
1514 // Extract/Decode Dd/Sd:
1516 // SP => d = UInt(Vd:D)
1517 // DP => d = UInt(D:Vd)
1518 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1519 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1520 : (decodeRd(insn) | getDBit(insn) << 4);
1523 // Extract/Decode Dn/Sn:
1525 // SP => n = UInt(Vn:N)
1526 // DP => n = UInt(N:Vn)
1527 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1528 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1529 : (decodeRn(insn) | getNBit(insn) << 4);
1532 // Extract/Decode Dm/Sm:
1534 // SP => m = UInt(Vm:M)
1535 // DP => m = UInt(M:Vm)
1536 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1537 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1538 : (decodeRm(insn) | getMBit(insn) << 4);
1542 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1543 assert(N == 32 || N == 64);
1546 unsigned bit6 = slice(byte, 6, 6);
1548 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1550 Result |= 0x1f << 25;
1552 Result |= 0x1 << 30;
1554 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1555 (uint64_t)slice(byte, 5, 0) << 48;
1557 Result |= 0xffULL << 54;
1559 Result |= 0x1ULL << 62;
1561 return APInt(N, Result);
1564 // VFP Unary Format Instructions:
1566 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1567 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1568 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// Builds the dst reg (SPR or DPR per operand info) and, except for the
// compare-with-zero opcodes, the src reg.
// NOTE(review): this numbered listing has interior lines elided (OpIdx
// bookkeeping, early return, epilogue); code kept verbatim.
1569 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1570 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1572 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1574 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1575 unsigned &OpIdx = NumOpsAdded;
// The operand info's reg class decides single vs. double precision decoding.
1579 unsigned RegClass = OpInfo[OpIdx].RegClass;
1580 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1581 "Reg operand expected");
1582 bool isSP = (RegClass == ARM::SPRRegClassID);
1584 MI.addOperand(MCOperand::CreateReg(
1585 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1588 // Early return for compare with zero instructions.
1589 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1590 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
// Src operand class may differ from dst for VCVTDS/VCVTSD.
1593 RegClass = OpInfo[OpIdx].RegClass;
1594 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1595 "Reg operand expected");
1596 isSP = (RegClass == ARM::SPRRegClassID);
1598 MI.addOperand(MCOperand::CreateReg(
1599 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
// VFP Binary Format Instructions (e.g. VADD/VSUB/VMUL-style 3-register ops).
1605 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1606 // Some of them have operand constraints which tie the first operand in the
1607 // InOperandList to that of the dst. As far as asm printing is concerned, this
1608 // tied_to operand is simply skipped.
// NOTE(review): this numbered listing has interior lines elided (OpIdx
// bookkeeping, epilogue); code kept verbatim.
1609 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1610 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1612 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1614 const TargetInstrDesc &TID = ARMInsts[Opcode];
1615 const TargetOperandInfo *OpInfo = TID.OpInfo;
1616 unsigned &OpIdx = NumOpsAdded;
// All three regs share one class, so decode precision once from the dst.
1620 unsigned RegClass = OpInfo[OpIdx].RegClass;
1621 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1622 "Reg operand expected");
1623 bool isSP = (RegClass == ARM::SPRRegClassID);
1625 MI.addOperand(MCOperand::CreateReg(
1626 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1629 // Skip tied_to operand constraint.
// A reg0 placeholder satisfies the tied operand without affecting printing.
1630 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1631 assert(NumOps >= 4 && "Expect >=4 operands");
1632 MI.addOperand(MCOperand::CreateReg(0));
1636 MI.addOperand(MCOperand::CreateReg(
1637 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1640 MI.addOperand(MCOperand::CreateReg(
1641 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1647 // A8.6.295 vcvt (floating-point <-> integer)
1648 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1649 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1651 // A8.6.297 vcvt (floating-point and fixed-point)
1652 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// NOTE(review): this numbered listing has interior lines elided (the
// fixed-point/else branch structure, NumOpsAdded updates, epilogue); code
// kept verbatim.
1653 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1654 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1656 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1658 const TargetInstrDesc &TID = ARMInsts[Opcode];
1659 const TargetOperandInfo *OpInfo = TID.OpInfo;
1660 if (!OpInfo) return false;
1662 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1663 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1664 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
// Fixed-point form: dst, tied copy of dst, and the #fbits immediate.
1668 assert(NumOps >= 3 && "Expect >= 3 operands");
1669 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1670 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1671 MI.addOperand(MCOperand::CreateReg(
1672 getRegisterEnum(B, RegClassID,
1673 decodeVFPRd(insn, SP))));
1675 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1676 "Tied to operand expected");
1677 MI.addOperand(MI.getOperand(0));
1679 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1680 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1681 MI.addOperand(MCOperand::CreateImm(fbits));
1686 // The Rd (destination) and Rm (source) bits have different interpretations
1687 // depending on their single-precisonness.
// FP->int: the integer side is always a single-precision register.
1689 if (slice(insn, 18, 18) == 1) { // to_integer operation
1690 d = decodeVFPRd(insn, true /* Is Single Precision */);
1691 MI.addOperand(MCOperand::CreateReg(
1692 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1693 m = decodeVFPRm(insn, SP);
1694 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
1696 d = decodeVFPRd(insn, SP);
1697 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1698 m = decodeVFPRm(insn, true /* Is Single Precision */);
1699 MI.addOperand(MCOperand::CreateReg(
1700 getRegisterEnum(B, ARM::SPRRegClassID, m)));
1708 // VMOVRS - A8.6.330
1709 // Rt => Rd; Sn => UInt(Vn:N)
1710 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1711 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1713 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1715 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1717 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1718 decodeVFPRn(insn, true))));
1723 // VMOVRRD - A8.6.332
1724 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1726 // VMOVRRS - A8.6.331
1727 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Two core registers followed by either an SPR pair (Sm, Sm+1) or one DPR.
// NOTE(review): this numbered listing has interior lines elided (register
// continuations, OpIdx updates, else branch, epilogue); code kept verbatim.
1728 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1729 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1731 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1733 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1734 unsigned &OpIdx = NumOpsAdded;
1736 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1738 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// SPR operand class => VMOVRRS needs the consecutive pair Sm and Sm+1.
1742 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1743 unsigned Sm = decodeVFPRm(insn, true);
1744 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1746 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1750 MI.addOperand(MCOperand::CreateReg(
1751 getRegisterEnum(B, ARM::DPRRegClassID,
1752 decodeVFPRm(insn, false))));
1758 // VMOVSR - A8.6.330
1759 // Rt => Rd; Sn => UInt(Vn:N)
1760 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1761 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1763 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1765 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1766 decodeVFPRn(insn, true))));
1767 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1773 // VMOVDRR - A8.6.332
1774 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1776 // VMOVRRS - A8.6.331
1777 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Mirror of VFPConv3Frm: the FP side (SPR pair or one DPR) comes first,
// followed by the two ARM core registers.
// NOTE(review): this numbered listing has interior lines elided (register
// continuations, OpIdx updates, else branch, epilogue); code kept verbatim.
1778 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1779 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1781 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1783 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1784 unsigned &OpIdx = NumOpsAdded;
// SPR operand class => the consecutive pair Sm and Sm+1 is used.
1788 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1789 unsigned Sm = decodeVFPRm(insn, true);
1790 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1792 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1796 MI.addOperand(MCOperand::CreateReg(
1797 getRegisterEnum(B, ARM::DPRRegClassID,
1798 decodeVFPRm(insn, false))));
1802 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1804 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1810 // VFP Load/Store Instructions.
1811 // VLDRD, VLDRS, VSTRD, VSTRS
1812 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1813 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1815 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
1817 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
1818 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1820 // Extract Dd/Sd for operand 0.
1821 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1823 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
1825 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1826 MI.addOperand(MCOperand::CreateReg(Base));
1828 // Next comes the AM5 Opcode.
1829 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1830 unsigned char Imm8 = insn & 0xFF;
1831 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1838 // VFP Load/Store Multiple Instructions.
1839 // We have an optional write back reg, the base, and two predicate operands.
1840 // It is then followed by a reglist of either DPR(s) or SPR(s).
1842 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
// NOTE(review): this numbered listing has interior lines elided (OpIdx
// bookkeeping, braces, the failure return, epilogue); code kept verbatim.
1843 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1844 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1846 assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
1848 unsigned &OpIdx = NumOpsAdded;
1852 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1854 // Writeback to base, if necessary.
// The *_UPD opcodes update the base address, so the base is also a def.
1855 if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
1856 Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
1857 Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
1858 Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
1859 MI.addOperand(MCOperand::CreateReg(Base));
1863 MI.addOperand(MCOperand::CreateReg(Base));
1865 // Handling the two predicate operands before the reglist.
// Cond 0b1111 is normalized to AL (0xE) before being added as the predicate.
1866 int64_t CondVal = insn >> ARMII::CondShift;
1867 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1868 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1872 bool isSPVFP = (Opcode == ARM::VLDMSIA ||
1873 Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
1874 Opcode == ARM::VSTMSIA ||
1875 Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
1876 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1879 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1881 // Fill the variadic part of reglist.
// Imm8 counts SPR registers directly; DPR lists encode 2 x register count.
1882 unsigned char Imm8 = insn & 0xFF;
1883 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1885 // Apply some sanity checks before proceeding.
1886 if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
1889 for (unsigned i = 0; i < Regs; ++i) {
1890 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
1898 // Misc. VFP Instructions.
1899 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1900 // FCONSTD (DPR and a VFPf64Imm operand)
1901 // FCONSTS (SPR and a VFPf32Imm operand)
1902 // VMRS/VMSR (GPR operand)
// NOTE(review): this numbered listing has interior lines elided (break
// statements, OpIdx updates, epilogue); code kept verbatim.
1903 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1904 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1906 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1907 unsigned &OpIdx = NumOpsAdded;
// FMSTAT has no register operand at all (transfer to APSR_nzcv).
1911 if (Opcode == ARM::FMSTAT)
1914 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
// Pick the dst register class from operand info: DPR/SPR for FCONST*, GPR
// for VMRS/VMSR.
1916 unsigned RegEnum = 0;
1917 switch (OpInfo[0].RegClass) {
1918 case ARM::DPRRegClassID:
1919 RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
1921 case ARM::SPRRegClassID:
1922 RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
1924 case ARM::GPRRegClassID:
1925 RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
1928 assert(0 && "Invalid reg class id");
1932 MI.addOperand(MCOperand::CreateReg(RegEnum));
1935 // Extract/decode the f64/f32 immediate.
1936 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1937 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1938 // The asm syntax specifies the floating point value, not the 8-bit literal.
// The 8-bit literal is imm4H:imm4L = Inst{19-16}:Inst{3-0}, expanded via
// VFPExpandImm to the full IEEE pattern, then converted to a host double.
1939 APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
1940 Opcode == ARM::FCONSTD ? 64 : 32);
1941 APFloat immFP = APFloat(immRaw, true);
1942 double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
1943 immFP.convertToFloat();
1944 MI.addOperand(MCOperand::CreateFPImm(imm));
1952 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
1953 #include "ThumbDisassemblerCore.h"
1955 /////////////////////////////////////////////////////
1957 // Utility Functions For ARM Advanced SIMD //
1959 /////////////////////////////////////////////////////
1961 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1962 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1964 // A7.3 Register encoding
1966 // Extract/Decode NEON D/Vd:
1968 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
1969 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
1970 // handling it in the getRegisterEnum() utility function.
1971 // D = Inst{22}, Vd = Inst{15-12}
1972 static unsigned decodeNEONRd(uint32_t insn) {
1973 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
1974 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
1977 // Extract/Decode NEON N/Vn:
1979 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
1980 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
1981 // handling it in the getRegisterEnum() utility function.
1982 // N = Inst{7}, Vn = Inst{19-16}
1983 static unsigned decodeNEONRn(uint32_t insn) {
1984 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
1985 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
1988 // Extract/Decode NEON M/Vm:
1990 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
1991 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
1992 // handling it in the getRegisterEnum() utility function.
1993 // M = Inst{5}, Vm = Inst{3-0}
1994 static unsigned decodeNEONRm(uint32_t insn) {
1995 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
1996 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2007 } // End of unnamed namespace
2009 // size field -> Inst{11-10}
2010 // index_align field -> Inst{7-4}
2012 // The Lane Index interpretation depends on the Data Size:
2013 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2014 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2015 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2017 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
2018 static unsigned decodeLaneIndex(uint32_t insn) {
2019 unsigned size = insn >> 10 & 3;
2020 assert((size == 0 || size == 1 || size == 2) &&
2021 "Encoding error: size should be either 0, 1, or 2");
2023 unsigned index_align = insn >> 4 & 0xF;
2024 return (index_align >> 1) >> size;
2027 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2028 // op = Inst{5}, cmode = Inst{11-8}
2029 // i = Inst{24} (ARM architecture)
2030 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2031 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
2032 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2033 unsigned char op = (insn >> 5) & 1;
2034 unsigned char cmode = (insn >> 8) & 0xF;
2035 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2036 ((insn >> 16) & 7) << 4 |
2038 return (op << 12) | (cmode << 8) | Imm8;
2041 // A8.6.339 VMUL, VMULL (by scalar)
2042 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2043 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
2044 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2051 assert(0 && "Unreachable code!");
2056 // A8.6.339 VMUL, VMULL (by scalar)
2057 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2058 // ESize32 => index = Inst{5} (M) D0-D15
2059 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2062 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2064 return (insn >> 5) & 1;
2066 assert(0 && "Unreachable code!");
2071 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2072 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
2073 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2074 return 64 - ((insn >> 16) & 0x3F);
2077 // A8.6.302 VDUP (scalar)
2078 // ESize8 => index = Inst{19-17}
2079 // ESize16 => index = Inst{19-18}
2080 // ESize32 => index = Inst{19}
2081 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2084 return (insn >> 17) & 7;
2086 return (insn >> 18) & 3;
2088 return (insn >> 19) & 1;
2090 assert(0 && "Unspecified element size!");
2095 // A8.6.328 VMOV (ARM core register to scalar)
2096 // A8.6.329 VMOV (scalar to ARM core register)
2097 // ESize8 => index = Inst{21:6-5}
2098 // ESize16 => index = Inst{21:6}
2099 // ESize32 => index = Inst{21}
2100 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2103 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2105 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2107 return ((insn >> 21) & 1);
2109 assert(0 && "Unspecified element size!");
2114 // Imm6 = Inst{21-16}, L = Inst{7}
2116 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2118 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2119 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2120 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2121 // '1xxxxxx' => esize = 64; shift_amount = imm6
2123 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2125 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2126 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2127 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2128 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
2130 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2131 ElemSize esize = ESizeNA;
2132 unsigned L = (insn >> 7) & 1;
2133 unsigned imm6 = (insn >> 16) & 0x3F;
2137 else if (imm6 >> 4 == 1)
2139 else if (imm6 >> 5 == 1)
2142 assert(0 && "Wrong encoding of Inst{7:21-16}!");
2147 return esize == ESize64 ? imm6 : (imm6 - esize);
2149 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2153 // Imm4 = Inst{11-8}
2154 static unsigned decodeN3VImm(uint32_t insn) {
2155 return (insn >> 8) & 0xF;
// Operand layouts handled by this worker:
2159 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2161 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2163 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2165 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2167 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
// Shared worker for the NEON load/store (VLD*/VST*) formats.  Builds MI's
// operand list from the raw instruction bits: base register Rn, the
// addrmode-6 alignment immediate, an optional increment register Rm, the
// homogeneous list of D/Q registers (register numbers spaced by Inc), and an
// optional lane index.  NOTE(review): the branch line selecting between the
// two operand orders above is elided from this listing; the `Store` flag
// presumably picks it (caller passes Inst{21} == 0) — confirm.
2168 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2169 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2172 const TargetInstrDesc &TID = ARMInsts[Opcode];
2173 const TargetOperandInfo *OpInfo = TID.OpInfo;
2175 // At least one DPR register plus addressing mode #6.
2176 assert(NumOps >= 3 && "Expect >= 3 operands");
// OpIdx aliases NumOpsAdded, so every operand consumed below is reported
// back to the caller automatically.
2178 unsigned &OpIdx = NumOpsAdded;
2182 // We have homogeneous NEON registers for Load/Store.
2183 unsigned RegClass = 0;
2185 // Double-spaced registers have increments of 2.
2186 unsigned Inc = DblSpaced ? 2 : 1;
2188 unsigned Rn = decodeRn(insn);
2189 unsigned Rm = decodeRm(insn);
2190 unsigned Rd = decodeNEONRd(insn);
2192 // A7.7.1 Advanced SIMD addressing mode.
2195 // LLVM Addressing Mode #6.
// Resolve the optional increment register up front (0 means "not present").
2196 unsigned RmEnum = 0;
2198 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
2201 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2202 // then possible lane index.
2203 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2204 "Reg operand expected");
2207 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Addrmode-6 is a (base reg, alignment imm) pair in the operand list.
2212 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2213 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2214 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2216 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2220 MI.addOperand(MCOperand::CreateReg(RmEnum));
2224 assert(OpIdx < NumOps &&
2225 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2226 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2227 "Reg operand expected");
// Emit the register list: consecutive operands of the same D/Q class.
2229 RegClass = OpInfo[OpIdx].RegClass;
2230 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2231 MI.addOperand(MCOperand::CreateReg(
2232 getRegisterEnum(B, RegClass, Rd)));
2237 // Handle possible lane index.
2238 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2239 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2240 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2245 // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
2246 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2247 RegClass = OpInfo[0].RegClass;
2249 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2250 MI.addOperand(MCOperand::CreateReg(
2251 getRegisterEnum(B, RegClass, Rd)));
2257 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2262 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2263 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2264 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2266 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2270 MI.addOperand(MCOperand::CreateReg(RmEnum));
// TIED_TO source registers are emitted as register 0 placeholders; the asm
// printer does not need their concrete values.
2274 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2275 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2276 "Tied to operand expected");
2277 MI.addOperand(MCOperand::CreateReg(0));
2281 // Handle possible lane index.
2282 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2283 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2284 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2289 // Accessing registers past the end of the NEON register file is not
// Entry point for the NEON load/store (VLDn/VSTn) format.
2298 // If L (Inst{21}) == 0, store instructions.
2299 // Find out about double-spaced-ness of the Opcode and pass it on to
2300 // DisassembleNLdSt0().
2301 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2302 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2304 const StringRef Name = ARMInsts[Opcode].Name;
2305 bool DblSpaced = false;
// Opcode names containing "LN" are the single-lane variants; double-spacing
// is then encoded in the size-dependent index bits rather than in the type
// field used by the multiple-structure forms below.
2307 if (Name.find("LN") != std::string::npos) {
2308 // To one lane instructions.
2309 // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
2311 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2312 if (Name.endswith("16") || Name.endswith("16_UPD"))
2313 DblSpaced = slice(insn, 5, 5) == 1;
2315 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2316 if (Name.endswith("32") || Name.endswith("32_UPD"))
2317 DblSpaced = slice(insn, 6, 6) == 1;
2320 // Multiple n-element structures with type encoded as Inst{11-8}.
2321 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2323 // n == 2 && type == 0b1001 -> DblSpaced = true
2324 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2325 DblSpaced = slice(insn, 11, 8) == 9;
2327 // n == 3 && type == 0b0101 -> DblSpaced = true
2328 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2329 DblSpaced = slice(insn, 11, 8) == 5;
2331 // n == 4 && type == 0b0001 -> DblSpaced = true
2332 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2333 DblSpaced = slice(insn, 11, 8) == 1;
// Inst{21} == 0 means a store, passed as the Store argument.
2336 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2337 slice(insn, 21, 21) == 0, DblSpaced, B);
// One NEON register plus a modified immediate value (A7.4.6):
2344 // Qd/Dd imm src(=Qd/Dd)
// Handles VMOV/VMVN/VBICi/VORRi vector-immediate forms.  The element size is
// implied by the opcode and feeds decodeN1VImm() below.
2345 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2346 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2348 const TargetInstrDesc &TID = ARMInsts[Opcode];
2349 const TargetOperandInfo *OpInfo = TID.OpInfo;
2351 assert(NumOps >= 2 &&
2352 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2353 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2354 (OpInfo[1].RegClass < 0) &&
2355 "Expect 1 reg operand followed by 1 imm operand");
2357 // Qd/Dd = Inst{22:15-12} => NEON Rd
2358 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2359 decodeNEONRd(insn))));
// Select the element size from the opcode.  NOTE(review): the `esize =
// ESizeN;` assignments between the case groups are on lines elided from this
// listing; the grouping (i8 / i16 / i32 / i64) implies the intended sizes.
2361 ElemSize esize = ESizeNA;
2364 case ARM::VMOVv16i8:
2367 case ARM::VMOVv4i16:
2368 case ARM::VMOVv8i16:
2369 case ARM::VMVNv4i16:
2370 case ARM::VMVNv8i16:
2371 case ARM::VBICiv4i16:
2372 case ARM::VBICiv8i16:
2373 case ARM::VORRiv4i16:
2374 case ARM::VORRiv8i16:
2377 case ARM::VMOVv2i32:
2378 case ARM::VMOVv4i32:
2379 case ARM::VMVNv2i32:
2380 case ARM::VMVNv4i32:
2381 case ARM::VBICiv2i32:
2382 case ARM::VBICiv4i32:
2383 case ARM::VORRiv2i32:
2384 case ARM::VORRiv4i32:
2387 case ARM::VMOVv1i64:
2388 case ARM::VMOVv2i64:
2392 assert(0 && "Unexpected opcode!");
2396 // One register and a modified immediate value.
2397 // Add the imm operand.
2398 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2402 // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
2404 (OpInfo[2].RegClass == ARM::DPRRegClassID ||
2405 OpInfo[2].RegClass == ARM::QPRRegClassID)) {
// The tied source is the same register as the destination Vd.
2406 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2407 decodeNEONRd(insn))));
// Tail of the unnamed-namespace N2VFlag enum (earlier enumerators elided).
2418 N2V_VectorConvert_Between_Float_Fixed
2420 } // End of unnamed namespace
// Generic two-register (Vd, Vm) NEON builder with an optional trailing
// immediate, shared by the instruction groups listed below.
2422 // Vector Convert [between floating-point and fixed-point]
2423 // Qd/Dd Qm/Dm [fbits]
2425 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2426 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2429 // Vector Move Long:
2432 // Vector Move Narrow:
2436 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2437 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2439 const TargetInstrDesc &TID = ARMInsts[Opc];
2440 const TargetOperandInfo *OpInfo = TID.OpInfo;
2442 assert(NumOps >= 2 &&
2443 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2444 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2445 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2446 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2447 "Expect >= 2 operands and first 2 as reg operands");
2449 unsigned &OpIdx = NumOpsAdded;
2453 ElemSize esize = ESizeNA;
2454 if (Flag == N2V_VectorDupLane) {
2455 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2456 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2457 "Unexpected Opcode");
// NOTE(review): the final ": ESize32)" arm of this ternary is on an elided
// line — the 32-bit VDUPLN opcodes are the remaining case.
2458 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2459 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2463 // Qd/Dd = Inst{22:15-12} => NEON Rd
2464 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2465 decodeNEONRd(insn))));
// Tied source operand is emitted as a register-0 placeholder.
2469 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2471 MI.addOperand(MCOperand::CreateReg(0));
2475 // Dm = Inst{5:3-0} => NEON Rm
2476 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2477 decodeNEONRm(insn))));
2480 // VZIP and others have two TIED_TO reg operands.
2482 while (OpIdx < NumOps &&
2483 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2484 // Add TIED_TO operand.
2485 MI.addOperand(MI.getOperand(Idx));
2489 // Add the imm operand, if required.
2490 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2491 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// 0xFFFFFFFF is a sentinel: one of the Flag branches below must set imm.
2493 unsigned imm = 0xFFFFFFFF;
2495 if (Flag == N2V_VectorDupLane)
2496 imm = decodeNVLaneDupIndex(insn, esize);
2497 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2498 imm = decodeVCVTFractionBits(insn);
2500 assert(imm != 0xFFFFFFFF && "Internal error");
2501 MI.addOperand(MCOperand::CreateImm(imm));
// Plain two-register NEON form (no trailing immediate); thin wrapper around
// DisassembleNVdVmOptImm().  NOTE(review): the flag argument on the call's
// continuation line is elided from this listing — confirm it is the "none"
// N2VFlag enumerator.
2508 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2509 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2511 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2514 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2515 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2517 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2518 N2V_VectorConvert_Between_Float_Fixed, B);
2520 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2521 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2523 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2524 N2V_VectorDupLane, B);
// Shared worker for the NEON immediate-shift formats:
2527 // Vector Shift [Accumulate] Instructions.
2528 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2530 // Vector Shift Left Long (with maximum shift count) Instructions.
2531 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
// `LeftShift` selects the imm6 interpretation used by decodeNVSAmt().
2533 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2534 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2536 const TargetInstrDesc &TID = ARMInsts[Opcode];
2537 const TargetOperandInfo *OpInfo = TID.OpInfo;
2539 assert(NumOps >= 3 &&
2540 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2541 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2542 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2543 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2544 "Expect >= 3 operands and first 2 as reg operands");
2546 unsigned &OpIdx = NumOpsAdded;
2550 // Qd/Dd = Inst{22:15-12} => NEON Rd
2551 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2552 decodeNEONRd(insn))));
// Shift-accumulate variants carry a tied source; add a reg-0 placeholder.
2555 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2557 MI.addOperand(MCOperand::CreateReg(0));
2561 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2562 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2563 "Reg operand expected");
2565 // Qm/Dm = Inst{5:3-0} => NEON Rm
2566 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2567 decodeNEONRm(insn))));
2570 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2572 // Add the imm operand.
// NOTE(review): the VSHLL branch that sets Imm to the maximum shift count
// (inferred from the opcode's size) is on elided lines around here.
2574 // VSHLL has maximum shift count as the imm, inferred from its size.
2578 Imm = decodeNVSAmt(insn, LeftShift);
2590 MI.addOperand(MCOperand::CreateImm(Imm));
2596 // Left shift instructions.
2597 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2598 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2600 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
2603 // Right shift instructions have different shift amount interpretation.
2604 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2605 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2607 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
// Tail of the unnamed-namespace N3VFlag enum (earlier enumerators elided).
2616 N3V_Multiply_By_Scalar
2618 } // End of unnamed namespace
2620 // NEON Three Register Instructions with Optional Immediate Operand
// Shared worker for the three-register groups below; Flag controls operand
// order (Vd,Vn,Vm vs. Vd,Vm,Vn), the trailing immediate kind, and whether Dm
// is a restricted scalar register.
2622 // Vector Extract Instructions.
2623 // Qd/Dd Qn/Dn Qm/Dm imm4
2625 // Vector Shift (Register) Instructions.
2626 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2628 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2629 // Qd/Dd Qn/Dn RestrictedDm index
2632 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2633 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
2635 const TargetInstrDesc &TID = ARMInsts[Opcode];
2636 const TargetOperandInfo *OpInfo = TID.OpInfo;
2638 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
2639 assert(NumOps >= 3 &&
2640 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2641 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2642 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2643 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2644 "Expect >= 3 operands and first 2 as reg operands");
2646 unsigned &OpIdx = NumOpsAdded;
// Decode the flag into the three local controls used below.
2650 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2651 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2652 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2653 ElemSize esize = ESizeNA;
2654 if (Flag == N3V_Multiply_By_Scalar) {
// Inst{21-20} is the size field for multiply-by-scalar; only 16/32 are valid.
2655 unsigned size = (insn >> 20) & 3;
2656 if (size == 1) esize = ESize16;
2657 if (size == 2) esize = ESize32;
2658 assert (esize == ESize16 || esize == ESize32);
2661 // Qd/Dd = Inst{22:15-12} => NEON Rd
2662 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2663 decodeNEONRd(insn))));
2666 // VABA, VABAL, VBSLd, VBSLq, ...
// Tied source operand is emitted as a register-0 placeholder.
2667 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2669 MI.addOperand(MCOperand::CreateReg(0));
2673 // Dn = Inst{7:19-16} => NEON Rn
2675 // Dm = Inst{5:3-0} => NEON Rm
2676 MI.addOperand(MCOperand::CreateReg(
2677 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2678 VdVnVm ? decodeNEONRn(insn)
2679 : decodeNEONRm(insn))));
2682 // Special case handling for VMOVDneon and VMOVQ because they are marked as
// (two-register moves: nothing further to add after the early return, which
// sits on an elided line.)
2684 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
2687 // Dm = Inst{5:3-0} => NEON Rm
2689 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2691 // Dn = Inst{7:19-16} => NEON Rn
2692 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2693 : decodeNEONRm(insn))
2694 : decodeNEONRn(insn);
2696 MI.addOperand(MCOperand::CreateReg(
2697 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
2700 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2701 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2702 // Add the imm operand.
2705 Imm = decodeN3VImm(insn);
2706 else if (IsDmRestricted)
2707 Imm = decodeRestrictedDmIndex(insn, esize);
2709 assert(0 && "Internal error: unreachable code!");
2713 MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register data-processing form; thin wrapper around
// DisassembleNVdVnVmOptImm().  NOTE(review): the flag argument on the call's
// continuation line is elided from this listing — confirm it is the "none"
// N3VFlag enumerator.
2720 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2721 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2723 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2726 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
2727 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2729 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2730 N3V_VectorShift, B);
2732 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
2733 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2735 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2736 N3V_VectorExtract, B);
2738 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
2739 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2741 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2742 N3V_Multiply_By_Scalar, B);
2745 // Vector Table Lookup
// Operand layouts (the <list> length grows with the table size):
2747 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2748 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2749 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2750 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
2751 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2752 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2754 const TargetInstrDesc &TID = ARMInsts[Opcode];
2755 const TargetOperandInfo *OpInfo = TID.OpInfo;
2756 if (!OpInfo) return false;
2758 assert(NumOps >= 3 &&
2759 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2760 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2761 OpInfo[2].RegClass == ARM::DPRRegClassID &&
2762 "Expect >= 3 operands and first 3 as reg operands");
2764 unsigned &OpIdx = NumOpsAdded;
2768 unsigned Rn = decodeNEONRn(insn);
// Inst{9-8} encodes the table length (number of Dn registers) minus one:
2770 // {Dn} encoded as len = 0b00
2771 // {Dn Dn+1} encoded as len = 0b01
2772 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2773 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
2774 unsigned Len = slice(insn, 9, 8) + 1;
2776 // Dd (the destination vector)
2777 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2778 decodeNEONRd(insn))));
2781 // Process tied_to operand constraint.
2783 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2784 MI.addOperand(MI.getOperand(Idx));
2788 // Do the <list> now.
2789 for (unsigned i = 0; i < Len; ++i) {
2790 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2791 "Reg operand expected");
2792 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2797 // Dm (the index vector)
2798 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2799 "Reg operand (index vector) expected");
2800 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2801 decodeNEONRm(insn))));
2807 // Vector Get Lane (move scalar to ARM core register) Instructions.
2808 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
// Builds: destination GPR Rt, source NEON register Dn, then the lane index
// decoded with the opcode-implied element size.
2809 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2810 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2812 const TargetInstrDesc &TID = ARMInsts[Opcode];
2813 const TargetOperandInfo *OpInfo = TID.OpInfo;
2814 if (!OpInfo) return false;
2816 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2817 OpInfo[0].RegClass == ARM::GPRRegClassID &&
2818 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2819 OpInfo[2].RegClass < 0 &&
2820 "Expect >= 3 operands with one dst operand");
// Element size from the opcode: i32 -> 32, s16/u16 -> 16, otherwise 8
// (the final ternary arm is on an elided line).
2823 Opcode == ARM::VGETLNi32 ? ESize32
2824 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
2827 // Rt = Inst{15-12} => ARM Rd
2828 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2831 // Dn = Inst{7:19-16} => NEON Rn
2832 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2833 decodeNEONRn(insn))));
2835 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2841 // Vector Set Lane (move ARM core register to scalar) Instructions.
2842 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
// Builds: destination Dd, a reg-0 placeholder for the tied source Dd, the
// source GPR Rt, then the lane index.
2843 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2844 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2846 const TargetInstrDesc &TID = ARMInsts[Opcode];
2847 const TargetOperandInfo *OpInfo = TID.OpInfo;
2848 if (!OpInfo) return false;
// NOTE(review): the assert only guarantees NumOps >= 3 but OpInfo[3] is
// dereferenced below — this likely should assert NumOps >= 4; confirm
// against the .td operand lists for VSETLN*.
2850 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
2851 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2852 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2853 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
2854 OpInfo[2].RegClass == ARM::GPRRegClassID &&
2855 OpInfo[3].RegClass < 0 &&
2856 "Expect >= 3 operands with one dst operand");
// Element size from the opcode: i8 -> 8, i16 -> 16, otherwise 32 (the final
// ternary arm is on an elided line).
2859 Opcode == ARM::VSETLNi8 ? ESize8
2860 : (Opcode == ARM::VSETLNi16 ? ESize16
2863 // Dd = Inst{7:19-16} => NEON Rn
2864 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
2865 decodeNEONRn(insn))));
// Placeholder for the TIED_TO source Dd.
2868 MI.addOperand(MCOperand::CreateReg(0));
2870 // Rt = Inst{15-12} => ARM Rd
2871 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2874 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2880 // Vector Duplicate Instructions (from ARM core register to all elements).
2881 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
// Builds: destination NEON register (D or Q, per the operand info), then the
// source GPR Rt.
2882 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2883 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2885 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2887 assert(NumOps >= 2 &&
2888 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2889 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2890 OpInfo[1].RegClass == ARM::GPRRegClassID &&
2891 "Expect >= 2 operands and first 2 as reg operand");
2893 unsigned RegClass = OpInfo[0].RegClass;
2895 // Qd/Dd = Inst{7:19-16} => NEON Rn
2896 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
2897 decodeNEONRn(insn))));
2899 // Rt = Inst{15-12} => ARM Rd
2900 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Returns true for the memory-barrier encodings: Inst{31-8} == 0xf57ff0 with
// Inst{7-4} in [4, 6] (the DSB/DMB/ISB group).
static inline bool MemBarrierInstr(uint32_t insn) {
  unsigned op7_4 = (insn >> 4) & 0xF;
  bool TopBitsMatch = (insn >> 8) == 0xf57ff0;
  return TopBitsMatch && op7_4 >= 4 && op7_4 <= 6;
}
2918 static inline bool PreLoadOpcode(unsigned Opcode) {
2920 case ARM::PLDi12: case ARM::PLDrs:
2921 case ARM::PLDWi12: case ARM::PLDWrs:
2922 case ARM::PLIi12: case ARM::PLIrs:
// Disassembles the preload-hint instructions (PLD/PLDW/PLI).  Emits the base
// register followed by either a signed imm12 offset (addrmode_imm12 forms)
// or an offset register plus an AM2 shift-opc immediate (ldst_so_reg forms).
2929 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2930 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2932 // Preload Data/Instruction requires either 2 or 3 operands.
2933 // PLDi12, PLDWi12, PLIi12: addrmode_imm12
2934 // PLDrs, PLDWrs, PLIrs: ldst_so_reg
2936 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2939 if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
2940 || Opcode == ARM::PLIi12) {
2941 unsigned Imm12 = slice(insn, 11, 0);
// U bit clear means the offset is subtracted from the base.
2942 bool Negative = getUBit(insn) == 0;
2944 // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
2945 if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
2946 DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
2947 MI.setOpcode(ARM::PLDi12);
2950 // -0 is represented specially. All other values are as normal.
2951 int Offset = Negative ? -1 * Imm12 : Imm12;
2952 if (Imm12 == 0 && Negative)
2955 MI.addOperand(MCOperand::CreateImm(Offset));
// Register-shifted form: offset register, then the packed AM2 opcode imm.
2958 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2961 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2963 // Inst{6-5} encodes the shift opcode.
2964 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
2965 // Inst{11-7} encodes the imm5 shift amount.
2966 unsigned ShImm = slice(insn, 11, 7);
2968 // A8.4.1. Possible rrx or shift amount of 32...
2969 getImmShiftSE(ShOp, ShImm);
2970 MI.addOperand(MCOperand::CreateImm(
2971 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Disassembles the miscellaneous format: memory barriers (DMB/DSB/ISB),
// SWP/SWPB, SETEND, the CPS variants, DBG, BKPT, and the preload hints.
// Each case adds its operands and returns (the early returns sit on lines
// elided from this listing); reaching the end is an internal error.
2978 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2979 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2981 if (MemBarrierInstr(insn)) {
2982 // DMBsy, DSBsy, and ISBsy instructions have zero operand and are taken care
2983 // of within the generic ARMBasicMCBuilder::BuildIt() method.
2985 // Inst{3-0} encodes the memory barrier option for the variants.
2986 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3002 // SWP, SWPB: Rd Rm Rn
3003 // Delegate to DisassembleLdStExFrm()....
3004 return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// SETEND carries the endianness bit E in Inst{9}.
3009 if (Opcode == ARM::SETEND) {
3011 MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
3015 // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
3016 // opcodes which match the same real instruction. This is needed since there's
3017 // no current handling of optional arguments. Fix here when a better handling
3018 // of optional arguments is implemented.
3019 if (Opcode == ARM::CPS3p) { // M = 1
3020 // Let's reject these impossible imod values by returning false:
3023 // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
3024 // invalid combination, so we just check for imod=0b00 here.
3025 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3027 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3028 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3029 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3033 if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
3034 // Let's reject these impossible imod values by returning false:
3035 // 1. (imod=0b00,M=0)
3037 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3039 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3040 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3044 if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
3045 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3050 // DBG has its option specified in Inst{3-0}.
3051 if (Opcode == ARM::DBG) {
3052 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3057 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3058 if (Opcode == ARM::BKPT) {
3059 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3060 slice(insn, 3, 0)));
3065 if (PreLoadOpcode(Opcode))
3066 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3068 assert(0 && "Unexpected misc instruction!");
3072 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3073 /// We divide the disassembly task into different categories, with each one
3074 /// corresponding to a specific instruction encoding format. There could be
3075 /// exceptions when handling a specific format, and that is why the Opcode is
3076 /// also present in the function prototype.
// The entry order must match the ARMFormat enum: the constructor indexes
// this table directly with (unsigned)format.  Several entries are on lines
// elided from this listing.
3077 static const DisassembleFP FuncPtrs[] = {
3081 &DisassembleBrMiscFrm,
3083 &DisassembleDPSoRegFrm,
3086 &DisassembleLdMiscFrm,
3087 &DisassembleStMiscFrm,
3088 &DisassembleLdStMulFrm,
3089 &DisassembleLdStExFrm,
3090 &DisassembleArithMiscFrm,
3093 &DisassembleVFPUnaryFrm,
3094 &DisassembleVFPBinaryFrm,
3095 &DisassembleVFPConv1Frm,
3096 &DisassembleVFPConv2Frm,
3097 &DisassembleVFPConv3Frm,
3098 &DisassembleVFPConv4Frm,
3099 &DisassembleVFPConv5Frm,
3100 &DisassembleVFPLdStFrm,
3101 &DisassembleVFPLdStMulFrm,
3102 &DisassembleVFPMiscFrm,
3103 &DisassembleThumbFrm,
3104 &DisassembleMiscFrm,
3105 &DisassembleNGetLnFrm,
3106 &DisassembleNSetLnFrm,
3107 &DisassembleNDupFrm,
3109 // VLD and VST (including one lane) Instructions.
3112 // A7.4.6 One register and a modified immediate value
3113 // 1-Register Instructions with imm.
3114 // LLVM only defines VMOVv instructions.
3115 &DisassembleN1RegModImmFrm,
3117 // 2-Register Instructions with no imm.
3118 &DisassembleN2RegFrm,
3120 // 2-Register Instructions with imm (vector convert float/fixed point).
3121 &DisassembleNVCVTFrm,
3123 // 2-Register Instructions with imm (vector dup lane).
3124 &DisassembleNVecDupLnFrm,
3126 // Vector Shift Left Instructions.
3127 &DisassembleN2RegVecShLFrm,
3129 // Vector Shift Right Instructions, which have a different interpretation of
3130 // the shift amount from the imm6 field.
3131 &DisassembleN2RegVecShRFrm,
3133 // 3-Register Data-Processing Instructions.
3134 &DisassembleN3RegFrm,
3136 // Vector Shift (Register) Instructions.
3137 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3138 &DisassembleN3RegVecShFrm,
3140 // Vector Extract Instructions.
3141 &DisassembleNVecExtractFrm,
3143 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3144 // By Scalar Instructions.
3145 &DisassembleNVecMulScalarFrm,
3147 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3148 // values in a table and generate a new vector.
3149 &DisassembleNVTBLFrm,
3154 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3155 /// The general idea is to set the Opcode for the MCInst, followed by adding
3156 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3157 /// to the Format-specific disassemble function for disassembly, followed by
3158 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3159 /// which follow the Dst/Src Operands.
3160 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3161 // Stage 1 sets the Opcode.
3162 MI.setOpcode(Opcode);
3163 // If the number of operands is zero, we're done!
3167 // Stage 2 calls the format-specific disassemble function to build the operand
// Disasm is the FuncPtrs entry selected by this builder's format; it reports
// how many operands it consumed through NumOpsAdded.
3171 unsigned NumOpsAdded = 0;
3172 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
// Fail if the format handler rejected the encoding or flagged an error.
3174 if (!OK || this->Err != 0) return false;
// All operands accounted for; nothing left for stage 3.
3175 if (NumOpsAdded >= NumOps)
3178 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3179 // FIXME: Should this be done selectively?
3180 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3183 // A8.3 Conditional execution
3184 // A8.3.1 Pseudocode details of conditional execution
3185 // Condition bits '111x' indicate the instruction is always executed.
3186 static uint32_t CondCode(uint32_t CondField) {
3187 if (CondField == 0xF)
3192 /// DoPredicateOperands - DoPredicateOperands processes the predicate operands
3193 /// of some Thumb instructions which come before the reglist operands. It
3194 /// returns true if the two predicate operands have been processed.
3195 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3196 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3198 assert(NumOpsRemaining > 0 && "Invalid argument");
3200 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx points at the first operand slot not yet filled in by the format
// handler; the predicate pair, if any, starts there.
3201 unsigned Idx = MI.getNumOperands();
3203 // First, we check whether this instr specifies the PredicateOperand through
3204 // a pair of TargetOperandInfos with isPredicate() property.
3205 if (NumOpsRemaining >= 2 &&
3206 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3207 OpInfo[Idx].RegClass < 0 &&
3208 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3210 // If we are inside an IT block, get the IT condition bits maintained via
3211 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3214 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Outside an IT block the predicate defaults to AL with CPSR as the CC reg.
3216 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3217 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3224 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3225 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
3227 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3228 uint32_t insn, unsigned short NumOpsRemaining) {
3230 assert(NumOpsRemaining > 0 && "Invalid argument");
3232 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3233 const std::string &Name = ARMInsts[Opcode].Name;
// Idx points at the first operand slot not yet filled by the format handler.
3234 unsigned Idx = MI.getNumOperands();
3236 // First, we check whether this instr specifies the PredicateOperand through
3237 // a pair of TargetOperandInfos with isPredicate() property.
3238 if (NumOpsRemaining >= 2 &&
3239 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3240 OpInfo[Idx].RegClass < 0 &&
3241 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3243 // If we are inside an IT block, get the IT condition bits maintained via
3244 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3247 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Thumb opcode names start with 't'; their condition field location differs.
3249 if (Name.length() > 1 && Name[0] == 't') {
3250 // Thumb conditional branch instructions have their cond field embedded,
3254 if (Name == "t2Bcc")
3255 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 25, 22))));
3256 else if (Name == "tBcc")
3257 MI.addOperand(MCOperand::CreateImm(CondCode(slice(insn, 11, 8))));
3259 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3261 // ARM instructions get their condition field from Inst{31-28}.
3262 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
3265 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
// The predicate pair consumed two of the remaining operand slots.
3267 NumOpsRemaining -= 2;
3270 if (NumOpsRemaining == 0)
3273 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3274 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
// S bit set => the instruction defines CPSR; otherwise emit reg 0.
3275 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3279 if (NumOpsRemaining == 0)
3285 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3286 /// after BuildIt is finished.
// Maintains the Thumb IT-block state via the session pointer SP: a t2IT
// instruction (re)initializes the IT state from Inst{7-0}; while inside an
// IT block, subsequent instructions update/consume that state.
3287 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// No session object => nothing to track; pass the status through.
3290 if (!SP) return Status;
3292 if (Opcode == ARM::t2IT)
3293 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3294 else if (InITBlock())
3300 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
// Constructor: records the opcode/format/operand-count triple and selects
// the format-specific disassemble function from the FuncPtrs table.  SP and
// Err start cleared.
3301 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3303 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3304 unsigned Idx = (unsigned)format;
// The last table slot is reserved (see CreateMCBuilder's bound check), hence
// the "- 1" in the valid-format range.
3305 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3306 Disasm = FuncPtrs[Idx];
3309 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3310 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3311 /// Return NULL if it fails to create/return a proper builder. API clients
3312 /// are responsible for freeing up of the allocated memory. Cacheing can be
3313 /// performed by the API clients to improve performance.
3314 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3315 // For "Unknown format", fail by returning a NULL pointer.
3316 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3317 DEBUG(errs() << "Unknown format\n");
3321 return new ARMBasicMCBuilder(Opcode, Format,
3322 ARMInsts[Opcode].getNumOperands());