1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers ----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder, Builder Factory,
12 // as well as the Algorithm to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #include "ARMAddressingModes.h"
17 #include "ARMDisassemblerCore.h"
20 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
21 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
22 /// describing the operand info for each ARMInsts[i].
24 /// Together with an instruction's encoding format, we can take advantage of the
25 /// NumOperands and the OpInfo fields of the target instruction description in
26 /// the quest to build out the MCOperand list for an MCInst.
28 /// The general guideline is that with a known format, the number of dst and src
29 /// operands are well-known. The dst is built first, followed by the src
30 /// operand(s). The operands not yet used at this point are for the Implicit
31 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
32 /// defined with two components:
34 /// def pred { // Operand PredicateOperand
35 /// ValueType Type = OtherVT;
36 /// string PrintMethod = "printPredicateOperand";
37 /// string AsmOperandLowerMethod = ?;
38 /// dag MIOperandInfo = (ops i32imm, CCR);
39 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
40 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
43 /// which is manifested by the TargetOperandInfo[] of:
45 /// { 0, 0|(1<<TOI::Predicate), 0 },
46 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
48 /// So the first predicate MCOperand corresponds to the immediate part of the
49 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
50 /// corresponds to a register kind of ARM::CPSR.
52 /// For the Defs part, in the simple case of only cc_out:$s, we have:
54 /// def cc_out { // Operand OptionalDefOperand
55 /// ValueType Type = OtherVT;
56 /// string PrintMethod = "printSBitModifierOperand";
57 /// string AsmOperandLowerMethod = ?;
58 /// dag MIOperandInfo = (ops CCR);
59 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
60 /// dag DefaultOps = (ops (i32 zero_reg));
63 /// which is manifested by the one TargetOperandInfo of:
65 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
67 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
68 #include "ARMGenInstrInfo.inc"
72 const char *ARMUtils::OpcodeName(unsigned Opcode) {
73 return ARMInsts[Opcode].Name;
76 // There is a more efficient way than the following. It is fragile, though.
77 // See the code snippet after this function.
78 static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister,
79 bool DRegPair = false) {
81 if (DRegPair && RegClassID == ARM::QPRRegClassID) {
82 // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
83 // in the ARM Architecture Manual as far as I understand it (A8.6.307).
84 // Therefore, we morph the RegClassID to be the sub register class and don't
85 // subsequently transform the RawRegister encoding when calculating RegNum.
87 // See also ARMinstPrinter::printOperand() wrt "dregpair" modifier part
88 // where this workaround is meant for.
89 RegClassID = ARM::DPRRegClassID;
92 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
94 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
100 switch (RegClassID) {
101 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
102 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
103 case ARM::DPR_VFP2RegClassID:
105 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
106 case ARM::QPR_VFP2RegClassID:
108 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
112 switch (RegClassID) {
113 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
114 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
115 case ARM::DPR_VFP2RegClassID:
117 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
118 case ARM::QPR_VFP2RegClassID:
120 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
124 switch (RegClassID) {
125 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
126 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
127 case ARM::DPR_VFP2RegClassID:
129 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
130 case ARM::QPR_VFP2RegClassID:
132 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
136 switch (RegClassID) {
137 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
138 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
139 case ARM::DPR_VFP2RegClassID:
141 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
142 case ARM::QPR_VFP2RegClassID:
144 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
148 switch (RegClassID) {
149 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
150 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
151 case ARM::DPR_VFP2RegClassID:
153 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
154 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
158 switch (RegClassID) {
159 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
160 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
161 case ARM::DPR_VFP2RegClassID:
163 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
164 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
168 switch (RegClassID) {
169 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
170 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
171 case ARM::DPR_VFP2RegClassID:
173 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
174 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
178 switch (RegClassID) {
179 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
180 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
181 case ARM::DPR_VFP2RegClassID:
183 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
184 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
188 switch (RegClassID) {
189 case ARM::GPRRegClassID: return ARM::R8;
190 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
191 case ARM::QPRRegClassID: return ARM::Q8;
192 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
196 switch (RegClassID) {
197 case ARM::GPRRegClassID: return ARM::R9;
198 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
199 case ARM::QPRRegClassID: return ARM::Q9;
200 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
204 switch (RegClassID) {
205 case ARM::GPRRegClassID: return ARM::R10;
206 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
207 case ARM::QPRRegClassID: return ARM::Q10;
208 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
212 switch (RegClassID) {
213 case ARM::GPRRegClassID: return ARM::R11;
214 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
215 case ARM::QPRRegClassID: return ARM::Q11;
216 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
220 switch (RegClassID) {
221 case ARM::GPRRegClassID: return ARM::R12;
222 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
223 case ARM::QPRRegClassID: return ARM::Q12;
224 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
228 switch (RegClassID) {
229 case ARM::GPRRegClassID: return ARM::SP;
230 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
231 case ARM::QPRRegClassID: return ARM::Q13;
232 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
236 switch (RegClassID) {
237 case ARM::GPRRegClassID: return ARM::LR;
238 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
239 case ARM::QPRRegClassID: return ARM::Q14;
240 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
244 switch (RegClassID) {
245 case ARM::GPRRegClassID: return ARM::PC;
246 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
247 case ARM::QPRRegClassID: return ARM::Q15;
248 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
252 switch (RegClassID) {
253 case ARM::DPRRegClassID: return ARM::D16;
254 case ARM::SPRRegClassID: return ARM::S16;
258 switch (RegClassID) {
259 case ARM::DPRRegClassID: return ARM::D17;
260 case ARM::SPRRegClassID: return ARM::S17;
264 switch (RegClassID) {
265 case ARM::DPRRegClassID: return ARM::D18;
266 case ARM::SPRRegClassID: return ARM::S18;
270 switch (RegClassID) {
271 case ARM::DPRRegClassID: return ARM::D19;
272 case ARM::SPRRegClassID: return ARM::S19;
276 switch (RegClassID) {
277 case ARM::DPRRegClassID: return ARM::D20;
278 case ARM::SPRRegClassID: return ARM::S20;
282 switch (RegClassID) {
283 case ARM::DPRRegClassID: return ARM::D21;
284 case ARM::SPRRegClassID: return ARM::S21;
288 switch (RegClassID) {
289 case ARM::DPRRegClassID: return ARM::D22;
290 case ARM::SPRRegClassID: return ARM::S22;
294 switch (RegClassID) {
295 case ARM::DPRRegClassID: return ARM::D23;
296 case ARM::SPRRegClassID: return ARM::S23;
300 switch (RegClassID) {
301 case ARM::DPRRegClassID: return ARM::D24;
302 case ARM::SPRRegClassID: return ARM::S24;
306 switch (RegClassID) {
307 case ARM::DPRRegClassID: return ARM::D25;
308 case ARM::SPRRegClassID: return ARM::S25;
312 switch (RegClassID) {
313 case ARM::DPRRegClassID: return ARM::D26;
314 case ARM::SPRRegClassID: return ARM::S26;
318 switch (RegClassID) {
319 case ARM::DPRRegClassID: return ARM::D27;
320 case ARM::SPRRegClassID: return ARM::S27;
324 switch (RegClassID) {
325 case ARM::DPRRegClassID: return ARM::D28;
326 case ARM::SPRRegClassID: return ARM::S28;
330 switch (RegClassID) {
331 case ARM::DPRRegClassID: return ARM::D29;
332 case ARM::SPRRegClassID: return ARM::S29;
336 switch (RegClassID) {
337 case ARM::DPRRegClassID: return ARM::D30;
338 case ARM::SPRRegClassID: return ARM::S30;
342 switch (RegClassID) {
343 case ARM::DPRRegClassID: return ARM::D31;
344 case ARM::SPRRegClassID: return ARM::S31;
348 llvm_unreachable("Invalid (RegClassID, RawRegister) combination");
// This is efficient but fragile.
//
// The table-driven alternative below is the "more efficient way" referred to
// above getRegisterEnum().  It is kept for reference only and deliberately
// disabled: if compiled in, the two-argument overload would be ambiguous with
// the default-argument overload above at every two-argument call site, and the
// table hard-codes the class IDs emitted by TableGen, which can change.
//
// See ARMGenRegisterInfo.h.inc for more info.
#if 0
static const TargetRegisterClass* const ARMRegisterClasses[] = {
  NULL,                      // RegClassID = 0 is unused — TODO confirm
  &ARM::CCRRegClass,         // CCRRegClassID = 1,
  &ARM::DPRRegClass,         // DPRRegClassID = 2,
  &ARM::DPR_8RegClass,       // DPR_8RegClassID = 3,
  &ARM::DPR_VFP2RegClass,    // DPR_VFP2RegClassID = 4,
  &ARM::GPRRegClass,         // GPRRegClassID = 5,
  &ARM::QPRRegClass,         // QPRRegClassID = 6,
  &ARM::QPR_8RegClass,       // QPR_8RegClassID = 7,
  &ARM::QPR_VFP2RegClass,    // QPR_VFP2RegClassID = 8,
  &ARM::SPRRegClass,         // SPRRegClassID = 9,
  &ARM::SPR_8RegClass,       // SPR_8RegClassID = 10,
  &ARM::SPR_INVALIDRegClass, // SPR_INVALIDRegClassID = 11,
  &ARM::tGPRRegClass         // tGPRRegClassID = 12
};

// Return the register enum given register class id and raw register value.
static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister) {
  assert(RegClassID < array_lengthof(ARMRegisterClasses) &&
         "Register Class ID out of range");
  return ARMRegisterClasses[RegClassID]->getRegister(RawRegister);
}
#endif
378 /// DisassembleFP - DisassembleFP points to a function that disassembles an insn
379 /// and builds the MCOperand list upon disassembly. It returns false on failure
380 /// or true on success. The number of operands added is updated upon success.
381 typedef bool (*DisassembleFP)(MCInst &MI, unsigned Opcode, uint32_t insn,
382 unsigned short NumOps, unsigned &NumOpsAdded);
384 ///////////////////////////////
386 // Utility Functions //
388 ///////////////////////////////
390 // Extract/Decode Rd: Inst{15-12}.
391 static inline unsigned decodeRd(uint32_t insn) {
392 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
395 // Extract/Decode Rn: Inst{19-16}.
396 static inline unsigned decodeRn(uint32_t insn) {
397 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
400 // Extract/Decode Rm: Inst{3-0}.
401 static inline unsigned decodeRm(uint32_t insn) {
402 return (insn & ARMII::GPRRegMask);
405 // Extract/Decode Rs: Inst{11-8}.
406 static inline unsigned decodeRs(uint32_t insn) {
407 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
410 static inline unsigned getCondField(uint32_t insn) {
411 return (insn >> ARMII::CondShift);
414 static inline unsigned getIBit(uint32_t insn) {
415 return (insn >> ARMII::I_BitShift) & 1;
418 static inline unsigned getAM3IBit(uint32_t insn) {
419 return (insn >> ARMII::AM3_I_BitShift) & 1;
422 static inline unsigned getPBit(uint32_t insn) {
423 return (insn >> ARMII::P_BitShift) & 1;
426 static inline unsigned getUBit(uint32_t insn) {
427 return (insn >> ARMII::U_BitShift) & 1;
430 static inline unsigned getPUBits(uint32_t insn) {
431 return (insn >> ARMII::U_BitShift) & 3;
434 static inline unsigned getSBit(uint32_t insn) {
435 return (insn >> ARMII::S_BitShift) & 1;
438 static inline unsigned getWBit(uint32_t insn) {
439 return (insn >> ARMII::W_BitShift) & 1;
442 static inline unsigned getDBit(uint32_t insn) {
443 return (insn >> ARMII::D_BitShift) & 1;
446 static inline unsigned getNBit(uint32_t insn) {
447 return (insn >> ARMII::N_BitShift) & 1;
450 static inline unsigned getMBit(uint32_t insn) {
451 return (insn >> ARMII::M_BitShift) & 1;
// Sign extend the low B bits of x to the full width of T.
// Usage: int r = signextend<signed int, 5>(x);
template <typename T, unsigned B> inline T signextend(const T x) {
  // Assigning through a B-bit signed bit-field performs the sign extension.
  struct { T x : B; } s;
  return s.x = x;
}
463 // See A8.4 Shifts applied to a register.
464 // A8.4.2 Register controlled shifts.
466 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
467 // into llvm enums for shift opcode.
469 // A8-12: DecodeRegShift()
470 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
472 default: assert(0 && "No such value");
473 case 0: return ARM_AM::lsl;
474 case 1: return ARM_AM::lsr;
475 case 2: return ARM_AM::asr;
476 case 3: return ARM_AM::ror;
480 // See A8.4 Shifts applied to a register.
481 // A8.4.1 Constant shifts.
483 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
484 // encodings into the intended ShiftOpc and shift amount.
486 // A8-11: DecodeImmShift()
487 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
488 // If type == 0b11 and imm5 == 0, we have an rrx, instead.
489 if (ShOp == ARM_AM::ror && ShImm == 0)
491 // If (lsr or asr) and imm5 == 0, shift amount is 32.
492 if ((ShOp == ARM_AM::lsr || ShOp == ARM_AM::asr) && ShImm == 0)
496 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
497 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode.
498 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
500 default: assert(0 && "No such value");
501 case 1: return ARM_AM::ia; // P=0 U=1
502 case 3: return ARM_AM::ib; // P=1 U=1
503 case 0: return ARM_AM::da; // P=0 U=0
504 case 2: return ARM_AM::db; // P=1 U=0
508 ////////////////////////////////////////////
510 // Disassemble function definitions //
512 ////////////////////////////////////////////
514 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
515 /// instr into a list of MCOperands in the appropriate order, with possible dst,
516 /// followed by possible src(s).
518 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
519 /// the CPSR, is factored into ARMBasicMCBuilder's class method named
520 /// TryPredicateAndSBitModifier.
522 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
523 unsigned short NumOps, unsigned &NumOpsAdded) {
525 if (Opcode == ARM::Int_MemBarrierV7 || Opcode == ARM::Int_SyncBarrierV7)
528 assert(0 && "Unexpected pseudo instruction!");
532 // Multiply Instructions.
533 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
534 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
536 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
537 // Rd{19-16} Rn{3-0} Rm{11-8}
539 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
540 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
542 // The mapping of the multiply registers to the "regular" ARM registers, where
543 // there are convenience decoder functions, is:
549 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
550 unsigned short NumOps, unsigned &NumOpsAdded) {
552 const TargetInstrDesc &TID = ARMInsts[Opcode];
553 unsigned short NumDefs = TID.getNumDefs();
554 const TargetOperandInfo *OpInfo = TID.OpInfo;
555 unsigned &OpIdx = NumOpsAdded;
559 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
561 && OpInfo[0].RegClass == ARM::GPRRegClassID
562 && OpInfo[1].RegClass == ARM::GPRRegClassID
563 && OpInfo[2].RegClass == ARM::GPRRegClassID);
565 // Instructions with two destination registers have RdLo{15-12} first.
567 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID);
568 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
573 // The destination register: RdHi{19-16} or Rd{19-16}.
574 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
577 // The two src regsiters: Rn{3-0}, then Rm{11-8}.
578 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
580 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
584 // Many multiply instructions (e.g., MLA) have three src registers.
585 // The third register operand is Ra{15-12}.
586 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
587 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
595 // Helper routines for disassembly of coprocessor instructions.
597 static bool LdStCopOpcode(unsigned Opcode) {
598 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
599 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
603 static bool CoprocessorOpcode(unsigned Opcode) {
604 if (LdStCopOpcode(Opcode))
610 case ARM::CDP: case ARM::CDP2:
611 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
612 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
616 static inline unsigned GetCoprocessor(uint32_t insn) {
617 return slice(insn, 11, 8);
619 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
620 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
622 static inline unsigned GetCopOpc2(uint32_t insn) {
623 return slice(insn, 7, 5);
625 static inline unsigned GetCopOpc(uint32_t insn) {
626 return slice(insn, 7, 4);
628 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
631 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
633 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
635 // MCRR, MCRR2, MRRC, MRRc2: cop opc Rd Rn CRm
637 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
639 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
643 // LDC_OPTION: cop CRd Rn imm8
645 // STC_OPTION: cop CRd Rn imm8
648 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
649 unsigned short NumOps, unsigned &NumOpsAdded) {
653 unsigned &OpIdx = NumOpsAdded;
654 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
655 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
656 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
657 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
658 bool LdStCop = LdStCopOpcode(Opcode);
662 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
665 // Unindex if P:W = 0b00 --> _OPTION variant
666 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
668 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
670 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
674 MI.addOperand(MCOperand::CreateReg(0));
675 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
676 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
678 MI.addOperand(MCOperand::CreateImm(Offset));
681 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
685 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
686 : GetCopOpc1(insn, NoGPR)));
688 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
689 : MCOperand::CreateReg(
690 getRegisterEnum(ARM::GPRRegClassID,
693 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
694 getRegisterEnum(ARM::GPRRegClassID,
696 : MCOperand::CreateImm(decodeRn(insn)));
698 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
703 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
711 // Branch Instructions.
712 // BLr9: SignExtend(Imm24:'00', 32)
713 // Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
714 // SMC: ZeroExtend(imm4, 32)
715 // SVC: ZeroExtend(Imm24, 32)
717 // Various coprocessor instructions are assigned BrFrm arbitrarily.
718 // Delegates to DisassembleCoprocessor() helper function.
721 // MSR/MSRsys: Rm mask=Inst{19-16}
723 // MSRi/MSRsysi: so_imm
724 // SRSW/SRS: addrmode4:$addr mode_imm
725 // RFEW/RFE: addrmode4:$addr Rn
726 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
727 unsigned short NumOps, unsigned &NumOpsAdded) {
729 if (CoprocessorOpcode(Opcode))
730 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded);
732 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
734 // MRS and MRSsys take one GPR reg Rd.
735 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
736 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
737 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
742 // BXJ takes one GPR reg Rm.
743 if (Opcode == ARM::BXJ) {
744 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
745 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
750 // MSR and MSRsys take one GPR reg Rm, followed by the mask.
751 if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
752 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID);
753 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
755 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
759 // MSRi and MSRsysi take one so_imm operand, followed by the mask.
760 if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
761 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
762 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
763 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
764 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
765 unsigned Imm = insn & 0xFF;
766 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
767 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
771 // SRSW and SRS requires addrmode4:$addr for ${addr:submode}, followed by the
772 // mode immediate (Inst{4-0}).
773 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
774 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
775 // ARMInstPrinter::printAddrMode4Operand() prints special mode string
776 // if the base register is SP; so don't set ARM::SP.
777 MI.addOperand(MCOperand::CreateReg(0));
778 bool WB = (Opcode == ARM::SRSW);
779 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
780 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
782 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
783 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
785 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
791 assert(Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
792 || Opcode == ARM::SMC || Opcode == ARM::SVC);
794 assert(NumOps >= 1 && OpInfo[0].RegClass == 0);
797 if (Opcode == ARM::SMC) {
798 // ZeroExtend(imm4, 32) where imm24 = Inst{3-0}.
799 Imm32 = slice(insn, 3, 0);
800 } else if (Opcode == ARM::SVC) {
801 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
802 Imm32 = slice(insn, 23, 0);
804 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
805 unsigned Imm26 = slice(insn, 23, 0) << 2;
806 Imm32 = signextend<signed int, 26>(Imm26);
808 // When executing an ARM instruction, PC reads as the address of the current
809 // instruction plus 8. The assembler subtracts 8 from the difference
810 // between the branch instruction and the target address, disassembler has
811 // to add 8 to compensate.
815 MI.addOperand(MCOperand::CreateImm(Imm32));
821 // Misc. Branch Instructions.
822 // BR_JTadd, BR_JTr, BR_JTm
825 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
826 unsigned short NumOps, unsigned &NumOpsAdded) {
828 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
829 unsigned &OpIdx = NumOpsAdded;
833 // BX_RET has only two predicate operands, do an early return.
834 if (Opcode == ARM::BX_RET)
837 // BLXr9 and BRIND take one GPR reg.
838 if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
839 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
840 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
846 // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
847 if (Opcode == ARM::BR_JTadd) {
848 // InOperandList with GPR:$target and GPR:$idx regs.
851 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
853 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
856 // Fill in the two remaining imm operands to signify build completion.
857 MI.addOperand(MCOperand::CreateImm(0));
858 MI.addOperand(MCOperand::CreateImm(0));
864 // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
865 if (Opcode == ARM::BR_JTr) {
866 // InOperandList with GPR::$target reg.
869 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
872 // Fill in the two remaining imm operands to signify build completion.
873 MI.addOperand(MCOperand::CreateImm(0));
874 MI.addOperand(MCOperand::CreateImm(0));
880 // BR_JTm is an LDR with Rt = PC.
881 if (Opcode == ARM::BR_JTm) {
882 // This is the reg/reg form, with base reg followed by +/- reg shop imm.
883 // See also ARMAddressingModes.h (Addressing Mode #2).
885 assert(NumOps == 5 && getIBit(insn) == 1);
886 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
889 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
891 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
892 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
894 // Inst{6-5} encodes the shift opcode.
895 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
896 // Inst{11-7} encodes the imm5 shift amount.
897 unsigned ShImm = slice(insn, 11, 7);
899 // A8.4.1. Possible rrx or shift amount of 32...
900 getImmShiftSE(ShOp, ShImm);
901 MI.addOperand(MCOperand::CreateImm(
902 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
904 // Fill in the two remaining imm operands to signify build completion.
905 MI.addOperand(MCOperand::CreateImm(0));
906 MI.addOperand(MCOperand::CreateImm(0));
912 assert(0 && "Unexpected BrMiscFrm Opcode");
916 static inline uint32_t getBFCInvMask(uint32_t insn) {
917 uint32_t lsb = slice(insn, 11, 7);
918 uint32_t msb = slice(insn, 20, 16);
920 assert(lsb <= msb && "Encoding error: lsb > msb");
921 for (uint32_t i = lsb; i <= msb; ++i)
926 static inline bool SaturateOpcode(unsigned Opcode) {
928 case ARM::SSATlsl: case ARM::SSATasr: case ARM::SSAT16:
929 case ARM::USATlsl: case ARM::USATasr: case ARM::USAT16:
936 static inline unsigned decodeSaturatePos(unsigned Opcode, uint32_t insn) {
940 return slice(insn, 20, 16) + 1;
942 return slice(insn, 19, 16) + 1;
945 return slice(insn, 20, 16);
947 return slice(insn, 19, 16);
949 llvm_unreachable("Invalid opcode passed in");
954 // A major complication is the fact that some of the saturating add/subtract
955 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
956 // They are QADD, QDADD, QDSUB, and QSUB.
957 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
958 unsigned short NumOps, unsigned &NumOpsAdded) {
960 const TargetInstrDesc &TID = ARMInsts[Opcode];
961 unsigned short NumDefs = TID.getNumDefs();
962 bool isUnary = isUnaryDP(TID.TSFlags);
963 const TargetOperandInfo *OpInfo = TID.OpInfo;
964 unsigned &OpIdx = NumOpsAdded;
968 // Disassemble register def if there is one.
969 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
970 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
975 // Now disassemble the src operands.
979 // SSAT/SSAT16/USAT/USAT16 has imm operand after Rd.
980 if (SaturateOpcode(Opcode)) {
981 MI.addOperand(MCOperand::CreateImm(decodeSaturatePos(Opcode, insn)));
983 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
986 if (Opcode == ARM::SSAT16 || Opcode == ARM::USAT16) {
991 // For SSAT operand reg (Rm) has been disassembled above.
992 // Now disassemble the shift amount.
994 // Inst{11-7} encodes the imm5 shift amount.
995 unsigned ShAmt = slice(insn, 11, 7);
997 // A8.6.183. Possible ASR shift amount of 32...
998 if (Opcode == ARM::SSATasr && ShAmt == 0)
1001 MI.addOperand(MCOperand::CreateImm(ShAmt));
1007 // Special-case handling of BFC/BFI/SBFX/UBFX.
1008 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
1009 // TIED_TO operand skipped for BFC and Inst{3-0} (Reg) for BFI.
1010 MI.addOperand(MCOperand::CreateReg(Opcode == ARM::BFC ? 0
1011 : getRegisterEnum(ARM::GPRRegClassID,
1013 MI.addOperand(MCOperand::CreateImm(getBFCInvMask(insn)));
1017 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
1018 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1020 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1021 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
1026 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1027 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1029 // BinaryDP has an Rn operand.
1031 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1032 MI.addOperand(MCOperand::CreateReg(
1033 getRegisterEnum(ARM::GPRRegClassID,
1034 RmRn ? decodeRm(insn) : decodeRn(insn))));
1038 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1039 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1040 MI.addOperand(MCOperand::CreateReg(0));
1044 // Now disassemble operand 2.
1045 if (OpIdx >= NumOps)
1048 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1049 // We have a reg/reg form.
1050 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1051 // routed here as well.
1052 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1053 MI.addOperand(MCOperand::CreateReg(
1054 getRegisterEnum(ARM::GPRRegClassID,
1055 RmRn? decodeRn(insn) : decodeRm(insn))));
1057 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1058 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1059 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1060 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
1061 MI.addOperand(MCOperand::CreateImm(Imm16));
1064 // We have a reg/imm form.
1065 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1066 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1067 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1068 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1069 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1070 unsigned Imm = insn & 0xFF;
1071 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Disassembles a Data-Processing (shifted register) instruction: an optional
// GPR def, an optional Rn for binary forms, then operand 2 emitted as three
// MCOperands [Rm, Rs-or-reg0, shift opc+amount] per ARM Addressing Mode #1.
// Returns true on success; OpIdx aliases NumOpsAdded so the caller sees the
// number of operands consumed.
// NOTE(review): this listing has lines elided (original line numbers skip) —
// verify against the upstream file before modifying the logic.
1078 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1079 unsigned short NumOps, unsigned &NumOpsAdded) {
1081 const TargetInstrDesc &TID = ARMInsts[Opcode];
1082 unsigned short NumDefs = TID.getNumDefs();
1083 bool isUnary = isUnaryDP(TID.TSFlags);
1084 const TargetOperandInfo *OpInfo = TID.OpInfo;
1085 unsigned &OpIdx = NumOpsAdded;
1089 // Disassemble register def if there is one.
1090 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1091 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1096 // Disassemble the src operands.
1097 if (OpIdx >= NumOps)
1100 // BinaryDP has an Rn operand.
1102 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1103 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1108 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1109 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1110 MI.addOperand(MCOperand::CreateReg(0));
1114 // Disassemble operand 2, which consists of three components.
1115 if (OpIdx + 2 >= NumOps)
1118 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1119 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1120 (OpInfo[OpIdx+2].RegClass == 0));
1122 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1123 unsigned Rs = slice(insn, 4, 4);
1125 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1128 // Register-controlled shifts: [Rm, Rs, shift].
1129 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1131 // Inst{6-5} encodes the shift opcode.
1132 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1133 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1135 // Constant shifts: [Rm, reg0, shift_imm].
1136 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1137 // Inst{6-5} encodes the shift opcode.
1138 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1139 // Inst{11-7} encodes the imm5 shift amount.
1140 unsigned ShImm = slice(insn, 11, 7);
1142 // A8.4.1. Possible rrx or shift amount of 32...
// getImmShiftSE canonicalizes the (ShOp, ShImm) pair in place for the rrx
// and shift-by-32 special encodings before the shifter operand is built.
1143 getImmShiftSE(ShOp, ShImm);
1144 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Shared worker for single-register load/store (Addressing Mode #2).
// Operand order built: [writeback base (pre/post store)] dst-or-src reg,
// [writeback base (pre/post load)], base reg, then the +/- offset pair:
// either (reg0, imm12 AM2 opc) for the immediate form or
// (Rm, AM2 opc with shift) for the register form.
1151 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1152 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
1154 const TargetInstrDesc &TID = ARMInsts[Opcode];
1155 unsigned short NumDefs = TID.getNumDefs();
1156 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1157 const TargetOperandInfo *OpInfo = TID.OpInfo;
1158 unsigned &OpIdx = NumOpsAdded;
// Loads define a register; stores define one only for the writeback base.
1162 assert((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)));
1164 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1165 if (isPrePost && isStore) {
1166 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1167 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1172 // Disassemble the dst/src operand.
1173 if (OpIdx >= NumOps)
1176 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1177 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1181 // After dst of a pre- and post-indexed load is the address base writeback.
1182 if (isPrePost && !isStore) {
1183 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1184 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1189 // Disassemble the base operand.
1190 if (OpIdx >= NumOps)
1193 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
// Pre/post-indexed forms tie the base to the writeback operand added above.
1194 assert(!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1));
1195 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1199 // For reg/reg form, base reg is followed by +/- reg, shift op and imm.
1200 // For immediate form, it is followed by +/- imm12.
1201 // See also ARMAddressingModes.h (Addressing Mode #2).
1202 if (OpIdx + 1 >= NumOps)
1205 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1206 (OpInfo[OpIdx+1].RegClass == 0));
// U bit (add/sub) selects the sign of the offset.
1208 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1209 if (getIBit(insn) == 0) {
1210 MI.addOperand(MCOperand::CreateReg(0));
1212 // Disassemble the 12-bit immediate offset.
1213 unsigned Imm12 = slice(insn, 11, 0);
1214 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
1215 MI.addOperand(MCOperand::CreateImm(Offset));
1217 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1218 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1220 // Inst{6-5} encodes the shift opcode.
1221 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1222 // Inst{11-7} encodes the imm5 shift amount.
1223 unsigned ShImm = slice(insn, 11, 7);
1225 // A8.4.1. Possible rrx or shift amount of 32...
1226 getImmShiftSE(ShOp, ShImm);
1227 MI.addOperand(MCOperand::CreateImm(
1228 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Load (AM2) entry point: thin wrapper delegating to the shared
// DisassembleLdStFrm worker with isStore = false.
1235 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1236 unsigned short NumOps, unsigned &NumOpsAdded) {
1237 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
// Store (AM2) entry point: thin wrapper delegating to the shared
// DisassembleLdStFrm worker with isStore = true.
1240 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1241 unsigned short NumOps, unsigned &NumOpsAdded) {
1242 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
// Returns true for the dual-register transfer opcodes (LDRD/STRD and their
// pre/post-indexed variants), which move an even/odd GPR pair and therefore
// need a second (Rd+1) operand filled in by the caller.
1245 static bool HasDualReg(unsigned Opcode) {
1249 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1250 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// Shared worker for the miscellaneous (halfword/signed/dual) load/store
// forms using Addressing Mode #3: imm8 split as imm4H:imm4L, or a +/- Rm
// register offset. Mirrors DisassembleLdStFrm but also emits the implicit
// second destination register (Rd+1) for LDRD/STRD.
1255 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1256 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
1258 const TargetInstrDesc &TID = ARMInsts[Opcode];
1259 unsigned short NumDefs = TID.getNumDefs();
1260 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1261 const TargetOperandInfo *OpInfo = TID.OpInfo;
1262 unsigned &OpIdx = NumOpsAdded;
1266 assert((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)));
1268 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1269 if (isPrePost && isStore) {
1270 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1271 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1276 bool DualReg = HasDualReg(Opcode);
1278 // Disassemble the dst/src operand.
1279 if (OpIdx >= NumOps)
1282 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1283 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1287 // Fill in LDRD and STRD's second operand.
// The pair register is implicitly Rd+1 (dual transfers use an even/odd pair).
1289 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1290 decodeRd(insn) + 1)));
1294 // After dst of a pre- and post-indexed load is the address base writeback.
1295 if (isPrePost && !isStore) {
1296 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1297 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1302 // Disassemble the base operand.
1303 if (OpIdx >= NumOps)
1306 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
1307 assert(!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1));
1308 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1312 // For reg/reg form, base reg is followed by +/- reg.
1313 // For immediate form, it is followed by +/- imm8.
1314 // See also ARMAddressingModes.h (Addressing Mode #3).
1315 if (OpIdx + 1 >= NumOps)
1318 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1319 (OpInfo[OpIdx+1].RegClass == 0));
1321 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
// AM3 uses its own immediate-vs-register selector bit (not the DP I bit).
1322 if (getAM3IBit(insn) == 1) {
1323 MI.addOperand(MCOperand::CreateReg(0));
1325 // Disassemble the 8-bit immediate offset.
1326 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1327 unsigned Imm4L = insn & 0xF;
1328 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
1329 MI.addOperand(MCOperand::CreateImm(Offset));
1331 // Disassemble the offset reg (Rm).
1332 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1334 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
1335 MI.addOperand(MCOperand::CreateImm(Offset));
// Miscellaneous-load (AM3) entry point: delegates to DisassembleLdStMiscFrm
// with isStore = false.
1342 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1343 unsigned short NumOps, unsigned &NumOpsAdded) {
1344 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
// Miscellaneous-store (AM3) entry point: delegates to DisassembleLdStMiscFrm
// with isStore = true.
1347 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1348 unsigned short NumOps, unsigned &NumOpsAdded) {
1349 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
1352 // The algorithm for disassembly of LdStMulFrm is different from others because
1353 // it explicitly populates the two predicate operands after operand 0 (the base)
1354 // and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
1355 // reglist with each affected register encoded as an MCOperand.
// Handles LDM/STM: base, AM4 submode+writeback imm, the two predicate
// operands, then one register operand per set bit in Inst{15-0}.
1356 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1357 unsigned short NumOps, unsigned &NumOpsAdded) {
1359 assert(NumOps == 5 && "LdStMulFrm expects NumOps of 5");
1361 unsigned &OpIdx = NumOpsAdded;
1363 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1364 MI.addOperand(MCOperand::CreateReg(Base));
// P/U bits select the increment/decrement before/after submode; W bit is
// the base writeback flag. Both are folded into one AM4 mode immediate.
1366 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1367 bool WB = getWBit(insn) == 1;
1368 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
1370 // Handling the two predicate operands before the reglist.
// Condition 0b1111 (unconditional) is normalized to AL (0b1110) here.
1371 int64_t CondVal = insn >> ARMII::CondShift;
1372 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1373 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1377 // Fill the variadic part of reglist.
1378 unsigned RegListBits = insn & ((1 << 16) - 1);
1379 for (unsigned i = 0; i < 16; ++i) {
1380 if ((RegListBits >> i) & 1) {
1381 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Exclusive and swap load/store operand layouts:
1390 // LDREX, LDREXB, LDREXH: Rd Rn
1391 // LDREXD: Rd Rd+1 Rn
1392 // STREX, STREXB, STREXH: Rd Rm Rn
1393 // STREXD: Rd Rm Rm+1 Rn
1395 // SWP, SWPB: Rd Rm Rn
1396 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1397 unsigned short NumOps, unsigned &NumOpsAdded) {
1399 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1400 unsigned &OpIdx = NumOpsAdded;
1405 && OpInfo[0].RegClass == ARM::GPRRegClassID
1406 && OpInfo[1].RegClass == ARM::GPRRegClassID);
// L bit (Inst{20}) distinguishes loads (1) from stores (0).
1408 bool isStore = slice(insn, 20, 20) == 0;
1409 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1411 // Add the destination operand.
1412 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1416 // Store register Exclusive needs a source operand.
1418 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Doubleword forms add the implicit second register of the pair (reg+1).
1423 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1424 decodeRm(insn)+1)));
1428 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1429 decodeRd(insn)+1)));
1433 // Finally add the pointer operand.
1434 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1441 // Misc. Arithmetic Instructions.
1443 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1444 // RBIT, REV, REV16, REVSH: Rd Rm
// Builds Rd, [Rn,] Rm, and — for the three-register PKH forms — a trailing
// imm5 shift-amount operand taken from Inst{11-7}.
1445 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1446 unsigned short NumOps, unsigned &NumOpsAdded) {
1448 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1449 unsigned &OpIdx = NumOpsAdded;
1454 && OpInfo[0].RegClass == ARM::GPRRegClassID
1455 && OpInfo[1].RegClass == ARM::GPRRegClassID;
1457 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1459 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1464 assert(NumOps >= 4);
1465 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1470 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1474 // If there is still an operand info left which is an immediate operand, add
1475 // an additional imm5 LSL/ASR operand.
1476 if (ThreeReg && OpInfo[OpIdx].RegClass == 0
1477 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1478 // Extract the 5-bit immediate field Inst{11-7}.
1479 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1480 MI.addOperand(MCOperand::CreateImm(ShiftAmt));
1487 // Extend instructions.
1488 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1489 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1490 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// Builds Rd, [Rn,] Rm and, when the operand info calls for it, a rotate
// immediate (0/8/16/24 bits) decoded from Inst{11-10}.
1491 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1492 unsigned short NumOps, unsigned &NumOpsAdded) {
1494 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1495 unsigned &OpIdx = NumOpsAdded;
1500 && OpInfo[0].RegClass == ARM::GPRRegClassID
1501 && OpInfo[1].RegClass == ARM::GPRRegClassID);
1503 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1505 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1510 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1515 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1519 // If there is still an operand info left which is an immediate operand, add
1520 // an additional rotate immediate operand.
1521 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
1522 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1523 // Extract the 2-bit rotate field Inst{11-10}.
1524 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1525 // Rotation by 8, 16, or 24 bits.
1526 MI.addOperand(MCOperand::CreateImm(rot << 3));
1533 /////////////////////////////////////
1535 // Utility Functions For VFP //
1537 /////////////////////////////////////
1539 // Extract/Decode Dd/Sd:
1541 // SP => d = UInt(Vd:D)
1542 // DP => d = UInt(D:Vd)
// Single precision places D as the low bit; double precision as the high bit.
1543 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1544 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1545 : (decodeRd(insn) | getDBit(insn) << 4);
1548 // Extract/Decode Dn/Sn:
1550 // SP => n = UInt(Vn:N)
1551 // DP => n = UInt(N:Vn)
// Single precision places N as the low bit; double precision as the high bit.
1552 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1553 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1554 : (decodeRn(insn) | getNBit(insn) << 4);
1557 // Extract/Decode Dm/Sm:
1559 // SP => m = UInt(Vm:M)
1560 // DP => m = UInt(M:Vm)
// Single precision places M as the low bit; double precision as the high bit.
1561 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1562 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1563 : (decodeRm(insn) | getMBit(insn) << 4);
// Expands the 8-bit VFP modified immediate (byte = abcdefgh) into the
// IEEE-754 bit pattern for a single (N == 32) or double (N == 64) precision
// float, per the ARM ARM VFPExpandImm() pseudocode: sign from bit 7,
// exponent derived from bit 6 (NOT(b6) replicated), mantissa from bits 5-0.
1568 static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
1569 assert(N == 32 || N == 64);
1572 unsigned bit6 = slice(byte, 6, 6);
// 32-bit layout: sign<<31, fraction bits 5-0 into the mantissa top.
1574 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1576 Result |= 0x1f << 25;
1578 Result |= 0x1 << 30;
// 64-bit layout: sign<<63, fraction bits 5-0 into the mantissa top.
1580 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1581 (uint64_t)slice(byte, 5, 0) << 48;
1583 Result |= 0xffL << 54;
1585 Result |= 0x1L << 62;
1591 // VFP Unary Format Instructions:
1593 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1594 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1595 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// Adds the destination FP register, then (except for compare-with-zero)
// the source FP register; SP vs DP is inferred per-operand from OpInfo.
1596 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1597 unsigned short NumOps, unsigned &NumOpsAdded) {
1599 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1601 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1602 unsigned &OpIdx = NumOpsAdded;
1606 unsigned RegClass = OpInfo[OpIdx].RegClass;
1607 assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
1608 bool isSP = (RegClass == ARM::SPRRegClassID);
1610 MI.addOperand(MCOperand::CreateReg(
1611 getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));
1614 // Early return for compare with zero instructions.
1615 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1616 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
// Re-query the class: VCVTDS/VCVTSD have mixed SP/DP operands.
1619 RegClass = OpInfo[OpIdx].RegClass;
1620 assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
1621 isSP = (RegClass == ARM::SPRRegClassID);
1623 MI.addOperand(MCOperand::CreateReg(
1624 getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
1630 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1631 // Some of them have operand constraints which tie the first operand in the
1632 // InOperandList to that of the dst. As far as asm printing is concerned, this
1633 // tied_to operand is simply skipped.
// Adds Rd, an optional tied placeholder (reg 0), then Rn and Rm — all from
// the same SP or DP register class.
1634 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1635 unsigned short NumOps, unsigned &NumOpsAdded) {
1637 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1639 const TargetInstrDesc &TID = ARMInsts[Opcode];
1640 const TargetOperandInfo *OpInfo = TID.OpInfo;
1641 unsigned &OpIdx = NumOpsAdded;
1645 unsigned RegClass = OpInfo[OpIdx].RegClass;
1646 assert(RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID);
1647 bool isSP = (RegClass == ARM::SPRRegClassID);
1649 MI.addOperand(MCOperand::CreateReg(
1650 getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));
1653 // Skip tied_to operand constraint.
1654 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1655 assert(NumOps >= 4);
1656 MI.addOperand(MCOperand::CreateReg(0));
1660 MI.addOperand(MCOperand::CreateReg(
1661 getRegisterEnum(RegClass, decodeVFPRn(insn, isSP))));
1664 MI.addOperand(MCOperand::CreateReg(
1665 getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
1671 // A8.6.295 vcvt (floating-point <-> integer)
1672 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1673 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1675 // A8.6.297 vcvt (floating-point and fixed-point)
1676 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
1677 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1678 unsigned short NumOps, unsigned &NumOpsAdded) {
1680 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1682 const TargetInstrDesc &TID = ARMInsts[Opcode];
1683 const TargetOperandInfo *OpInfo = TID.OpInfo;
// Inst{8} selects precision; Inst{17} distinguishes the fixed-point variant.
1685 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1686 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1687 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1691 assert(NumOps >= 3);
// Fixed-point form: fbits = (16 or 32) - UInt(imm4:i), per A8.6.297.
1692 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1693 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1694 MI.addOperand(MCOperand::CreateReg(
1695 getRegisterEnum(RegClassID,
1696 decodeVFPRd(insn, SP))));
// Operand 1 is tied to operand 0, so simply duplicate it.
1698 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1);
1699 MI.addOperand(MI.getOperand(0));
1701 assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
1702 !OpInfo[2].isOptionalDef());
1703 MI.addOperand(MCOperand::CreateImm(fbits));
1708 // The Rd (destination) and Rm (source) bits have different interpretations
1709 // depending on their single-precisonness.
// FP<->integer form: the integer side is always a single-precision Sreg.
1711 if (slice(insn, 18, 18) == 1) { // to_integer operation
1712 d = decodeVFPRd(insn, true /* Is Single Precision */);
1713 MI.addOperand(MCOperand::CreateReg(
1714 getRegisterEnum(ARM::SPRRegClassID, d)));
1715 m = decodeVFPRm(insn, SP);
1716 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, m)));
1718 d = decodeVFPRd(insn, SP);
1719 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, d)));
1720 m = decodeVFPRm(insn, true /* Is Single Precision */);
1721 MI.addOperand(MCOperand::CreateReg(
1722 getRegisterEnum(ARM::SPRRegClassID, m)));
1730 // VMOVRS - A8.6.330
1731 // Rt => Rd; Sn => UInt(Vn:N)
// Single SPR -> GPR move: adds the GPR destination then the SPR source.
1732 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1733 unsigned short NumOps, unsigned &NumOpsAdded) {
1735 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1737 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1739 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1740 decodeVFPRn(insn, true))));
1745 // VMOVRRD - A8.6.332
1746 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1748 // VMOVRRS - A8.6.331
1749 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// FP -> two-GPR move: adds both GPR destinations, then either an Sm/Sm+1
// pair or a single Dm source depending on the operand's register class.
1750 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1751 unsigned short NumOps, unsigned &NumOpsAdded) {
1753 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1755 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1756 unsigned &OpIdx = NumOpsAdded;
1758 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1760 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1764 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1765 unsigned Sm = decodeVFPRm(insn, true);
1766 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1768 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1772 MI.addOperand(MCOperand::CreateReg(
1773 getRegisterEnum(ARM::DPRRegClassID,
1774 decodeVFPRm(insn, false))));
1780 // VMOVSR - A8.6.330
1781 // Rt => Rd; Sn => UInt(Vn:N)
// GPR -> single SPR move: adds the SPR destination then the GPR source
// (the reverse operand order of VFPConv2Frm).
1782 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1783 unsigned short NumOps, unsigned &NumOpsAdded) {
1785 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1787 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1788 decodeVFPRn(insn, true))));
1789 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1795 // VMOVDRR - A8.6.332
1796 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1798 // VMOVRRS - A8.6.331
1799 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// Two-GPR -> FP move: adds the FP destination(s) first (Sm/Sm+1 pair or Dm),
// then the two GPR sources — the reverse direction of VFPConv3Frm.
1800 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1801 unsigned short NumOps, unsigned &NumOpsAdded) {
1803 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1805 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1806 unsigned &OpIdx = NumOpsAdded;
1810 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1811 unsigned Sm = decodeVFPRm(insn, true);
1812 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1814 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1818 MI.addOperand(MCOperand::CreateReg(
1819 getRegisterEnum(ARM::DPRRegClassID,
1820 decodeVFPRm(insn, false))));
1824 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1826 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1832 // VFP Load/Store Instructions.
1833 // VLDRD, VLDRS, VSTRD, VSTRS
// Adds Dd/Sd, the GPR base, then an AM5 offset immediate built from the
// U bit (add/sub) and the 8-bit offset in Inst{7-0}.
1834 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1835 unsigned short NumOps, unsigned &NumOpsAdded) {
1837 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
1839 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS) ? true : false;
1840 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1842 // Extract Dd/Sd for operand 0.
1843 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1845 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, RegD)));
1847 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1848 MI.addOperand(MCOperand::CreateReg(Base));
1850 // Next comes the AM5 Opcode.
1851 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1852 unsigned char Imm8 = insn & 0xFF;
1853 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1860 // VFP Load/Store Multiple Instructions.
1861 // This is similar to the algorithm for LDM/STM in that operand 0 (the base) and
1862 // operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
1863 // followed by a reglist of either DPR(s) or SPR(s).
1865 // VLDMD, VLDMS, VSTMD, VSTMS
1866 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1867 unsigned short NumOps, unsigned &NumOpsAdded) {
1869 assert(NumOps == 5 && "VFPLdStMulFrm expects NumOps of 5");
1871 unsigned &OpIdx = NumOpsAdded;
1873 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1874 MI.addOperand(MCOperand::CreateReg(Base));
1876 // Next comes the AM5 Opcode.
1877 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1878 bool WB = getWBit(insn) == 1;
1879 unsigned char Imm8 = insn & 0xFF;
1880 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, WB, Imm8)));
1882 // Handling the two predicate operands before the reglist.
// Condition 0b1111 (unconditional) is normalized to AL (0b1110) here.
1883 int64_t CondVal = insn >> ARMII::CondShift;
1884 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1885 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1889 bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VSTMS) ? true : false;
1890 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
// RegD is the first register of the transfer list; the loop below emits
// consecutive registers starting from it.
1893 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1895 // Fill the variadic part of reglist.
// Imm8 counts 32-bit words transferred, so DP lists hold Imm8/2 registers.
1896 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1897 for (unsigned i = 0; i < Regs; ++i) {
1898 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID,
1906 // Misc. VFP Instructions.
1907 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1908 // FCONSTD (DPR and a VFPf64Imm operand)
1909 // FCONSTS (SPR and a VFPf32Imm operand)
1910 // VMRS/VMSR (GPR operand)
1911 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1912 unsigned short NumOps, unsigned &NumOpsAdded) {
1914 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1915 unsigned &OpIdx = NumOpsAdded;
// FMSTAT takes no register operand at all.
1919 if (Opcode == ARM::FMSTAT)
1922 assert(NumOps >= 2);
// Operand 0's register class decides how the register field is decoded.
1924 unsigned RegEnum = 0;
1925 switch (OpInfo[0].RegClass) {
1926 case ARM::DPRRegClassID:
1927 RegEnum = getRegisterEnum(ARM::DPRRegClassID, decodeVFPRd(insn, false));
1929 case ARM::SPRRegClassID:
1930 RegEnum = getRegisterEnum(ARM::SPRRegClassID, decodeVFPRd(insn, true));
1932 case ARM::GPRRegClassID:
1933 RegEnum = getRegisterEnum(ARM::GPRRegClassID, decodeRd(insn));
1936 llvm_unreachable("Invalid reg class id");
1939 MI.addOperand(MCOperand::CreateReg(RegEnum));
1942 // Extract/decode the f64/f32 immediate.
1943 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
1944 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1945 // The asm syntax specifies the before-expanded <imm>.
1946 // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
1947 // Opcode == ARM::FCONSTD ? 64 : 32)
1948 MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
1955 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.cpp.inc file.
1956 #include "ThumbDisassemblerCore.cpp.inc"
1958 /////////////////////////////////////////////////////
1960 // Utility Functions For ARM Advanced SIMD //
1962 /////////////////////////////////////////////////////
1964 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1965 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1967 // A7.3 Register encoding
1969 // Extract/Decode NEON D/Vd:
1971 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
1972 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
1973 // handling it in the getRegisterEnum() utility function.
1974 // D = Inst{22}, Vd = Inst{15-12}
// Returns the 5-bit D:Vd value (D = Inst{22}, Vd = Inst{15-12}).
1975 static unsigned decodeNEONRd(uint32_t insn) {
1976 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
1977 | (insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask;
1980 // Extract/Decode NEON N/Vn:
1982 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
1983 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
1984 // handling it in the getRegisterEnum() utility function.
1985 // N = Inst{7}, Vn = Inst{19-16}
// Returns the 5-bit N:Vn value (N = Inst{7}, Vn = Inst{19-16}).
1986 static unsigned decodeNEONRn(uint32_t insn) {
1987 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
1988 | (insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask;
1991 // Extract/Decode NEON M/Vm:
1993 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
1994 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
1995 // handling it in the getRegisterEnum() utility function.
1996 // M = Inst{5}, Vm = Inst{3-0}
// Returns the 5-bit M:Vm value (M = Inst{5}, Vm = Inst{3-0}).
1997 static unsigned decodeNEONRm(uint32_t insn) {
1998 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
1999 | (insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask;
2010 } // End of unnamed namespace
2012 // size field -> Inst{11-10}
2013 // index_align field -> Inst{7-4}
2015 // The Lane Index interpretation depends on the Data Size:
2016 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2017 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2018 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2020 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
2021 static unsigned decodeLaneIndex(uint32_t insn) {
2022 unsigned size = insn >> 10 & 3;
2023 assert(size == 0 || size == 1 || size == 2);
2025 unsigned index_align = insn >> 4 & 0xF;
// Shift right by (1 + size) total, implementing the size-dependent
// index_align slicing described above in one expression.
2026 return (index_align >> 1) >> size;
2029 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2030 // op = Inst{5}, cmode = Inst{11-8}
2031 // i = Inst{24} (ARM architecture)
2032 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2033 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
// Expands the scattered 8-bit immediate per the cmode cases visible below:
// byte placement by shift, the 0x..FF/0x..FFFF "shifted ones" variants
// (cmode 12/13), and per-bit expansion to 0xFF bytes.
2034 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2035 unsigned char cmode = (insn >> 8) & 0xF;
2036 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2037 ((insn >> 16) & 7) << 4 |
2046 Imm64 = Imm8 << 8*(cmode >> 1 & 1);
2050 Imm64 = (Imm8 << 8) | 0xFF;
2051 else if (cmode == 13)
2052 Imm64 = (Imm8 << 16) | 0xFFFF;
2054 // Imm8 to be shifted left by how many bytes...
2055 Imm64 = Imm8 << 8*(cmode >> 1 & 3);
// Each set bit of Imm8 expands to a full 0xFF byte in the result.
2060 for (unsigned i = 0; i < 8; ++i)
2061 if ((Imm8 >> i) & 1)
2062 Imm64 |= 0xFF << 8*i;
2066 assert(0 && "Unreachable code!");
2073 // A8.6.339 VMUL, VMULL (by scalar)
2074 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2075 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// Decodes the restricted scalar register number for by-scalar multiplies,
// whose usable Dm range depends on the element size as described above.
2076 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2083 assert(0 && "Unreachable code!");
2088 // A8.6.339 VMUL, VMULL (by scalar)
2089 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2090 // ESize32 => index = Inst{5} (M) D0-D15
// Decodes the scalar lane index, whose bit sources depend on element size.
2091 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2094 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2096 return (insn >> 5) & 1;
2098 assert(0 && "Unreachable code!");
2103 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2104 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// Recovers <fbits> by inverting the encoding: fbits = 64 - imm6.
2105 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2106 return 64 - ((insn >> 16) & 0x3F);
2109 // A8.6.302 VDUP (scalar)
2110 // ESize8 => index = Inst{19-17}
2111 // ESize16 => index = Inst{19-18}
2112 // ESize32 => index = Inst{19}
// Decodes the VDUP source lane index; field width depends on element size.
2113 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2116 return (insn >> 17) & 7;
2118 return (insn >> 18) & 3;
2120 return (insn >> 19) & 1;
2122 assert(0 && "Unspecified element size!");
2127 // A8.6.328 VMOV (ARM core register to scalar)
2128 // A8.6.329 VMOV (scalar to ARM core register)
2129 // ESize8 => index = Inst{21:6-5}
2130 // ESize16 => index = Inst{21:6}
2131 // ESize32 => index = Inst{21}
// Decodes the scalar lane index for GPR<->scalar moves; bit sources vary
// with element size per the table above.
2132 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2135 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2137 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2139 return ((insn >> 21) & 1);
2141 assert(0 && "Unspecified element size!");
2146 // Imm6 = Inst{21-16}, L = Inst{7}
2148 // NormalShift == true (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2150 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2151 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2152 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2153 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
2155 // NormalShift == false (A8.6.367 VQSHL, A8.6.387 VSLI):
2157 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2158 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2159 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2160 // '1xxxxxx' => esize = 64; shift_amount = imm6
// Decodes the NEON shift amount: L:imm6's leading-one position fixes the
// element size, then the amount is computed per the tables above.
2162 static unsigned decodeNVSAmt(uint32_t insn, bool NormalShift) {
2163 ElemSize esize = ESizeNA;
2164 unsigned L = (insn >> 7) & 1;
2165 unsigned imm6 = (insn >> 16) & 0x3F;
2169 else if (imm6 >> 4 == 1)
2171 else if (imm6 >> 5 == 1)
2174 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// ESize64 uses 64 (== esize) rather than 2*esize in the NormalShift case.
2179 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2181 return esize == ESize64 ? imm6 : (imm6 - esize);
2185 // Imm4 = Inst{11-8}
2186 static unsigned decodeN3VImm(uint32_t insn) {
// Extract the 4-bit immediate Inst{11-8} used by 3-register NEON forms.
2187 return (insn >> 8) & 0xF;
2190 static bool DisassembleNSFormatNone(MCInst &MI, unsigned Opcode, uint32_t insn,
2191 unsigned short NumOps, unsigned &NumOpsAdded) {
// Placeholder entry for NSFormatNone: reaching this disassembler is a bug.
2192 assert(0 && "Unexpected NEON Sub-Format of NSFormatNone");
2197 // D[d] D[d2] ... R[addr] [TIED_TO] R[update] AM6 align(ignored)
2199 // D[d] D[d2] ... R[addr] R[update] AM6 align(ignored) TIED_TO ... imm(idx)
2201 // R[addr] [TIED_TO] R[update] AM6 align(ignored) D[d] D[d2] ...
2203 // R[addr] R[update] AM6 align(ignored) D[d] D[d2] ... [imm(idx)]
2205 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
2206 static bool DisassembleVLDSTLane0(MCInst &MI, unsigned Opcode, uint32_t insn,
2207 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced) {
// Builds the MCOperand list for VLD*/VST* (including single-lane variants).
// Store selects the store operand order (address operands first, then the
// DPR/QPR list); otherwise the load order (register list first). DblSpaced
// selects a register increment of 2 for double-spaced lists.
// NOTE(review): this listing elides many lines (braces, OpIdx/Rd increments,
// the WB computation, and the Store/Load branch headers); the comments below
// are grounded only in the visible lines.
2209 const TargetInstrDesc &TID = ARMInsts[Opcode];
2210 const TargetOperandInfo *OpInfo = TID.OpInfo;
2212 // At least one DPR register plus addressing mode #6.
2213 assert(NumOps >= 5);
2215 unsigned &OpIdx = NumOpsAdded;
2219 // We have homogeneous NEON registers for Load/Store.
2220 unsigned RegClass = 0;
2222 // Double-spaced registers have increments of 2.
2223 unsigned Inc = DblSpaced ? 2 : 1;
2225 unsigned Rn = decodeRn(insn);
2226 unsigned Rm = decodeRm(insn);
2227 unsigned Rd = decodeNEONRd(insn);
2229 // A7.7.1 Advanced SIMD addressing mode.
2232 // LLVM Addressing Mode #6.
2233 unsigned RmEnum = 0;
2235 RmEnum = getRegisterEnum(ARM::GPRRegClassID, Rm);
// Store path: address (possibly TIED_TO Rn), Rm, AM6 opc + alignment, then
// the homogeneous DPR/QPR register list and an optional lane index.
2238 // Consume AddrMode6 (possible TIED_TO Rn), the DPR/QPR's, then possible
2240 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID);
2241 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2244 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2246 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2251 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
2252 MI.addOperand(MCOperand::CreateReg(RmEnum));
2254 assert(OpIdx < NumOps &&
2255 OpInfo[OpIdx].RegClass == 0 && OpInfo[OpIdx+1].RegClass == 0);
2256 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM6Opc(WB)));
2257 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2260 assert(OpIdx < NumOps &&
2261 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2262 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID));
2264 RegClass = OpInfo[OpIdx].RegClass;
2265 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
// VST1q* opcodes use the (RegClass, Rd, true) getRegisterEnum overload —
// presumably the Q-register-as-D-pair mapping; confirm against the helper.
2266 if (Opcode >= ARM::VST1q16 && Opcode <= ARM::VST1q8)
2267 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
2269 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
2274 // Handle possible lane index.
2275 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2276 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2277 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// Load path mirrors the store path with the register list first.
2282 // Consume the DPR/QPR's, AddrMode6 (possible TIED_TO Rn), possible TIED_TO
2283 // DPR/QPR's (ignored), then possible lane index.
2284 RegClass = OpInfo[0].RegClass;
2286 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
2287 if (Opcode >= ARM::VLD1q16 && Opcode <= ARM::VLD1q8)
2288 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
2290 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
2295 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
2296 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2299 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2301 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2306 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID);
2307 MI.addOperand(MCOperand::CreateReg(RmEnum));
2309 assert(OpIdx < NumOps &&
2310 OpInfo[OpIdx].RegClass == 0 && OpInfo[OpIdx+1].RegClass == 0);
2311 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM6Opc(WB)));
2312 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
// TIED_TO DPR/QPR source operands are represented with a 0 (ignored) reg.
2315 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
2316 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1);
2317 MI.addOperand(MCOperand::CreateReg(0));
2321 // Handle possible lane index.
2322 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2323 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef() ) {
2324 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2333 // If L (Inst{21}) == 0, store instructions.
2334 // DblSpaced = false.
2335 static bool DisassembleVLDSTLane(MCInst &MI, unsigned Opcode, uint32_t insn,
2336 unsigned short NumOps, unsigned &NumOpsAdded) {
// Thin wrapper: Store = (L bit Inst{21} is 0), single-spaced register list.
2338 return DisassembleVLDSTLane0(MI, Opcode, insn, NumOps, NumOpsAdded,
2339 slice(insn, 21, 21) == 0, false);
2342 // If L (Inst{21}) == 0, store instructions.
2343 // DblSpaced = true.
2344 static bool DisassembleVLDSTLaneDbl(MCInst &MI, unsigned Opcode, uint32_t insn,
2345 unsigned short NumOps, unsigned &NumOpsAdded) {
// Thin wrapper: Store = (L bit Inst{21} is 0), double-spaced register list.
2347 return DisassembleVLDSTLane0(MI, Opcode, insn, NumOps, NumOpsAdded,
2348 slice(insn, 21, 21) == 0, true);
2351 // VLDRQ (vldmia), VSTRQ (vstmia)
2353 static bool DisassembleVLDSTRQ(MCInst &MI, unsigned Opcode, uint32_t insn,
2354 unsigned short NumOps, unsigned &NumOpsAdded) {
// Disassembles the pseudo load/store of a whole Q register (lowered as a
// D-register-pair vldmia/vstmia): operands are Qd, Rn, then an AM4 mode imm.
2356 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2358 assert(NumOps >= 3 &&
2359 OpInfo[0].RegClass == ARM::QPRRegClassID &&
2360 OpInfo[1].RegClass == ARM::GPRRegClassID &&
2361 OpInfo[2].RegClass == 0);
2363 // Qd = Inst{22:15-12} => NEON Rd
2364 MI.addOperand(MCOperand::CreateReg(
2365 getRegisterEnum(ARM::QPRRegClassID,
2366 decodeNEONRd(insn), true)));
2368 // Rn = Inst{19-16} => ARM Rn
2369 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2372 // Next comes the AM4 Opcode.
2373 assert(Opcode == ARM::VLDRQ || Opcode == ARM::VSTRQ);
// Sub-mode (ia/ib/da/db) comes from the P/U bits; WB from the W bit.
2374 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
2375 bool WB = getWBit(insn) == 1;
2376 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode, WB)));
2384 static bool DisassembleNVdImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2385 unsigned short NumOps, unsigned &NumOpsAdded) {
// A7.4.6: one register and a modified immediate value (VMOVv* only).
// Adds Dd/Qd followed by the decoded modified-immediate operand.
2387 const TargetInstrDesc &TID = ARMInsts[Opcode];
2388 const TargetOperandInfo *OpInfo = TID.OpInfo;
2390 assert(NumOps >= 2 &&
2391 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2392 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2393 (OpInfo[1].RegClass == 0));
2395 // Qd/Dd = Inst{22:15-12} => NEON Rd
2396 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[0].RegClass,
2397 decodeNEONRd(insn))));
// Element size is inferred from the opcode (i8/i16/i32/i64 variants).
// NOTE(review): the switch (Opcode) header and the esize assignment lines
// between the case labels are elided in this listing.
2399 ElemSize esize = ESizeNA;
2402 case ARM::VMOVv16i8:
2405 case ARM::VMOVv4i16:
2406 case ARM::VMOVv8i16:
2409 case ARM::VMOVv2i32:
2410 case ARM::VMOVv4i32:
2413 case ARM::VMOVv1i64:
2414 case ARM::VMOVv2i64:
2417 assert(0 && "Unreachable code!");
2421 // One register and a modified immediate value.
2422 // Add the imm operand.
2423 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
// NOTE(review): the opening of this unnamed-namespace enum (N2VFlag, used by
// DisassembleNVdVmImm0 below) and its leading enumerators are elided in this
// listing; only the tail enumerators and the namespace close are visible.
2433 N2V_VectorShiftLeftLong,
2434 N2V_VectorConvert_Between_Float_Fixed
2436 } // End of unnamed namespace
2438 // Vector Convert [between floating-point and fixed-point]
2439 // Qd/Dd Qm/Dm [fbits]
2441 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2442 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2445 // Vector Shift Left Long (with maximum shift count) Instructions.
2446 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
2448 // Vector Move Long:
2451 // Vector Move Narrow:
2455 static bool DisassembleNVdVmImm0(MCInst &MI, unsigned Opc, uint32_t insn,
2456 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag = N2V_None) {
// Common worker for two-register NEON forms (Vd, Vm, optional immediate);
// the N2VFlag selects how the trailing immediate is computed.
// NOTE(review): some lines (OpIdx init/increments, closing parens of the
// nested ternaries, braces) are elided in this listing.
2458 const TargetInstrDesc &TID = ARMInsts[Opc];
2459 const TargetOperandInfo *OpInfo = TID.OpInfo;
2461 assert(NumOps >= 2 &&
2462 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2463 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2464 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2465 OpInfo[1].RegClass == ARM::QPRRegClassID));
2467 unsigned &OpIdx = NumOpsAdded;
2471 ElemSize esize = ESizeNA;
2472 if (Flag == N2V_VectorShiftLeftLong) {
2473 // VSHLL has maximum shift count as the imm, inferred from its size.
2474 assert(Opc == ARM::VSHLLi16 || Opc == ARM::VSHLLi32 || Opc == ARM::VSHLLi8);
2475 esize = Opc == ARM::VSHLLi8 ? ESize8
2476 : (Opc == ARM::VSHLLi16 ? ESize16
2479 if (Flag == N2V_VectorDupLane) {
2480 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2481 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q);
2482 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2483 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2487 // Qd/Dd = Inst{22:15-12} => NEON Rd
2488 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2489 decodeNEONRd(insn))));
// A TIED_TO source (e.g. accumulate forms) is represented with reg 0.
2493 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2495 MI.addOperand(MCOperand::CreateReg(0));
2499 // Dm = Inst{5:3-0} => NEON Rm
2500 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2501 decodeNEONRm(insn))));
2504 // Add the imm operand, if required.
2505 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2506 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// 0xFFFFFFFF is a sentinel meaning "no flag matched"; caught by the assert.
2508 unsigned imm = 0xFFFFFFFF;
2510 if (Flag == N2V_VectorShiftLeftLong)
2511 imm = static_cast<unsigned>(esize);
2512 if (Flag == N2V_VectorDupLane)
2513 imm = decodeNVLaneDupIndex(insn, esize);
2514 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2515 imm = decodeVCVTFractionBits(insn);
2517 assert(imm != 0xFFFFFFFF);
2518 MI.addOperand(MCOperand::CreateImm(imm));
2525 static bool DisassembleNVdVmImm(MCInst &MI, unsigned Opc, uint32_t insn,
2526 unsigned short NumOps, unsigned &NumOpsAdded) {
// Two-register form with no special immediate (Flag defaults to N2V_None).
2528 return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded);
2530 static bool DisassembleNVdVmImmVCVT(MCInst &MI, unsigned Opc, uint32_t insn,
2531 unsigned short NumOps, unsigned &NumOpsAdded) {
// VCVT float<->fixed: the immediate is the fraction-bit count (fbits).
2533 return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
2534 N2V_VectorConvert_Between_Float_Fixed);
2536 static bool DisassembleNVdVmImmVDupLane(MCInst &MI, unsigned Opc, uint32_t insn,
2537 unsigned short NumOps, unsigned &NumOpsAdded) {
// VDUPLN*: the immediate is the embedded lane index.
// NOTE(review): the trailing argument line (presumably N2V_VectorDupLane) is
// elided in this listing.
2539 return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
2542 static bool DisassembleNVdVmImmVSHLL(MCInst &MI, unsigned Opc, uint32_t insn,
2543 unsigned short NumOps, unsigned &NumOpsAdded) {
// VSHLLi*: the immediate is the maximum shift count (== element size).
2545 return DisassembleNVdVmImm0(MI, Opc, insn, NumOps, NumOpsAdded,
2546 N2V_VectorShiftLeftLong);
2549 // Vector Transpose/Unzip/Zip Instructions
2550 // Qd/Dd Qm/Dm [Qd/Dd (TIED_TO)] [Qm/Dm (TIED_TO)]
2551 static bool DisassembleNVectorShuffle(MCInst &MI,unsigned Opcode,uint32_t insn,
2552 unsigned short NumOps, unsigned &NumOpsAdded) {
// VTRN/VUZP/VZIP: Vd, Vm, then two TIED_TO source copies added as reg 0.
2554 const TargetInstrDesc &TID = ARMInsts[Opcode];
2555 const TargetOperandInfo *OpInfo = TID.OpInfo;
2557 assert(NumOps >= 4 &&
2558 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2559 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2560 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2561 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2562 (OpInfo[2].RegClass == ARM::DPRRegClassID ||
2563 OpInfo[2].RegClass == ARM::QPRRegClassID) &&
2564 (OpInfo[3].RegClass == ARM::DPRRegClassID ||
2565 OpInfo[3].RegClass == ARM::QPRRegClassID));
2567 unsigned &OpIdx = NumOpsAdded;
2571 // Qd/Dd = Inst{22:15-12} => NEON Rd
2572 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2573 decodeNEONRd(insn))));
2576 // Dm = Inst{5:3-0} => NEON Rm
2577 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2578 decodeNEONRm(insn))));
// Both remaining operands must be tied; they are filled with 0 (ignored).
2581 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2582 TID.getOperandConstraint(OpIdx+1, TOI::TIED_TO) != -1);
2584 MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx;
2585 MI.addOperand(MCOperand::CreateReg(0)); ++OpIdx;
2590 // Vector Shift [Accumulate] Instructions.
2591 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2592 static bool DisassembleNVectorShift0(MCInst &MI, unsigned Opcode, uint32_t insn,
2593 unsigned short NumOps, unsigned &NumOpsAdded, bool NormalShift = true) {
// Common worker for immediate vector shifts; NormalShift picks which of the
// two decodeNVSAmt() shift-amount interpretations applies (see its tables).
2595 const TargetInstrDesc &TID = ARMInsts[Opcode];
2596 const TargetOperandInfo *OpInfo = TID.OpInfo;
2598 assert(NumOps >= 3 &&
2599 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2600 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2601 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2602 OpInfo[1].RegClass == ARM::QPRRegClassID));
2604 unsigned &OpIdx = NumOpsAdded;
2608 // Qd/Dd = Inst{22:15-12} => NEON Rd
2609 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2610 decodeNEONRd(insn))));
// Accumulate forms (e.g. VSRA) have a TIED_TO source, represented as reg 0.
2613 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2615 MI.addOperand(MCOperand::CreateReg(0));
2619 assert(OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2620 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID);
2622 // Qm/Dm = Inst{5:3-0} => NEON Rm
2623 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2624 decodeNEONRm(insn))));
2627 assert(OpInfo[OpIdx].RegClass == 0);
2629 // Add the imm operand.
2630 MI.addOperand(MCOperand::CreateImm(decodeNVSAmt(insn, NormalShift)));
2636 // Normal shift amount interpretation.
2637 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2638 unsigned short NumOps, unsigned &NumOpsAdded) {
// Wrapper selecting the "normal" shift-amount decoding (VRSHR/VQSHRN style).
2640 return DisassembleNVectorShift0(MI, Opcode, insn, NumOps, NumOpsAdded, true);
2642 // Different shift amount interpretation.
2643 static bool DisassembleNVectorShift2(MCInst &MI, unsigned Opcode, uint32_t insn,
2644 unsigned short NumOps, unsigned &NumOpsAdded) {
// Wrapper selecting the alternate shift-amount decoding (VQSHL/VSLI style).
2646 return DisassembleNVectorShift0(MI, Opcode, insn, NumOps, NumOpsAdded, false);
// NOTE(review): the opening of this unnamed-namespace enum (N3VFlag, used by
// DisassembleNVdVnVmImm0 below) and its leading enumerators are elided in
// this listing; only the final enumerator and the namespace close remain.
2654 N3V_Multiply_By_Scalar
2656 } // End of unnamed namespace
2658 // NEON Three Register Instructions with Optional Immediate Operand
2660 // Vector Extract Instructions.
2661 // Qd/Dd Qn/Dn Qm/Dm imm4
2663 // Vector Shift (Register) Instructions.
2664 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2666 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2667 // Qd/Dd Qn/Dn RestrictedDm index
2670 static bool DisassembleNVdVnVmImm0(MCInst &MI, unsigned Opcode, uint32_t insn,
2671 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag = N3V_None) {
// Common worker for three-register NEON forms. The flag controls operand
// order (shift-register forms swap n/m), whether the trailing immediate is
// imm4 (VEXT) or a restricted-Dm lane index (multiply-by-scalar).
// NOTE(review): some lines (OpIdx updates, the Imm declaration, branch
// headers, braces) are elided in this listing.
2673 const TargetInstrDesc &TID = ARMInsts[Opcode];
2674 const TargetOperandInfo *OpInfo = TID.OpInfo;
2676 assert(NumOps >= 3 &&
2677 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2678 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2679 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2680 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2681 (OpInfo[2].RegClass != 0));
2683 unsigned &OpIdx = NumOpsAdded;
2687 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2688 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2689 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2690 ElemSize esize = ESizeNA;
2691 if (Flag == N3V_Multiply_By_Scalar) {
// size == 1 -> 16-bit elements, size == 2 -> 32-bit; other values invalid.
2692 unsigned size = (insn >> 20) & 3;
2693 if (size == 1) esize = ESize16;
2694 if (size == 2) esize = ESize32;
2695 assert (esize == ESize16 || esize == ESize32);
2698 // Qd/Dd = Inst{22:15-12} => NEON Rd
2699 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2700 decodeNEONRd(insn))));
2703 // VABA, VABAL, VBSLd, VBSLq, ...
2704 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2706 MI.addOperand(MCOperand::CreateReg(0));
2710 // Dn = Inst{7:19-16} => NEON Rn
2712 // Dm = Inst{5:3-0} => NEON Rm
2713 MI.addOperand(MCOperand::CreateReg(
2714 getRegisterEnum(OpInfo[OpIdx].RegClass,
2715 VdVnVm ? decodeNEONRn(insn)
2716 : decodeNEONRm(insn))));
2719 // Dm = Inst{5:3-0} => NEON Rm
2721 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2723 // Dn = Inst{7:19-16} => NEON Rn
2724 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2725 : decodeNEONRm(insn))
2726 : decodeNEONRn(insn);
2728 MI.addOperand(MCOperand::CreateReg(
2729 getRegisterEnum(OpInfo[OpIdx].RegClass, m)));
2732 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2733 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2734 // Add the imm operand.
2737 Imm = decodeN3VImm(insn);
2738 else if (IsDmRestricted)
2739 Imm = decodeRestrictedDmIndex(insn, esize);
2741 assert(0 && "Internal error: unreachable code!");
2743 MI.addOperand(MCOperand::CreateImm(Imm));
2750 static bool DisassembleNVdVnVmImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2751 unsigned short NumOps, unsigned &NumOpsAdded) {
// Plain three-register form (Flag defaults to N3V_None).
2753 return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded);
2755 static bool DisassembleNVdVnVmImmVectorShift(MCInst &MI, unsigned Opcode,
2756 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
// Register-shift form: m/n operand order is swapped by the worker.
// NOTE(review): the trailing argument line (presumably N3V_VectorShift) is
// elided in this listing.
2758 return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
2761 static bool DisassembleNVdVnVmImmVectorExtract(MCInst &MI, unsigned Opcode,
2762 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
// VEXT form: trailing operand is the imm4 byte-position immediate.
// NOTE(review): the trailing argument line (presumably N3V_VectorExtract) is
// elided in this listing.
2764 return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
2767 static bool DisassembleNVdVnVmImmMulScalar(MCInst &MI, unsigned Opcode,
2768 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded) {
// Multiply-by-scalar form: restricted Dm plus an embedded lane index.
2770 return DisassembleNVdVnVmImm0(MI, Opcode, insn, NumOps, NumOpsAdded,
2771 N3V_Multiply_By_Scalar);
2774 // Vector Table Lookup
2776 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2777 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2778 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2779 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
2780 static bool DisassembleVTBL(MCInst &MI, unsigned Opcode, uint32_t insn,
2781 unsigned short NumOps, unsigned &NumOpsAdded) {
// Builds Dd, optional tied Dd, the table list {Dn..Dn+Len-1}, then index Dm.
2783 const TargetInstrDesc &TID = ARMInsts[Opcode];
2784 const TargetOperandInfo *OpInfo = TID.OpInfo;
2786 assert(NumOps >= 3 &&
2787 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2788 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2789 OpInfo[2].RegClass == ARM::DPRRegClassID);
2791 unsigned &OpIdx = NumOpsAdded;
2795 unsigned Rn = decodeNEONRn(insn);
2797 // {Dn} encoded as len = 0b00
2798 // {Dn Dn+1} encoded as len = 0b01
2799 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2800 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
// List length is len + 1, with len = Inst{9-8}.
2801 unsigned Len = slice(insn, 9, 8) + 1;
2803 // Dd (the destination vector)
2804 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2805 decodeNEONRd(insn))));
2808 // Process tied_to operand constraint.
// VTBX forms tie Dd to a source; duplicate the already-added Dd operand.
2810 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2811 MI.addOperand(MI.getOperand(Idx));
2815 // Do the <list> now.
2816 for (unsigned i = 0; i < Len; ++i) {
2817 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID);
2818 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2823 // Dm (the index vector)
2824 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID);
2825 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2826 decodeNEONRm(insn))));
2832 /// NEONFuncPtrs - NEONFuncPtrs maps NSFormat to corresponding DisassembleFP.
2833 /// We divide the disassembly task into different categories, with each one
2834 /// corresponding to a specific instruction encoding format. There could be
2835 /// exceptions when handling a specific format, and that is why the Opcode is
2836 /// also present in the function prototype.
// NOTE(review): the array is indexed by NSFormat, so entry order must match
// the NSFormat enum exactly — confirm against its declaration. This listing
// elides a few entries (e.g. the one following the VMOVv comment and the one
// following the table-lookup comment at the end) plus the closing brace.
2837 static const DisassembleFP NEONFuncPtrs[] = {
2838 // This will assert().
2839 &DisassembleNSFormatNone,
2841 // VLD and VST (including one lane) Instructions.
2842 &DisassembleVLDSTLane,
2844 // VLD and VST (including one lane) Double-Spaced Instructions.
2845 &DisassembleVLDSTLaneDbl,
2847 // A8.6.319 VLDM & A8.6.399 VSTM
2848 // LLVM defines VLDRQ/VSTRQ to load/store a Q register as a D register pair.
2849 &DisassembleVLDSTRQ,
2851 // A7.4.6 One register and a modified immediate value
2852 // 1-Register Instructions with imm.
2853 // LLVM only defines VMOVv instructions.
2856 // 2-Register Instructions with no imm.
2857 &DisassembleNVdVmImm,
2859 // 2-Register Instructions with imm (vector convert float/fixed point).
2860 &DisassembleNVdVmImmVCVT,
2862 // 2-Register Instructions with imm (vector dup lane).
2863 &DisassembleNVdVmImmVDupLane,
2865 // 2-Register Instructions with imm (vector shift left long).
2866 &DisassembleNVdVmImmVSHLL,
2868 // Vector Transpose/Unzip/Zip Instructions.
2869 &DisassembleNVectorShuffle,
2871 // Vector Shift [Narrow Accumulate] Instructions.
2872 &DisassembleNVectorShift,
2874 // Vector Shift Instructions with different interpretation of shift amount.
2875 &DisassembleNVectorShift2,
2877 // 3-Register Data-Processing Instructions.
2878 &DisassembleNVdVnVmImm,
2880 // Vector Shift (Register) Instructions.
2881 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
2882 &DisassembleNVdVnVmImmVectorShift,
2884 // Vector Extract Instructions.
2885 &DisassembleNVdVnVmImmVectorExtract,
2887 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
2888 // By Scalar Instructions.
2889 &DisassembleNVdVnVmImmMulScalar,
2891 // Vector Table Lookup uses byte indexes in a control vector to look up byte
2892 // values in a table and generate a new vector.
2897 static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2898 unsigned short NumOps, unsigned &NumOpsAdded) {
// Generic NEON format entry: dispatch goes through NEONFuncPtrs instead, so
// reaching this function is a bug.
2899 assert(0 && "Code is not reachable");
2903 // Vector Get Lane (move scalar to ARM core register) Instructions.
2904 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
2905 static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2906 unsigned short NumOps, unsigned &NumOpsAdded) {
// Builds Rt, Dn, then the lane index for scalar-to-core-register VMOV.
2908 const TargetInstrDesc &TID = ARMInsts[Opcode];
2909 unsigned short NumDefs = TID.getNumDefs();
2910 const TargetOperandInfo *OpInfo = TID.OpInfo;
2912 assert(NumDefs == 1 && NumOps >= 3 &&
2913 OpInfo[0].RegClass == ARM::GPRRegClassID &&
2914 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2915 OpInfo[2].RegClass == 0);
// Element size inferred from the opcode: i32 -> 32, s16/u16 -> 16, else 8.
// NOTE(review): the "ElemSize esize =" line opening this ternary chain is
// elided in this listing.
2918 Opcode == ARM::VGETLNi32 ? ESize32
2919 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
2922 // Rt = Inst{15-12} => ARM Rd
2923 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2926 // Dn = Inst{7:19-16} => NEON Rn
2927 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2928 decodeNEONRn(insn))));
2930 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2936 // Vector Set Lane (move ARM core register to scalar) Instructions.
2937 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
2938 static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2939 unsigned short NumOps, unsigned &NumOpsAdded) {
// Builds Dd, the tied Dd source (as reg 0), Rt, then the lane index.
2941 const TargetInstrDesc &TID = ARMInsts[Opcode];
2942 unsigned short NumDefs = TID.getNumDefs();
2943 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NOTE(review): this assert reads OpInfo[3] but only guarantees NumOps >= 3;
// if NumOps can be exactly 3 this indexes past the described operands —
// probably should be NumOps >= 4. Confirm against the VSETLNi* definitions.
2945 assert(NumDefs == 1 && NumOps >= 3 &&
2946 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2947 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2948 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
2949 OpInfo[2].RegClass == ARM::GPRRegClassID &&
2950 OpInfo[3].RegClass == 0);
// Element size inferred from the opcode: i8 -> 8, i16 -> 16, else 32.
// NOTE(review): the "ElemSize esize =" line opening this ternary chain is
// elided in this listing.
2953 Opcode == ARM::VSETLNi8 ? ESize8
2954 : (Opcode == ARM::VSETLNi16 ? ESize16
2957 // Dd = Inst{7:19-16} => NEON Rn
2958 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2959 decodeNEONRn(insn))));
// TIED_TO Dd source operand, represented as reg 0 (ignored).
2962 MI.addOperand(MCOperand::CreateReg(0));
2964 // Rt = Inst{15-12} => ARM Rd
2965 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2968 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2974 // Vector Duplicate Instructions (from ARM core register to all elements).
2975 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
2976 static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2977 unsigned short NumOps, unsigned &NumOpsAdded) {
// Builds Qd/Dd then Rt for VDUP from an ARM core register.
2979 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2981 assert(NumOps >= 2 &&
2982 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2983 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2984 OpInfo[1].RegClass == ARM::GPRRegClassID);
2986 unsigned RegClass = OpInfo[0].RegClass;
2988 // Qd/Dd = Inst{7:19-16} => NEON Rn
2989 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,
2990 decodeNEONRn(insn))));
2992 // Rt = Inst{15-12} => ARM Rd
2993 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
3003 static inline bool MemBarrierInstr(uint32_t insn) {
// Recognizes memory-barrier encodings: Inst{31-20} == 0xf57 with
// Inst{7-4} in [4,6] (presumably DSB/DMB/ISB — confirm against the ARM ARM).
// NOTE(review): the return statements of this function are elided in this
// listing; only the predicate test is visible.
3004 unsigned op7_4 = slice(insn, 7, 4);
3005 if (slice(insn, 31, 20) == 0xf57 && (op7_4 >= 4 && op7_4 <= 6))
3011 static inline bool PreLoadOpcode(unsigned Opcode) {
// True for the preload-hint opcodes (PLD/PLDW/PLI, immediate and register
// forms) that are handled by DisassemblePreLoadFrm.
// NOTE(review): the switch header, return true, and default return lines are
// elided in this listing; only the case labels are visible.
3013 case ARM::PLDi: case ARM::PLDr:
3014 case ARM::PLDWi: case ARM::PLDWr:
3015 case ARM::PLIi: case ARM::PLIr:
3022 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3023 unsigned short NumOps, unsigned &NumOpsAdded) {
3025 // Preload Data/Instruction requires either 2 or 4 operands.
3026 // PLDi, PLDWi, PLIi: Rn [+/-]imm12 add = (U == '1')
3027 // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
3029 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
3032 if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
3033 unsigned Imm12 = slice(insn, 11, 0);
3034 bool Negative = getUBit(insn) == 0;
// NOTE(review): the negative offset is computed as -1 - Imm12, i.e.
// -(Imm12 + 1), not -Imm12. If that off-by-one is not a deliberate encoding
// trick (e.g. distinguishing "-0"), this should read `-1 * Imm12` — confirm.
3035 int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
3036 MI.addOperand(MCOperand::CreateImm(Offset));
// Register form: Rn Rm plus an addrmode2 opcode immediate.
3039 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
3042 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
3044 // Inst{6-5} encodes the shift opcode.
3045 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
3046 // Inst{11-7} encodes the imm5 shift amount.
3047 unsigned ShImm = slice(insn, 11, 7);
3049 // A8.4.1. Possible rrx or shift amount of 32...
3050 getImmShiftSE(ShOp, ShImm);
3051 MI.addOperand(MCOperand::CreateImm(
3052 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
3059 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3060 unsigned short NumOps, unsigned &NumOpsAdded) {
// Handles the miscellaneous ARM instructions: memory barriers, CPS, DBG,
// BKPT, and the preload hints; asserts on anything else.
// NOTE(review): several return statements and braces are elided in this
// listing (original line numbers jump between the visible blocks).
3062 if (MemBarrierInstr(insn))
3080 // CPS has a singleton $opt operand that contains the following information:
3081 // opt{4-0} = mode from Inst{4-0}
3082 // opt{5} = changemode from Inst{17}
3083 // opt{8-6} = AIF from Inst{8-6}
3084 // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
3085 if (Opcode == ARM::CPS) {
3086 unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
3087 slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
3088 MI.addOperand(MCOperand::CreateImm(Option));
3093 // DBG has its option specified in Inst{3-0}.
3094 if (Opcode == ARM::DBG) {
3095 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3100 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3101 if (Opcode == ARM::BKPT) {
3102 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3103 slice(insn, 3, 0)));
3108 if (PreLoadOpcode(Opcode))
3109 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded);
3111 assert(0 && "Unexpected misc instruction!");
3115 static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3116 unsigned short NumOps, unsigned &NumOpsAdded) {
// Placeholder: Thumb misc instructions are not expected through this path.
3118 assert(0 && "Unexpected thumb misc. instruction!");
3122 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3123 /// We divide the disassembly task into different categories, with each one
3124 /// corresponding to a specific instruction encoding format. There could be
3125 /// exceptions when handling a specific format, and that is why the Opcode is
3126 /// also present in the function prototype.
// NOTE(review): indexed by ARMFormat, so entry order must match that enum —
// confirm against its declaration. This listing elides several entries (the
// original numbering jumps, e.g. 3127->3131, 3133->3136, 3139->3141) and the
// closing brace.
3127 static const DisassembleFP FuncPtrs[] = {
3131 &DisassembleBrMiscFrm,
3133 &DisassembleDPSoRegFrm,
3136 &DisassembleLdMiscFrm,
3137 &DisassembleStMiscFrm,
3138 &DisassembleLdStMulFrm,
3139 &DisassembleArithMiscFrm,
3141 &DisassembleVFPUnaryFrm,
3142 &DisassembleVFPBinaryFrm,
3143 &DisassembleVFPConv1Frm,
3144 &DisassembleVFPConv2Frm,
3145 &DisassembleVFPConv3Frm,
3146 &DisassembleVFPConv4Frm,
3147 &DisassembleVFPConv5Frm,
3148 &DisassembleVFPLdStFrm,
3149 &DisassembleVFPLdStMulFrm,
3150 &DisassembleVFPMiscFrm,
3151 &DisassembleThumbFrm,
3152 &DisassembleNEONFrm,
3153 &DisassembleNEONGetLnFrm,
3154 &DisassembleNEONSetLnFrm,
3155 &DisassembleNEONDupFrm,
3156 &DisassembleLdStExFrm,
3157 &DisassembleMiscFrm,
3158 &DisassembleThumbMiscFrm,
3162 /// ARMAlgorithm - ARMAlgorithm implements ARMDisassemblyAlgorithm for solving
3163 /// the problem of building the MCOperands of an MCInst. Construction of
3164 /// ARMAlgorithm requires passing in a function pointer with the DisassembleFP
3166 class ARMAlgorithm : public ARMDisassemblyAlgorithm {
3167 /// Algorithms - Algorithms stores a map from Format to ARMAlgorithm*.
3168 static std::vector<ARMAlgorithm*> Algorithms;
3169 /// NSAlgorithms - NSAlgorithms stores a map from NSFormat to ARMAlgorithm*.
3170 static std::vector<ARMAlgorithm*> NSAlgorithms;
// The per-format disassembly callback this instance dispatches to.
3172 DisassembleFP Disassemble;
3175 /// GetInstance - GetInstance returns an instance of ARMAlgorithm given the
3176 /// encoding Format. API clients should not free up the returned instance.
// NOTE(review): this lazy initialization has no visible synchronization, so
// first use from multiple threads would race on the static vectors; the
// heap-allocated instances are also never freed (no visible delete) —
// presumably an intentional process-lifetime singleton leak. Confirm.
3177 static ARMAlgorithm *GetInstance(ARMFormat Format, NSFormat NSF) {
3178 /// Init the first time.
3179 if (Algorithms.size() == 0) {
3180 Algorithms.resize(array_lengthof(FuncPtrs));
3181 for (unsigned i = 0, num = array_lengthof(FuncPtrs); i < num; ++i)
3183 Algorithms[i] = new ARMAlgorithm(FuncPtrs[i]);
3185 Algorithms[i] = NULL;
3187 if (NSAlgorithms.size() == 0) {
3188 NSAlgorithms.resize(array_lengthof(NEONFuncPtrs));
3189 for (unsigned i = 0, num = array_lengthof(NEONFuncPtrs); i < num; ++i)
3190 if (NEONFuncPtrs[i])
3191 NSAlgorithms[i] = new ARMAlgorithm(NEONFuncPtrs[i]);
3193 NSAlgorithms[i] = NULL;
// NEON formats dispatch through the NSFormat table; everything else by Format.
3196 if (Format != ARM_FORMAT_NEONFRM)
3197 return Algorithms[Format];
3199 return NSAlgorithms[NSF];
// Solve delegates to the stored DisassembleFP callback.
// NOTE(review): the early-return body for a NULL Disassemble is elided in
// this listing; only the NULL test and the dispatch line are visible.
3202 virtual bool Solve(MCInst &MI, unsigned Opcode, uint32_t insn,
3203 unsigned short NumOps, unsigned &NumOpsAdded) const {
3204 if (Disassemble == NULL)
3207 return (*Disassemble)(MI, Opcode, insn, NumOps, NumOpsAdded);
3211 ARMAlgorithm(DisassembleFP fp) :
3212 ARMDisassemblyAlgorithm(), Disassemble(fp) {}
3214 ARMAlgorithm(ARMAlgorithm &AA) :
3215 ARMDisassemblyAlgorithm(), Disassemble(AA.Disassemble) {}
3217 virtual ~ARMAlgorithm() {}
// Out-of-line definitions for ARMAlgorithm's static lookup tables and for
// ARMBasicMCBuilder's IT-block tracking state (declared in their classes).
3220 // Define the symbol here.
3221 std::vector<ARMAlgorithm*> ARMAlgorithm::Algorithms;
3223 // Define the symbol here.
3224 std::vector<ARMAlgorithm*> ARMAlgorithm::NSAlgorithms;
3226 // Define the symbol here.
3227 unsigned ARMBasicMCBuilder::ITCounter = 0;
3229 // Define the symbol here.
3230 unsigned ARMBasicMCBuilder::ITState = 0;
3233 static unsigned short CountITSize(unsigned ITMask) {
// Computes the number of instructions covered by an IT mask.
// NOTE(review): the remainder of this function's body is elided in this
// listing; only the trailing-zero count of the mask is visible.
3234 // First count the trailing zeros of the IT mask.
3235 unsigned TZ = CountTrailingZeros_32(ITMask);
3240 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3241 /// The general idea is to set the Opcode for the MCInst, followed by adding
3242 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3243 /// to the Algo (ARM Disassemble Algorithm) object to perform Format-specific
3244 /// disassembly, followed by class method TryPredicateAndSBitModifier() to do
3245 /// PredicateOperand and OptionalDefOperand which follow the Dst/Src Operands.
3246 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3247 // Stage 1 sets the Opcode.
3248 MI.setOpcode(Opcode);
3249 // If the number of operands is zero, we're done!
// NOTE(review): the early "return true" bodies after this check and after
// the NumOpsAdded >= NumOps test are elided in this listing.
3253 // Stage 2 calls the ARM Disassembly Algorithm to build the operand list.
3254 unsigned NumOpsAdded = 0;
3255 bool OK = Algo.Solve(MI, Opcode, insn, NumOps, NumOpsAdded);
3257 if (!OK) return false;
3258 if (NumOpsAdded >= NumOps)
3261 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3262 // FIXME: Should this be done selectively?
3263 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
/// TryPredicateAndSBitModifier - Appends the operands that follow the
/// Dst/Src operands: the two-part PredicateOperand (condition imm + CCR reg)
/// and/or the OptionalDefOperand ('S' bit => CPSR, else reg 0).  Returns true
/// iff every remaining operand was accounted for.
/// NOTE(review): this is a sampled listing — several lines (braces, else
/// branches, early returns) fall between the numbered lines below and are not
/// shown; the code is left byte-identical rather than reconstructed.
3266 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3267 uint32_t insn, unsigned short NumOpsRemaining) {
3269 assert(NumOpsRemaining > 0);
// Operand metadata for this opcode, and the index of the first operand not
// yet built (everything before Idx was added by the format-specific pass).
3271 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3272 const std::string &Name = ARMInsts[Opcode].Name;
3273 unsigned Idx = MI.getNumOperands();
3275 // First, we check whether this instr specifies the PredicateOperand through
3276 // a pair of TargetOperandInfos with isPredicate() property.
// The pair is {imm predicate, CCR predicate} — matching the `pred` operand
// described in the file header comment.
3277 if (NumOpsRemaining >= 2 &&
3278 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3279 OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3281 // If we are inside an IT block, get the IT condition bits maintained via
3282 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
// Taken on the in-IT-block path (the guard is on an elided line).
3285 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Otherwise derive the condition from the instruction itself.
3287 if (Name.length() > 1 && Name[0] == 't') {
3288 // Thumb conditional branch instructions have their cond field embedded,
// t2Bcc/tBcc carry their cond field at different bit positions; other Thumb
// instructions default to AL.
3292 if (Name == "t2Bcc")
3293 MI.addOperand(MCOperand::CreateImm(slice(insn, 25, 22)));
3294 else if (Name == "tBcc")
3295 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 8)));
3297 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3299 // ARM Instructions. Check condition field.
// CondVal of 0xF (unconditional space) is mapped to AL on an elided branch;
// otherwise the raw condition value is used.
3300 int64_t CondVal = getCondField(insn);
3302 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3304 MI.addOperand(MCOperand::CreateImm(CondVal));
// Second half of the predicate pair: the CCR register operand.
3307 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3309 NumOpsRemaining -= 2;
3310 if (NumOpsRemaining == 0)
3314 assert(NumOpsRemaining > 0);
3316 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3317 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
// S bit set => instruction defines CPSR; otherwise add reg 0 (no def).
3318 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3322 if (NumOpsRemaining == 0)
3328 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3329 /// after BuildIt is finished.
3330 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
3333 if (Opcode == ARM::t2IT) {
3334 ARMBasicMCBuilder::ITCounter = CountITSize(slice(insn, 3, 0));
3335 ARMBasicMCBuilder::InitITState(slice(insn, 7, 0));
3336 } else if (InITBlock())
3337 ARMBasicMCBuilder::UpdateITState();
3342 AbstractARMMCBuilder *ARMMCBuilderFactory::CreateMCBuilder(unsigned Opcode,
3343 ARMFormat Format, NSFormat NSF) {
3345 ARMAlgorithm *Algo = ARMAlgorithm::GetInstance(Format, NSF);
3349 return new ARMBasicMCBuilder(Opcode, Format, NSF,
3350 ARMInsts[Opcode].getNumOperands(), *Algo);