1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #define DEBUG_TYPE "arm-disassembler"
18 #include "ARMDisassemblerCore.h"
19 #include "ARMAddressingModes.h"
20 #include "ARMMCExpr.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Support/raw_ostream.h"
24 //#define DEBUG(X) do { X; } while (0)
26 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
27 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
28 /// describing the operand info for each ARMInsts[i].
30 /// Together with an instruction's encoding format, we can take advantage of the
31 /// NumOperands and the OpInfo fields of the target instruction description in
32 /// the quest to build out the MCOperand list for an MCInst.
34 /// The general guideline is that with a known format, the number of dst and src
35 /// operands are well-known. The dst is built first, followed by the src
36 /// operand(s). The operands not yet used at this point are for the Implicit
37 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
38 /// defined with two components:
40 /// def pred { // Operand PredicateOperand
41 /// ValueType Type = OtherVT;
42 /// string PrintMethod = "printPredicateOperand";
43 /// string AsmOperandLowerMethod = ?;
44 /// dag MIOperandInfo = (ops i32imm, CCR);
45 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
46 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
49 /// which is manifested by the TargetOperandInfo[] of:
51 /// { 0, 0|(1<<TOI::Predicate), 0 },
52 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
54 /// So the first predicate MCOperand corresponds to the immediate part of the
55 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
56 /// corresponds to a register kind of ARM::CPSR.
58 /// For the Defs part, in the simple case of only cc_out:$s, we have:
60 /// def cc_out { // Operand OptionalDefOperand
61 /// ValueType Type = OtherVT;
62 /// string PrintMethod = "printSBitModifierOperand";
63 /// string AsmOperandLowerMethod = ?;
64 /// dag MIOperandInfo = (ops CCR);
65 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
66 /// dag DefaultOps = (ops (i32 zero_reg));
69 /// which is manifested by the one TargetOperandInfo of:
71 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
73 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
74 #include "ARMGenInstrInfo.inc"
// Return the mnemonic string for Opcode, looked up in the static ARMInsts[]
// table that is generated into ARMGenInstrInfo.inc (included above).
// No bounds check: Opcode is assumed to be a valid ARM opcode enum value.
78 const char *ARMUtils::OpcodeName(unsigned Opcode) {
79 return ARMInsts[Opcode].Name;
// NOTE(review): the function's closing brace (original line 80) is absent
// from this extraction (numbering gap) — restore when reconciling upstream.
82 // Return the register enum Based on RegClass and the raw register number.
// Maps a (RegClassID, RawRegister) pair onto the ARM::* register enum, e.g.
// (GPRRegClassID, 0) -> ARM::R0, (DPRRegClassID, 8) -> ARM::D8.
// On an invalid combination it reports the error via the builder B (see the
// DEBUG/error-code tail at the bottom of the function).
// NOTE(review): this extraction has original-line numbering gaps throughout
// the function — the signature's return-type line, the outer dispatch on
// RawRegister (presumably a switch/case per register number), many Dn/Qn
// returns, and all closing braces are missing. Verify against upstream
// ARMDisassemblerCore.cpp before compiling.
85 getRegisterEnum(BO B, unsigned RegClassID, unsigned RawRegister) {
86 if (RegClassID == ARM::rGPRRegClassID) {
// rGPR excludes SP(13)/PC(15); the rejection path for those values is in
// the elided lines (gap 90-92). Once past the check, rGPR decodes as GPR.
87 // Check for The register numbers 13 and 15 that are not permitted for many
88 // Thumb register specifiers.
89 if (RawRegister == 13 || RawRegister == 15) {
93 // For this purpose, we can treat rGPR as if it were GPR.
94 RegClassID = ARM::GPRRegClassID;
97 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
98 // A7.3 register encoding
100 // Qn -> bit[16] == 0
103 // If one of these bits is 1, the instruction is UNDEFINED.
// Q registers are encoded as even D-register numbers, hence the odd-bit
// check below and the RawRegister >> 1 halving that follows.
104 if (RegClassID == ARM::QPRRegClassID && slice(RawRegister, 0, 0) == 1) {
109 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
// Per-register-number dispatch: each inner switch maps the class to the
// concrete enum for one raw register value (0..31). The enclosing
// "case N:" labels are among the elided lines.
115 switch (RegClassID) {
116 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
117 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
118 case ARM::DPR_VFP2RegClassID:
120 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
121 case ARM::QPR_VFP2RegClassID:
123 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
127 switch (RegClassID) {
128 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
129 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
130 case ARM::DPR_VFP2RegClassID:
132 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
133 case ARM::QPR_VFP2RegClassID:
135 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
139 switch (RegClassID) {
140 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
141 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
142 case ARM::DPR_VFP2RegClassID:
144 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
145 case ARM::QPR_VFP2RegClassID:
147 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
151 switch (RegClassID) {
152 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
153 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
154 case ARM::DPR_VFP2RegClassID:
156 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
157 case ARM::QPR_VFP2RegClassID:
159 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
// From raw register 4 on, the _8 restricted classes drop out of the D/Q
// cases (D4..: no DPR_8; Q4..: no QPR_8), matching the 8-register classes.
163 switch (RegClassID) {
164 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
165 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
166 case ARM::DPR_VFP2RegClassID:
168 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
169 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
173 switch (RegClassID) {
174 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
175 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
176 case ARM::DPR_VFP2RegClassID:
178 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
179 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
183 switch (RegClassID) {
184 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
185 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
186 case ARM::DPR_VFP2RegClassID:
188 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
189 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
193 switch (RegClassID) {
194 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
195 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
196 case ARM::DPR_VFP2RegClassID:
198 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
199 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// From raw register 8 on, tGPR (Thumb low registers r0-r7) drops out of the
// GPR case, and DPR_8 drops out of the D case.
203 switch (RegClassID) {
204 case ARM::GPRRegClassID: return ARM::R8;
205 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
206 case ARM::QPRRegClassID: return ARM::Q8;
207 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
211 switch (RegClassID) {
212 case ARM::GPRRegClassID: return ARM::R9;
213 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
214 case ARM::QPRRegClassID: return ARM::Q9;
215 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
219 switch (RegClassID) {
220 case ARM::GPRRegClassID: return ARM::R10;
221 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
222 case ARM::QPRRegClassID: return ARM::Q10;
223 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
227 switch (RegClassID) {
228 case ARM::GPRRegClassID: return ARM::R11;
229 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
230 case ARM::QPRRegClassID: return ARM::Q11;
231 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
235 switch (RegClassID) {
236 case ARM::GPRRegClassID: return ARM::R12;
237 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
238 case ARM::QPRRegClassID: return ARM::Q12;
239 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// Raw 13/14/15 map to the named GPRs SP/LR/PC respectively.
243 switch (RegClassID) {
244 case ARM::GPRRegClassID: return ARM::SP;
245 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
246 case ARM::QPRRegClassID: return ARM::Q13;
247 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
251 switch (RegClassID) {
252 case ARM::GPRRegClassID: return ARM::LR;
253 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
254 case ARM::QPRRegClassID: return ARM::Q14;
255 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
259 switch (RegClassID) {
260 case ARM::GPRRegClassID: return ARM::PC;
261 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
262 case ARM::QPRRegClassID: return ARM::Q15;
263 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Raw 16-31 exist only for D and S classes (VFP3 upper bank for D;
// NOTE(review): S16-S31 being reached here assumes the raw number space —
// confirm against upstream, since S registers only go to S31 via this path).
267 switch (RegClassID) {
268 case ARM::DPRRegClassID: return ARM::D16;
269 case ARM::SPRRegClassID: return ARM::S16;
273 switch (RegClassID) {
274 case ARM::DPRRegClassID: return ARM::D17;
275 case ARM::SPRRegClassID: return ARM::S17;
279 switch (RegClassID) {
280 case ARM::DPRRegClassID: return ARM::D18;
281 case ARM::SPRRegClassID: return ARM::S18;
285 switch (RegClassID) {
286 case ARM::DPRRegClassID: return ARM::D19;
287 case ARM::SPRRegClassID: return ARM::S19;
291 switch (RegClassID) {
292 case ARM::DPRRegClassID: return ARM::D20;
293 case ARM::SPRRegClassID: return ARM::S20;
297 switch (RegClassID) {
298 case ARM::DPRRegClassID: return ARM::D21;
299 case ARM::SPRRegClassID: return ARM::S21;
303 switch (RegClassID) {
304 case ARM::DPRRegClassID: return ARM::D22;
305 case ARM::SPRRegClassID: return ARM::S22;
309 switch (RegClassID) {
310 case ARM::DPRRegClassID: return ARM::D23;
311 case ARM::SPRRegClassID: return ARM::S23;
315 switch (RegClassID) {
316 case ARM::DPRRegClassID: return ARM::D24;
317 case ARM::SPRRegClassID: return ARM::S24;
321 switch (RegClassID) {
322 case ARM::DPRRegClassID: return ARM::D25;
323 case ARM::SPRRegClassID: return ARM::S25;
327 switch (RegClassID) {
328 case ARM::DPRRegClassID: return ARM::D26;
329 case ARM::SPRRegClassID: return ARM::S26;
333 switch (RegClassID) {
334 case ARM::DPRRegClassID: return ARM::D27;
335 case ARM::SPRRegClassID: return ARM::S27;
339 switch (RegClassID) {
340 case ARM::DPRRegClassID: return ARM::D28;
341 case ARM::SPRRegClassID: return ARM::S28;
345 switch (RegClassID) {
346 case ARM::DPRRegClassID: return ARM::D29;
347 case ARM::SPRRegClassID: return ARM::S29;
351 switch (RegClassID) {
352 case ARM::DPRRegClassID: return ARM::D30;
353 case ARM::SPRRegClassID: return ARM::S30;
357 switch (RegClassID) {
358 case ARM::DPRRegClassID: return ARM::D31;
359 case ARM::SPRRegClassID: return ARM::S31;
// Fall-through: no mapping exists for this combination.
363 DEBUG(errs() << "Invalid (RegClassID, RawRegister) combination\n");
364 // Encoding error. Mark the builder with error code != 0.
369 ///////////////////////////////
371 // Utility Functions //
373 ///////////////////////////////
375 // Extract/Decode Rd: Inst{15-12}.
// Shift the raw Rd field into place and mask it with ARMII::GPRRegMask.
// NOTE(review): closing brace elided in this extraction (numbering gap).
376 static inline unsigned decodeRd(uint32_t insn) {
377 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
380 // Extract/Decode Rn: Inst{19-16}.
// NOTE(review): closing brace elided in this extraction (numbering gap).
381 static inline unsigned decodeRn(uint32_t insn) {
382 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
385 // Extract/Decode Rm: Inst{3-0}.
// Rm sits in the low bits, so no shift is needed — only the mask.
386 static inline unsigned decodeRm(uint32_t insn) {
387 return (insn & ARMII::GPRRegMask);
390 // Extract/Decode Rs: Inst{11-8}.
// NOTE(review): closing brace elided in this extraction (numbering gap).
391 static inline unsigned decodeRs(uint32_t insn) {
392 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
// Extract the condition field by shifting by ARMII::CondShift. Note that no
// mask is applied: the condition field occupies the top bits of the word.
395 static inline unsigned getCondField(uint32_t insn) {
396 return (insn >> ARMII::CondShift);
// Extract the single I (immediate) bit at ARMII::I_BitShift.
399 static inline unsigned getIBit(uint32_t insn) {
400 return (insn >> ARMII::I_BitShift) & 1;
// Extract the addressing-mode-3 I bit at ARMII::AM3_I_BitShift.
403 static inline unsigned getAM3IBit(uint32_t insn) {
404 return (insn >> ARMII::AM3_I_BitShift) & 1;
// Extract the P (pre-index) bit at ARMII::P_BitShift.
407 static inline unsigned getPBit(uint32_t insn) {
408 return (insn >> ARMII::P_BitShift) & 1;
// Extract the U (add/subtract offset) bit at ARMII::U_BitShift.
411 static inline unsigned getUBit(uint32_t insn) {
412 return (insn >> ARMII::U_BitShift) & 1;
// Extract the two adjacent P and U bits together (shift by U, mask 2 bits);
// used by getAMSubModeForBits() below to pick the LDM/STM sub-mode.
415 static inline unsigned getPUBits(uint32_t insn) {
416 return (insn >> ARMII::U_BitShift) & 3;
// Extract the S (set flags) bit at ARMII::S_BitShift.
419 static inline unsigned getSBit(uint32_t insn) {
420 return (insn >> ARMII::S_BitShift) & 1;
// Extract the W (writeback) bit at ARMII::W_BitShift.
423 static inline unsigned getWBit(uint32_t insn) {
424 return (insn >> ARMII::W_BitShift) & 1;
// Extract the D bit at ARMII::D_BitShift.
427 static inline unsigned getDBit(uint32_t insn) {
428 return (insn >> ARMII::D_BitShift) & 1;
// Extract the N bit at ARMII::N_BitShift.
431 static inline unsigned getNBit(uint32_t insn) {
432 return (insn >> ARMII::N_BitShift) & 1;
// Extract the M bit at ARMII::M_BitShift.
435 static inline unsigned getMBit(uint32_t insn) {
436 return (insn >> ARMII::M_BitShift) & 1;
439 // See A8.4 Shifts applied to a register.
440 // A8.4.2 Register controlled shifts.
442 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
443 // into llvm enums for shift opcode. The API clients should pass in the value
444 // encoded with two bits, so the assert stays to signal a wrong API usage.
446 // A8-12: DecodeRegShift()
447 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
// NOTE(review): the "switch (bits) {" opener (original line 448) and the
// closing braces were lost in this extraction — restore before compiling.
449 default: assert(0 && "No such value"); return ARM_AM::no_shift;
450 case 0: return ARM_AM::lsl;
451 case 1: return ARM_AM::lsr;
452 case 2: return ARM_AM::asr;
453 case 3: return ARM_AM::ror;
457 // See A8.4 Shifts applied to a register.
458 // A8.4.1 Constant shifts.
460 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
461 // encodings into the intended ShiftOpc and shift amount. Both parameters are
// in-out: ShOp and ShImm are normalized in place.
463 // A8-11: DecodeImmShift()
464 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
// NOTE(review): most of this body (the early-return for ShImm != 0, the
// switch on ShOp, the lsr/asr imm==0 -> shift-by-32 cases, and the ror ->
// rrx rewrite, per A8-11 DecodeImmShift) is missing from this extraction —
// only the no_shift case label and one assignment survive. Restore from
// upstream; do not compile as-is.
468 case ARM_AM::no_shift:
472 ShOp = ARM_AM::no_shift;
484 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
485 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
486 // clients should pass in the value encoded with two bits, so the assert stays
487 // to signal a wrong API usage.
488 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
// NOTE(review): the "switch (bits) {" opener (original line 489) and the
// closing braces are missing from this extraction — restore before compiling.
490 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
491 case 1: return ARM_AM::ia; // P=0 U=1
492 case 3: return ARM_AM::ib; // P=1 U=1
493 case 0: return ARM_AM::da; // P=0 U=0
494 case 2: return ARM_AM::db; // P=1 U=0
498 ////////////////////////////////////////////
500 // Disassemble function definitions //
502 ////////////////////////////////////////////
504 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
505 /// instr into a list of MCOperands in the appropriate order, with possible dst,
506 /// followed by possible src(s).
508 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
509 /// the CPSR, is factored into ARMBasicMCBuilder's method named
510 /// TryPredicateAndSBitModifier.
// Pseudo instructions never appear in an encoded instruction stream, so
// reaching this handler is a table/dispatch bug: assert unconditionally.
// All Disassemble*Frm handlers share this signature (MI out-param, opcode,
// raw insn word, operand counts, builder); the unnamed BO parameter is unused.
512 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
513 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
515 assert(0 && "Unexpected pseudo instruction!");
// NOTE(review): the trailing "return false;" and closing brace (original
// lines 516-517) are missing from this extraction.
// Sanity-check the register fields of a multiply-format instruction against
// the architecture's UNPREDICTABLE rules; returns true if the encoding uses
// a forbidden register (typically PC, r15) for the given opcode group.
520 // if d == 15 || n == 15 || m == 15 || a == 15 then UNPREDICTABLE;
523 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
526 // if dLo == 15 || dHi == 15 || n == 15 || m == 15 then UNPREDICTABLE;
527 // if dHi == dLo then UNPREDICTABLE;
528 static bool BadRegsMulFrm(unsigned Opcode, uint32_t insn) {
// The four raw 4-bit register fields of the multiply encodings.
529 unsigned R19_16 = slice(insn, 19, 16);
530 unsigned R15_12 = slice(insn, 15, 12);
531 unsigned R11_8 = slice(insn, 11, 8);
532 unsigned R3_0 = slice(insn, 3, 0);
// NOTE(review): the "switch (Opcode) {" opener, the "return true;" lines
// after each check, and the closing braces are among the lines elided by
// this extraction (numbering gaps 533-534, 537, 541, 543-544, ...).
535 // Did we miss an opcode?
536 DEBUG(errs() << "BadRegsMulFrm: unexpected opcode!");
// Group 1: Rd, Rn, Rm and the accumulator Ra must all avoid PC.
538 case ARM::MLA: case ARM::MLS: case ARM::SMLABB: case ARM::SMLABT:
539 case ARM::SMLATB: case ARM::SMLATT: case ARM::SMLAWB: case ARM::SMLAWT:
540 case ARM::SMMLA: case ARM::SMMLAR: case ARM::SMMLS: case ARM::SMMLSR:
542 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
// Group 2: three-register forms — only d, n, m are checked.
545 case ARM::MUL: case ARM::SMMUL: case ARM::SMMULR:
546 case ARM::SMULBB: case ARM::SMULBT: case ARM::SMULTB: case ARM::SMULTT:
547 case ARM::SMULWB: case ARM::SMULWT: case ARM::SMUAD: case ARM::SMUADX:
548 // A8.6.167 SMLAD & A8.6.172 SMLSD
549 case ARM::SMLAD: case ARM::SMLADX: case ARM::SMLSD: case ARM::SMLSDX:
551 if (R19_16 == 15 || R11_8 == 15 || R3_0 == 15)
// Group 3: long multiplies — all four fields checked, plus dHi != dLo.
554 case ARM::SMLAL: case ARM::SMULL: case ARM::UMAAL: case ARM::UMLAL:
556 case ARM::SMLALBB: case ARM::SMLALBT: case ARM::SMLALTB: case ARM::SMLALTT:
557 case ARM::SMLALD: case ARM::SMLALDX: case ARM::SMLSLD: case ARM::SMLSLDX:
558 if (R19_16 == 15 || R15_12 == 15 || R11_8 == 15 || R3_0 == 15)
560 if (R19_16 == R15_12)
566 // Multiply Instructions.
567 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLAR,
568 // SMMLS, SMMLAR, SMLAD, SMLADX, SMLSD, SMLSDX, and USADA8 (for convenience):
569 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
570 // But note that register checking for {SMLAD, SMLADX, SMLSD, SMLSDX} is
571 // only for {d, n, m}.
573 // MUL, SMMUL, SMMULR, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT, SMUAD,
574 // SMUADX, and USAD8 (for convenience):
575 // Rd{19-16} Rn{3-0} Rm{11-8}
577 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT,
578 // SMLALD, SMLADLX, SMLSLD, SMLSLDX:
579 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
581 // The mapping of the multiply registers to the "regular" ARM registers, where
582 // there are convenience decoder functions, is:
// Builds the MCOperand list for a multiply-format instruction: dst reg(s)
// first, then the src regs, then the optional accumulator. NumOpsAdded is
// updated in place (aliased as OpIdx).
588 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
589 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
591 const TargetInstrDesc &TID = ARMInsts[Opcode];
592 unsigned short NumDefs = TID.getNumDefs();
593 const TargetOperandInfo *OpInfo = TID.OpInfo;
594 unsigned &OpIdx = NumOpsAdded;
// NOTE(review): several statements (the NumOps >= 3 assert head, return
// statements, decode arguments to getRegisterEnum, OpIdx increments, and
// closing braces) fall in this extraction's numbering gaps.
598 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
600 && OpInfo[0].RegClass == ARM::GPRRegClassID
601 && OpInfo[1].RegClass == ARM::GPRRegClassID
602 && OpInfo[2].RegClass == ARM::GPRRegClassID
603 && "Expect three register operands");
605 // Sanity check for the register encodings.
606 if (BadRegsMulFrm(Opcode, insn))
609 // Instructions with two destination registers have RdLo{15-12} first.
611 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
612 "Expect 4th register operand");
613 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
618 // The destination register: RdHi{19-16} or Rd{19-16}.
619 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
622 // The two src registers: Rn{3-0}, then Rm{11-8}.
623 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
625 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
629 // Many multiply instructions (e.g., MLA) have three src registers.
630 // The third register operand is Ra{15-12}.
631 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
632 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
640 // Helper routines for disassembly of coprocessor instructions.
// True if Opcode is a coprocessor load/store (LDC*/STC* family), tested by
// opcode-enum range. This relies on the generated enum ordering placing the
// LDC and STC variants in contiguous ranges.
642 static bool LdStCopOpcode(unsigned Opcode) {
643 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
644 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
// NOTE(review): the "return true; / return false;" tail and closing brace
// are elided in this extraction (numbering gap 645-647).
// True if Opcode is any coprocessor instruction: a load/store (via
// LdStCopOpcode) or one of the CDP/MCR/MRC/MCRR/MRRC families below.
648 static bool CoprocessorOpcode(unsigned Opcode) {
649 if (LdStCopOpcode(Opcode))
// NOTE(review): the "return true;", the "switch (Opcode) {" opener, the
// default/return lines, and closing braces fall in this extraction's gaps.
655 case ARM::CDP: case ARM::CDP2:
656 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
657 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
// Extract the coprocessor number: Inst{11-8}.
661 static inline unsigned GetCoprocessor(uint32_t insn) {
662 return slice(insn, 11, 8);
// Extract opc1: Inst{23-20} for CDP/CDP2 (wider field), Inst{23-21} otherwise.
664 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
665 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
// Extract opc2: Inst{7-5}.
667 static inline unsigned GetCopOpc2(uint32_t insn) {
668 return slice(insn, 7, 5);
// Extract the single opc field used by MCRR/MRRC forms: Inst{7-4}.
670 static inline unsigned GetCopOpc(uint32_t insn) {
671 return slice(insn, 7, 4);
673 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
// Operand layouts per opcode family:
676 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
678 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
680 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
682 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
684 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
688 // LDC_OPTION: cop CRd Rn imm8
690 // STC_OPTION: cop CRd Rn imm8
// Builds the MCOperand list for any coprocessor instruction; the boolean
// flags below (OneCopOpc/NoGPR/LdStCop/RtOut) select among the layouts.
693 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
694 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
696 assert(NumOps >= 4 && "Num of operands >= 4 for coprocessor instr");
698 unsigned &OpIdx = NumOpsAdded;
700 // if coproc == '101x' then SEE "Advanced SIMD and VFP"
701 // But since the special instructions have more explicit encoding bits
702 // specified, if coproc == 10 or 11, we should reject it as invalid.
703 unsigned coproc = GetCoprocessor(insn);
704 if ((Opcode == ARM::MCR || Opcode == ARM::MCRR ||
705 Opcode == ARM::MRC || Opcode == ARM::MRRC) &&
706 (coproc == 10 || coproc == 11)) {
707 DEBUG(errs() << "Encoding error: coproc == 10 or 11 for MCR[R]/MR[R]C\n");
// NOTE(review): the "return false;"/closing braces after the rejection, the
// OpIdx initialization, and several branch/if lines are among the lines
// elided by this extraction (numbering gaps) — verify against upstream.
711 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
712 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
714 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
715 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
716 bool LdStCop = LdStCopOpcode(Opcode);
717 bool RtOut = (Opcode == ARM::MRC || Opcode == ARM::MRC2);
// MRC writes its Rt as a def, so it is emitted first (RtOut path).
722 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
726 MI.addOperand(MCOperand::CreateImm(coproc));
// Load/store coprocessor path: CRd imm, base Rn, then AM2-style offset.
730 // Unindex if P:W = 0b00 --> _OPTION variant
731 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
733 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
735 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Placeholder register operand (reg 0) before the AM2 offset immediate.
740 MI.addOperand(MCOperand::CreateReg(0));
741 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
742 const TargetInstrDesc &TID = ARMInsts[Opcode];
744 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// Word-aligned 8-bit offset (imm8:00), packed with add/sub + index mode.
745 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
746 ARM_AM::no_shift, IndexMode);
747 MI.addOperand(MCOperand::CreateImm(Offset));
// _OPTION variant: raw imm8, no addressing-mode packing.
750 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
// Non-load/store path: opc1 (or the single opc for MCRR/MRRC forms)...
754 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
755 : GetCopOpc1(insn, NoGPR)));
// ...then CRd as an immediate (CDP) or Rt as a GPR (MCR/MRC forms)...
759 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
760 : MCOperand::CreateReg(
761 getRegisterEnum(B, ARM::GPRRegClassID,
// ...then Rn as a GPR (MCRR/MRRC) or CRn as an immediate, then CRm.
766 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
767 getRegisterEnum(B, ARM::GPRRegClassID,
769 : MCOperand::CreateImm(decodeRn(insn)));
771 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
// opc2 is emitted last for the forms that carry it (CDP/MCR/MRC).
776 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
784 // Branch Instructions.
785 // BL: SignExtend(Imm24:'00', 32)
786 // Bcc, BL_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
787 // SMC: ZeroExtend(imm4, 32)
788 // SVC: ZeroExtend(Imm24, 32)
790 // Various coprocessor instructions are assigned BrFrm arbitrarily.
791 // Delegates to DisassembleCoprocessor() helper function.
794 // MSR/MSRsys: Rm mask=Inst{19-16}
796 // MSRi/MSRsysi: so_imm
797 // SRSW/SRS: ldstm_mode:$amode mode_imm
798 // RFEW/RFE: ldstm_mode:$amode Rn
// Handles the branch format plus the miscellaneous system instructions that
// the instruction tables lump into BrFrm (MRS/MSR/SRS/RFE and coprocessor).
// NOTE(review): the "NumOpsAdded = ...; return true;" tails of each early-out
// branch and the closing braces fall in this extraction's numbering gaps.
799 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
800 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
// Coprocessor opcodes routed through BrFrm are delegated wholesale.
802 if (CoprocessorOpcode(Opcode))
803 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded, B);
805 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
806 if (!OpInfo) return false;
808 // MRS and MRSsys take one GPR reg Rd.
809 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
810 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
811 "Reg operand expected");
812 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
817 // BXJ takes one GPR reg Rm.
818 if (Opcode == ARM::BXJ) {
819 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
820 "Reg operand expected");
821 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
826 // MSR take a mask, followed by one GPR reg Rm. The mask contains the R Bit in
827 // bit 4, and the special register fields in bits 3-0.
828 if (Opcode == ARM::MSR) {
829 assert(NumOps >= 1 && OpInfo[1].RegClass == ARM::GPRRegClassID &&
830 "Reg operand expected");
831 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
832 slice(insn, 19, 16) /* Special Reg */ ));
833 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
838 // MSRi take a mask, followed by one so_imm operand. The mask contains the
839 // R Bit in bit 4, and the special register fields in bits 3-0.
840 if (Opcode == ARM::MSRi) {
841 // A5.2.11 MSR (immediate), and hints & B6.1.6 MSR (immediate)
842 // The hints instructions have more specific encodings, so if mask == 0,
843 // we should reject this as an invalid instruction.
844 if (slice(insn, 19, 16) == 0)
846 MI.addOperand(MCOperand::CreateImm(slice(insn, 22, 22) << 4 /* R Bit */ |
847 slice(insn, 19, 16) /* Special Reg */ ));
848 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
849 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
850 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
851 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
852 unsigned Imm = insn & 0xFF;
853 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// SRS/RFE: addressing sub-mode from P:U, then mode imm (SRS) or Rn (RFE).
857 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
858 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
859 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
860 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
862 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
863 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
865 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Fallthrough: the true branch opcodes — one immediate operand.
871 assert((Opcode == ARM::Bcc || Opcode == ARM::BL || Opcode == ARM::BL_pred
872 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
873 "Unexpected Opcode");
875 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
878 if (Opcode == ARM::SMC) {
879 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
880 Imm32 = slice(insn, 3, 0);
881 } else if (Opcode == ARM::SVC) {
882 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
883 Imm32 = slice(insn, 23, 0);
885 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
886 unsigned Imm26 = slice(insn, 23, 0) << 2;
887 //Imm32 = signextend<signed int, 26>(Imm26);
888 Imm32 = SignExtend32<26>(Imm26);
891 MI.addOperand(MCOperand::CreateImm(Imm32));
897 // Misc. Branch Instructions.
// Handles BX_RET/MOVPCLR (predicates only), BLX/BX (one GPR), and BLXi (a
// sign-extended 26-bit PC offset built from imm24:H:'0').
// NOTE(review): the function's tail past original line 931 (including the
// return paths and closing brace) is beyond this extraction — verify upstream.
900 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
901 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
903 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
904 if (!OpInfo) return false;
906 unsigned &OpIdx = NumOpsAdded;
910 // BX_RET and MOVPCLR have only two predicate operands; do an early return.
911 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR)
914 // BLX and BX take one GPR reg.
915 if (Opcode == ARM::BLX || Opcode == ARM::BLX_pred ||
917 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
918 "Reg operand expected");
919 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
925 // BLXi takes imm32 (the PC offset).
926 if (Opcode == ARM::BLXi) {
927 assert(NumOps >= 1 && OpInfo[0].RegClass < 0 && "Imm operand expected");
928 // SignExtend(imm24:H:'0', 32) where imm24 = Inst{23-0} and H = Inst{24}.
929 unsigned Imm26 = slice(insn, 23, 0) << 2 | slice(insn, 24, 24) << 1;
930 int Imm32 = SignExtend32<26>(Imm26);
931 MI.addOperand(MCOperand::CreateImm(Imm32));
// Compute the inverted bit mask for BFC/BFI from lsb = Inst{11-7} and
// msb = Inst{20-16}; returns false on the encoding error msb < lsb.
// NOTE(review): the msb<lsb comparison, the mask-building statement inside
// the loop, and the return/closing lines are elided in this extraction.
939 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
940 uint32_t lsb = slice(insn, 11, 7);
941 uint32_t msb = slice(insn, 20, 16);
944 DEBUG(errs() << "Encoding error: msb < lsb\n");
948 for (uint32_t i = lsb; i <= msb; ++i)
954 // Standard data-processing instructions allow PC as a register specifier,
955 // but we should reject other DPFrm instructions with PC as registers.
// Returns true when a non-standard DPFrm opcode encodes PC (r15) in any of
// Rd/Rn/Rm. The listed *rr opcodes below are the standard group for which
// PC is architecturally allowed.
956 static bool BadRegsDPFrm(unsigned Opcode, uint32_t insn) {
// NOTE(review): the "switch (Opcode) {" opener, the default label, and the
// return/closing lines are among the lines elided by this extraction.
959 // Did we miss an opcode?
960 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 || decodeRm(insn) == 15) {
961 DEBUG(errs() << "DPFrm with bad reg specifier(s)\n");
964 case ARM::ADCrr: case ARM::ADDSrr: case ARM::ADDrr: case ARM::ANDrr:
965 case ARM::BICrr: case ARM::CMNzrr: case ARM::CMPrr: case ARM::EORrr:
966 case ARM::ORRrr: case ARM::RSBrr: case ARM::RSCrr: case ARM::SBCrr:
967 case ARM::SUBSrr: case ARM::SUBrr: case ARM::TEQrr: case ARM::TSTrr:
972 // A major complication is the fact that some of the saturating add/subtract
973 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
974 // They are QADD, QDADD, QDSUB, and QSUB.
// Data-processing format: emits the optional Rd def, then the source
// operands; operand 2 is either a register (reg/reg form, I bit = 0) or a
// rotated 8-bit immediate (reg/imm form, I bit = 1). BFC/BFI and SBFX/UBFX
// are special-cased. NumOpsAdded is updated in place (aliased as OpIdx).
// NOTE(review): return statements, OpIdx increments, decode arguments, and
// closing braces fall in this extraction's numbering gaps throughout.
975 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
976 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
978 const TargetInstrDesc &TID = ARMInsts[Opcode];
979 unsigned short NumDefs = TID.getNumDefs();
980 bool isUnary = isUnaryDP(TID.TSFlags);
981 const TargetOperandInfo *OpInfo = TID.OpInfo;
982 unsigned &OpIdx = NumOpsAdded;
986 // Disassemble register def if there is one.
987 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
988 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
993 // Now disassemble the src operands.
997 // Special-case handling of BFC/BFI/SBFX/UBFX.
998 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
999 // A8.6.17 BFC & A8.6.18 BFI
// Rd == 15 is rejected for these encodings.
1001 if (decodeRd(insn) == 15)
1003 MI.addOperand(MCOperand::CreateReg(0));
1004 if (Opcode == ARM::BFI) {
1005 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1010 if (!getBFCInvMask(insn, mask))
1013 MI.addOperand(MCOperand::CreateImm(mask));
1017 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
1018 // Sanity check Rd and Rm.
1019 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1021 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// lsb = Inst{11-7}; width = Inst{20-16} + 1.
1023 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1024 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
// Saturating add/subtract use the swapped Rd Rm Rn operand order.
1029 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1030 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1032 // BinaryDP has an Rn operand.
1034 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1035 "Reg operand expected");
1036 MI.addOperand(MCOperand::CreateReg(
1037 getRegisterEnum(B, ARM::GPRRegClassID,
1038 RmRn ? decodeRm(insn) : decodeRn(insn))));
1042 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1043 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1044 MI.addOperand(MCOperand::CreateReg(0));
1048 // Now disassemble operand 2.
1049 if (OpIdx >= NumOps)
1052 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1053 // We have a reg/reg form.
1054 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1055 // routed here as well.
1056 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1057 if (BadRegsDPFrm(Opcode, insn))
1059 MI.addOperand(MCOperand::CreateReg(
1060 getRegisterEnum(B, ARM::GPRRegClassID,
1061 RmRn? decodeRn(insn) : decodeRm(insn))));
1063 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1064 // These two instructions don't allow d as 15.
1065 if (decodeRd(insn) == 15)
1067 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1068 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1069 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
// Prefer a symbolic operand (relocation target) when the builder can add one.
1070 if (!B->tryAddingSymbolicOperand(Imm16, 4, MI))
1071 MI.addOperand(MCOperand::CreateImm(Imm16));
1074 // We have a reg/imm form.
1075 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1076 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1077 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1078 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1079 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1080 unsigned Imm = insn & 0xFF;
1081 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Disassembles a data-processing (shifted-register) instruction: the GPR
// destination (if any), an optional Rn source, then the three-component
// shifter operand [Rm, Rs-or-reg0, shift opcode + amount].  NumOpsAdded
// (aliased as OpIdx) is updated as operands are appended to MI; returns
// false when the encoding is rejected as invalid or UNPREDICTABLE.
1088 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1089 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1091 const TargetInstrDesc &TID = ARMInsts[Opcode];
1092 unsigned short NumDefs = TID.getNumDefs();
1093 bool isUnary = isUnaryDP(TID.TSFlags);
1094 const TargetOperandInfo *OpInfo = TID.OpInfo;
1095 unsigned &OpIdx = NumOpsAdded;
1099 // Disassemble register def if there is one.
1100 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1101 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1106 // Disassemble the src operands.
1107 if (OpIdx >= NumOps)
1110 // BinaryDP has an Rn operand.
1112 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1113 "Reg operand expected");
1114 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1119 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1120 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1121 MI.addOperand(MCOperand::CreateReg(0));
1125 // Disassemble operand 2, which consists of three components.
1126 if (OpIdx + 2 >= NumOps)
1129 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1130 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1131 (OpInfo[OpIdx+2].RegClass < 0) &&
1132 "Expect 3 reg operands");
1134 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
// Inst{4} (Rs below) selects between the register-controlled and the
// constant-shift flavors of the shifter operand.
1135 unsigned Rs = slice(insn, 4, 4);
1137 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1140 // If Inst{7} != 0, we should reject this insn as an invalid encoding.
1141 if (slice(insn, 7, 7))
1144 // A8.6.3 ADC (register-shifted register)
1145 // if d == 15 || n == 15 || m == 15 || s == 15 then UNPREDICTABLE;
1147 // This also accounts for shift instructions (register) where, fortunately,
1148 // Inst{19-16} = 0b0000.
1149 // A8.6.89 LSL (register)
1150 // if d == 15 || n == 15 || m == 15 then UNPREDICTABLE;
1151 if (decodeRd(insn) == 15 || decodeRn(insn) == 15 ||
1152 decodeRm(insn) == 15 || decodeRs(insn) == 15)
1155 // Register-controlled shifts: [Rm, Rs, shift].
1156 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1158 // Inst{6-5} encodes the shift opcode.
1159 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
// Register-controlled shift amount lives in Rs, so the imm field is 0 here.
1160 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1162 // Constant shifts: [Rm, reg0, shift_imm].
1163 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1164 // Inst{6-5} encodes the shift opcode.
1165 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1166 // Inst{11-7} encodes the imm5 shift amount.
1167 unsigned ShImm = slice(insn, 11, 7);
1169 // A8.4.1. Possible rrx or shift amount of 32...
1170 getImmShiftSE(ShOp, ShImm);
1171 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Checks a word/byte load/store encoding's register fields against the
// architectural UNPREDICTABLE rules (the ARM ARM "if ... then UNPREDICTABLE"
// clauses quoted in the DEBUG messages below).  Returns true when the
// encoding should be rejected.  'Store' selects the store-side checks,
// 'WBack' indicates a writeback (pre/post-indexed) form.
// NOTE(review): an additional parameter (used below as 'Imm', apparently
// the immediate-offset-form flag) is not visible in this view — confirm
// against the full signature.
1178 static bool BadRegsLdStFrm(unsigned Opcode, uint32_t insn, bool Store, bool WBack,
1180 const StringRef Name = ARMInsts[Opcode].Name;
1181 unsigned Rt = decodeRd(insn);
1182 unsigned Rn = decodeRn(insn);
1183 unsigned Rm = decodeRm(insn);
1184 unsigned P = getPBit(insn);
1185 unsigned W = getWBit(insn);
1188 // Only STR (immediate, register) allows PC as the source.
1189 if (Name.startswith("STRB") && Rt == 15) {
1190 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1193 if (WBack && (Rn == 15 || Rn == Rt)) {
1194 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
1197 if (!Imm && Rm == 15) {
1198 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1202 // Only LDR (immediate, register) allows PC as the destination.
1203 if (Name.startswith("LDRB") && Rt == 15) {
1204 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1210 // The literal form must be in offset mode; it's an encoding error
1212 if (!(P == 1 && W == 0)) {
1213 DEBUG(errs() << "Ld literal form with !(P == 1 && W == 0)\n");
1216 // LDRB (literal) does not allow PC as the destination.
1217 if (Opcode != ARM::LDRi12 && Rt == 15) {
1218 DEBUG(errs() << "if t == 15 then UNPREDICTABLE\n");
1222 // Write back while Rn == Rt does not make sense.
1223 if (WBack && (Rn == Rt)) {
1224 DEBUG(errs() << "if wback && n == t then UNPREDICTABLE\n");
1231 DEBUG(errs() << "if m == 15 then UNPREDICTABLE\n");
1234 if (WBack && (Rn == 15 || Rn == Rt)) {
1235 DEBUG(errs() << "if wback && (n == 15 || n == t) then UNPREDICTABLE\n");
// Disassembles a word/byte load/store (Addressing Mode #2): optional base
// writeback def, the dst/src GPR, the base GPR, then either a +/- imm12
// offset (I-bit == 0) or a +/- Rm with an optional immediate shift.
// 'isStore' selects the store operand layout; returns false on bad or
// UNPREDICTABLE register combinations (via BadRegsLdStFrm) and on invalid
// encodings.
1243 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1244 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1246 const TargetInstrDesc &TID = ARMInsts[Opcode];
1247 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1248 const TargetOperandInfo *OpInfo = TID.OpInfo;
1249 if (!OpInfo) return false;
1251 unsigned &OpIdx = NumOpsAdded;
1255 assert(((!isStore && TID.getNumDefs() > 0) ||
1256 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1257 && "Invalid arguments");
1259 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1260 if (isPrePost && isStore) {
1261 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1262 "Reg operand expected");
1263 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1268 // Disassemble the dst/src operand.
1269 if (OpIdx >= NumOps)
1272 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1273 "Reg operand expected");
1274 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1278 // After dst of a pre- and post-indexed load is the address base writeback.
1279 if (isPrePost && !isStore) {
1280 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1281 "Reg operand expected");
1282 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1287 // Disassemble the base operand.
1288 if (OpIdx >= NumOps)
1291 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1292 "Reg operand expected");
1293 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1294 && "Index mode or tied_to operand expected");
1295 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1299 // For reg/reg form, base reg is followed by +/- reg shift-op imm.
1300 // For immediate form, it is followed by +/- imm12.
1301 // See also ARMAddressingModes.h (Addressing Mode #2).
1302 if (OpIdx + 1 >= NumOps)
// Reject encodings with UNPREDICTABLE register choices; the last argument
// passes the immediate-form flag (I-bit == 0).
1305 if (BadRegsLdStFrm(Opcode, insn, isStore, isPrePost, getIBit(insn)==0))
1308 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1309 unsigned IndexMode =
1310 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
1311 if (getIBit(insn) == 0) {
1312 // For pre- and post-indexed case, add a reg0 operand (Addressing Mode #2).
1313 // Otherwise, skip the reg operand since for addrmode_imm12, Rn has already
1316 MI.addOperand(MCOperand::CreateReg(0));
1320 unsigned Imm12 = slice(insn, 11, 0);
1321 if (Opcode == ARM::LDRBi12 || Opcode == ARM::LDRi12 ||
1322 Opcode == ARM::STRBi12 || Opcode == ARM::STRi12) {
1323 // Disassemble the 12-bit immediate offset, which is the second operand in
1324 // $addrmode_imm12 => (ops GPR:$base, i32imm:$offsimm).
1325 int Offset = AddrOpcode == ARM_AM::add ? 1 * Imm12 : -1 * Imm12;
1326 MI.addOperand(MCOperand::CreateImm(Offset));
1328 // Disassemble the 12-bit immediate offset, which is the second operand in
1329 // $am2offset => (ops GPR, i32imm).
1330 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift,
1332 MI.addOperand(MCOperand::CreateImm(Offset));
1336 // If Inst{25} = 1 and Inst{4} != 0, we should reject this as invalid.
1337 if (slice(insn,4,4) == 1)
1340 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1341 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1343 // Inst{6-5} encodes the shift opcode.
1344 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1345 // Inst{11-7} encodes the imm5 shift amount.
1346 unsigned ShImm = slice(insn, 11, 7);
1348 // A8.4.1. Possible rrx or shift amount of 32...
1349 getImmShiftSE(ShOp, ShImm);
1350 MI.addOperand(MCOperand::CreateImm(
1351 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp, IndexMode)));
// Load entry point: DisassembleLdStFrm with isStore == false.
1358 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1359 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1360 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false, B);
// Store entry point: DisassembleLdStFrm with isStore == true.
1363 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1364 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1365 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
// Returns true for the dual-register load/store opcodes (LDRD/STRD and their
// pre/post-indexed variants); their disassembly adds a second Rt (Rd + 1)
// operand (see DisassembleLdStMiscFrm).
1368 static bool HasDualReg(unsigned Opcode) {
1372 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1373 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// Disassembles a misc load/store (Addressing Mode #3: halfword, signed byte,
// dual): optional base writeback def, the dst/src GPR (plus Rd+1 for the
// dual-register opcodes), the base GPR, then either a +/- imm8 (Imm4H:Imm4L)
// offset or a +/- Rm offset register.  Returns false on invalid operand
// layouts.
1378 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1379 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore, BO B) {
1381 const TargetInstrDesc &TID = ARMInsts[Opcode];
1382 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1383 const TargetOperandInfo *OpInfo = TID.OpInfo;
1384 if (!OpInfo) return false;
1386 unsigned &OpIdx = NumOpsAdded;
1390 assert(((!isStore && TID.getNumDefs() > 0) ||
1391 (isStore && (TID.getNumDefs() == 0 || isPrePost)))
1392 && "Invalid arguments");
1394 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1395 if (isPrePost && isStore) {
1396 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1397 "Reg operand expected");
1398 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1403 // Disassemble the dst/src operand.
1404 if (OpIdx >= NumOps)
1407 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1408 "Reg operand expected");
1409 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1413 // Fill in LDRD and STRD's second operand Rt operand.
1414 if (HasDualReg(Opcode)) {
1415 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1416 decodeRd(insn) + 1)));
1420 // After dst of a pre- and post-indexed load is the address base writeback.
1421 if (isPrePost && !isStore) {
1422 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1423 "Reg operand expected");
1424 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1429 // Disassemble the base operand.
1430 if (OpIdx >= NumOps)
1433 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1434 "Reg operand expected");
1435 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1436 && "Offset mode or tied_to operand expected");
1437 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1441 // For reg/reg form, base reg is followed by +/- reg.
1442 // For immediate form, it is followed by +/- imm8.
1443 // See also ARMAddressingModes.h (Addressing Mode #3).
1444 if (OpIdx + 1 >= NumOps)
1447 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1448 (OpInfo[OpIdx+1].RegClass < 0) &&
1449 "Expect 1 reg operand followed by 1 imm operand");
1451 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1452 unsigned IndexMode =
1453 (TID.TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift;
// AM3 I-bit == 1 selects the immediate form: reg0 placeholder + imm8 offset.
1454 if (getAM3IBit(insn) == 1) {
1455 MI.addOperand(MCOperand::CreateReg(0));
1457 // Disassemble the 8-bit immediate offset.
1458 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1459 unsigned Imm4L = insn & 0xF;
1460 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L,
1462 MI.addOperand(MCOperand::CreateImm(Offset));
1464 // Disassemble the offset reg (Rm).
1465 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1467 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0, IndexMode);
1468 MI.addOperand(MCOperand::CreateImm(Offset));
// Misc-load entry point: DisassembleLdStMiscFrm with isStore == false.
1475 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1476 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1477 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false,
// Misc-store entry point: DisassembleLdStMiscFrm with isStore == true.
1481 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1482 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1483 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true, B);
1486 // The algorithm for disassembly of LdStMulFrm is different from others because
1487 // it explicitly populates the two predicate operands after the base register.
1488 // After that, we need to populate the reglist with each affected register
1489 // encoded as an MCOperand.
// LDM/STM: optional base writeback def (for the *_UPD opcodes), the base
// register, the two predicate operands (cond imm + CPSR), then one register
// operand per set bit in the 16-bit register list Inst{15-0}.
1490 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1491 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1493 assert(NumOps >= 4 && "LdStMulFrm expects NumOps >= 4");
1496 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
1498 // Writeback to base, if necessary.
1499 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::STMIA_UPD ||
1500 Opcode == ARM::LDMDA_UPD || Opcode == ARM::STMDA_UPD ||
1501 Opcode == ARM::LDMDB_UPD || Opcode == ARM::STMDB_UPD ||
1502 Opcode == ARM::LDMIB_UPD || Opcode == ARM::STMIB_UPD) {
1503 MI.addOperand(MCOperand::CreateReg(Base));
1507 // Add the base register operand.
1508 MI.addOperand(MCOperand::CreateReg(Base));
1510 // Handling the two predicate operands before the reglist.
1511 int64_t CondVal = getCondField(insn);
1514 MI.addOperand(MCOperand::CreateImm(CondVal));
1515 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1519 // Fill the variadic part of reglist.
// Bit i of Inst{15-0} set => register i is in the transfer list.
1520 unsigned RegListBits = insn & ((1 << 16) - 1);
1521 for (unsigned i = 0; i < 16; ++i) {
1522 if ((RegListBits >> i) & 1) {
1523 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1532 // LDREX, LDREXB, LDREXH: Rd Rn
1533 // LDREXD: Rd Rd+1 Rn
1534 // STREX, STREXB, STREXH: Rd Rm Rn
1535 // STREXD: Rd Rm Rm+1 Rn
1537 // SWP, SWPB: Rd Rm Rn
// Load/store exclusive and SWP (operand layouts listed in the comment block
// above): destination first, a source (and the +1 partner register for the
// doubleword LDREXD/STREXD forms) as the opcode requires, and finally the
// pointer register Rn.
1538 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1539 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1541 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1542 if (!OpInfo) return false;
1544 unsigned &OpIdx = NumOpsAdded;
1549 && OpInfo[0].RegClass == ARM::GPRRegClassID
1550 && OpInfo[1].RegClass == ARM::GPRRegClassID
1551 && "Expect 2 reg operands");
// Inst{20} is the load bit; 0 means a store form.
1553 bool isStore = slice(insn, 20, 20) == 0;
1554 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1556 // Add the destination operand.
1557 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1561 // Store register Exclusive needs a source operand.
1563 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Doubleword forms also transfer the odd partner register (Rm+1 / Rd+1).
1568 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1569 decodeRm(insn)+1)));
1573 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1574 decodeRd(insn)+1)));
1578 // Finally add the pointer operand.
1579 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1586 // Misc. Arithmetic Instructions.
1588 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1589 // RBIT, REV, REV16, REVSH: Rd Rm
// Misc arithmetic (PKHBT/PKHTB, RBIT, REV/REV16/REVSH): Rd, optional Rn
// (three-register form), Rm, then for PKH* an imm5 LSL/ASR shift operand.
// Rejects r15 in any of the register slots.
1590 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1591 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1593 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1594 unsigned &OpIdx = NumOpsAdded;
1599 && OpInfo[0].RegClass == ARM::GPRRegClassID
1600 && OpInfo[1].RegClass == ARM::GPRRegClassID
1601 && "Expect 2 reg operands");
1603 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1605 // Sanity check the registers, which should not be 15.
1606 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1608 if (ThreeReg && decodeRn(insn) == 15)
1611 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1616 assert(NumOps >= 4 && "Expect >= 4 operands");
1617 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1622 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1626 // If there is still an operand info left which is an immediate operand, add
1627 // an additional imm5 LSL/ASR operand.
1628 if (ThreeReg && OpInfo[OpIdx].RegClass < 0
1629 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1630 // Extract the 5-bit immediate field Inst{11-7}.
1631 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
// PKHBT encodes LSL, PKHTB encodes ASR; everything else has no shift.
1632 ARM_AM::ShiftOpc Opc = ARM_AM::no_shift;
1633 if (Opcode == ARM::PKHBT)
1635 else if (Opcode == ARM::PKHTB)
1637 getImmShiftSE(Opc, ShiftAmt);
1638 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShiftAmt)));
1645 /// DisassembleSatFrm - Disassemble saturate instructions:
1646 /// SSAT, SSAT16, USAT, and USAT16.
// Saturate instructions (SSAT/SSAT16/USAT/USAT16): Rd, the saturate-to
// position from Inst{20-16}, Rn, and — for the shift-capable forms
// (NumOpsAdded == 4) — an ASR/LSL shift operand.  Rejects r15 for d/n.
1647 static bool DisassembleSatFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1648 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1651 // if d == 15 || n == 15 then UNPREDICTABLE;
1652 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1655 const TargetInstrDesc &TID = ARMInsts[Opcode];
1656 NumOpsAdded = TID.getNumOperands() - 2; // ignore predicate operands
1658 // Disassemble register def.
1659 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// Saturate-to bit position; the signed forms adjust it below.
1662 unsigned Pos = slice(insn, 20, 16);
1663 if (Opcode == ARM::SSAT || Opcode == ARM::SSAT16)
1665 MI.addOperand(MCOperand::CreateImm(Pos));
1667 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1670 if (NumOpsAdded == 4) {
// Inst{6} selects ASR vs. LSL for the optional shift operand.
1671 ARM_AM::ShiftOpc Opc = (slice(insn, 6, 6) != 0 ? ARM_AM::asr : ARM_AM::lsl);
1672 // Inst{11-7} encodes the imm5 shift amount.
1673 unsigned ShAmt = slice(insn, 11, 7);
1675 // A8.6.183. Possible ASR shift amount of 32...
1676 if (Opc == ARM_AM::asr)
1679 Opc = ARM_AM::no_shift;
1681 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(Opc, ShAmt)));
1686 // Extend instructions.
1687 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1689 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1689 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// Extend instructions (SXT*/UXT*): Rd, optional Rn (three-register form),
// Rm, then an optional rotate immediate — Inst{11-10} scaled to a rotation
// of 8, 16, or 24 bits.  Rejects r15 for d/m.
1690 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1691 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1694 // if d == 15 || m == 15 then UNPREDICTABLE;
1695 if (decodeRd(insn) == 15 || decodeRm(insn) == 15)
1698 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1699 unsigned &OpIdx = NumOpsAdded;
1704 && OpInfo[0].RegClass == ARM::GPRRegClassID
1705 && OpInfo[1].RegClass == ARM::GPRRegClassID
1706 && "Expect 2 reg operands");
1708 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1710 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1715 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1720 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1724 // If there is still an operand info left which is an immediate operand, add
1725 // an additional rotate immediate operand.
1726 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
1727 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1728 // Extract the 2-bit rotate field Inst{11-10}.
1729 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1730 // Rotation by 8, 16, or 24 bits.
1731 MI.addOperand(MCOperand::CreateImm(rot << 3));
1738 /////////////////////////////////////
1740 // Utility Functions For VFP //
1742 /////////////////////////////////////
1744 // Extract/Decode Dd/Sd:
1746 // SP => d = UInt(Vd:D)
1747 // DP => d = UInt(D:Vd)
// Decodes the VFP destination register index: Vd:D for single precision,
// D:Vd for double precision (see the SP/DP comment above).
1748 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1749 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1750 : (decodeRd(insn) | getDBit(insn) << 4);
1753 // Extract/Decode Dn/Sn:
1755 // SP => n = UInt(Vn:N)
1756 // DP => n = UInt(N:Vn)
// Decodes the VFP first-source register index: Vn:N for single precision,
// N:Vn for double precision.
1757 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1758 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1759 : (decodeRn(insn) | getNBit(insn) << 4);
1762 // Extract/Decode Dm/Sm:
1764 // SP => m = UInt(Vm:M)
1765 // DP => m = UInt(M:Vm)
// Decodes the VFP second-source register index: Vm:M for single precision,
// M:Vm for double precision.
1766 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1767 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1768 : (decodeRm(insn) | getMBit(insn) << 4);
// Expands the 8-bit VFP modified immediate 'byte' (abcdefgh) into the
// corresponding N-bit (32 or 64) IEEE-754 bit pattern, returned as an APInt.
// Bit 7 is the sign, bit 6 is replicated through the exponent field, and
// bits 5-0 seed the mantissa — cf. the ARM ARM VFPExpandImm() pseudocode.
1772 static APInt VFPExpandImm(unsigned char byte, unsigned N) {
1773 assert(N == 32 || N == 64);
1776 unsigned bit6 = slice(byte, 6, 6);
1778 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
// Replicate bit6 into the f32 exponent: 0b11111 at bits 29-25, else bit 30.
1780 Result |= 0x1f << 25;
1782 Result |= 0x1 << 30;
1784 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1785 (uint64_t)slice(byte, 5, 0) << 48;
// Replicate bit6 into the f64 exponent: 0b11111111 at bits 61-54, else bit 62.
1787 Result |= 0xffULL << 54;
1789 Result |= 0x1ULL << 62;
1791 return APInt(N, Result);
1794 // VFP Unary Format Instructions:
1796 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1797 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1798 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// VFP unary ops: [S|D]Rd, then — except for the compare-with-zero opcodes,
// which have no source register operand — [S|D]Rm.  The SPR-vs-DPR class of
// each operand is taken from the operand info, so VCVTDS/VCVTSD mixed forms
// work naturally.
1799 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1800 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1802 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1804 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1805 unsigned &OpIdx = NumOpsAdded;
1809 unsigned RegClass = OpInfo[OpIdx].RegClass;
1810 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1811 "Reg operand expected");
1812 bool isSP = (RegClass == ARM::SPRRegClassID);
1814 MI.addOperand(MCOperand::CreateReg(
1815 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1818 // Early return for compare with zero instructions.
1819 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1820 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
// Re-read the class: Rm's precision may differ from Rd's (VCVTDS/VCVTSD).
1823 RegClass = OpInfo[OpIdx].RegClass;
1824 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1825 "Reg operand expected");
1826 isSP = (RegClass == ARM::SPRRegClassID);
1828 MI.addOperand(MCOperand::CreateReg(
1829 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1835 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1836 // Some of them have operand constraints which tie the first operand in the
1837 // InOperandList to that of the dst. As far as asm printing is concerned, this
1838 // tied_to operand is simply skipped.
// VFP binary ops: [S|D]Rd, an optional reg0 placeholder for a tied-to
// operand constraint, then [S|D]Rn and [S|D]Rm, all in the register class
// given by the operand info.
1839 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1840 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1842 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1844 const TargetInstrDesc &TID = ARMInsts[Opcode];
1845 const TargetOperandInfo *OpInfo = TID.OpInfo;
1846 unsigned &OpIdx = NumOpsAdded;
1850 unsigned RegClass = OpInfo[OpIdx].RegClass;
1851 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1852 "Reg operand expected");
1853 bool isSP = (RegClass == ARM::SPRRegClassID);
1855 MI.addOperand(MCOperand::CreateReg(
1856 getRegisterEnum(B, RegClass, decodeVFPRd(insn, isSP))));
1859 // Skip tied_to operand constraint.
1860 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1861 assert(NumOps >= 4 && "Expect >=4 operands");
1862 MI.addOperand(MCOperand::CreateReg(0));
1866 MI.addOperand(MCOperand::CreateReg(
1867 getRegisterEnum(B, RegClass, decodeVFPRn(insn, isSP))));
1870 MI.addOperand(MCOperand::CreateReg(
1871 getRegisterEnum(B, RegClass, decodeVFPRm(insn, isSP))));
1877 // A8.6.295 vcvt (floating-point <-> integer)
1878 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1879 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1881 // A8.6.297 vcvt (floating-point and fixed-point)
1882 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// VCVT between floating-point and integer (A8.6.295) or fixed-point
// (A8.6.297).  The fixed-point form emits Rd, a tied-to copy of operand 0,
// and the computed #fbits; the integer form emits mixed SPR/class operands
// depending on the conversion direction (Inst{18}).
1883 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1884 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1886 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1888 const TargetInstrDesc &TID = ARMInsts[Opcode];
1889 const TargetOperandInfo *OpInfo = TID.OpInfo;
1890 if (!OpInfo) return false;
1892 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1893 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1894 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1898 assert(NumOps >= 3 && "Expect >= 3 operands");
// A8.6.297: fbits = (16|32) - UInt(imm4:i), imm4 = Inst{3-0}, i = Inst{5}.
1899 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1900 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1901 MI.addOperand(MCOperand::CreateReg(
1902 getRegisterEnum(B, RegClassID,
1903 decodeVFPRd(insn, SP))));
1905 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1906 "Tied to operand expected");
1907 MI.addOperand(MI.getOperand(0));
1909 assert(OpInfo[2].RegClass < 0 && !OpInfo[2].isPredicate() &&
1910 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1911 MI.addOperand(MCOperand::CreateImm(fbits));
1916 // The Rd (destination) and Rm (source) bits have different interpretations
1917 // depending on their single-precision-ness.
1919 if (slice(insn, 18, 18) == 1) { // to_integer operation
// FP -> int: the integer side (Rd) is always a single-precision register.
1920 d = decodeVFPRd(insn, true /* Is Single Precision */);
1921 MI.addOperand(MCOperand::CreateReg(
1922 getRegisterEnum(B, ARM::SPRRegClassID, d)));
1923 m = decodeVFPRm(insn, SP);
1924 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, m)));
// int -> FP: the integer side (Rm) is always a single-precision register.
1926 d = decodeVFPRd(insn, SP);
1927 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, d)));
1928 m = decodeVFPRm(insn, true /* Is Single Precision */);
1929 MI.addOperand(MCOperand::CreateReg(
1930 getRegisterEnum(B, ARM::SPRRegClassID, m)));
1938 // VMOVRS - A8.6.330
1939 // Rt => Rd; Sn => UInt(Vn:N)
// VMOVRS (A8.6.330): GPR Rt destination, then single-precision Sn source.
1940 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1941 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1943 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1945 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1947 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1948 decodeVFPRn(insn, true))));
1953 // VMOVRRD - A8.6.332
1954 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1956 // VMOVRRS - A8.6.331
1957 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// VMOVRRD/VMOVRRS (A8.6.332/331): GPR Rt and Rt2 destinations, then either
// a single Dm or the Sm/Sm+1 pair, chosen by the operand's register class.
1958 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1959 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1961 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1963 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1964 unsigned &OpIdx = NumOpsAdded;
1966 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1968 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
1972 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1973 unsigned Sm = decodeVFPRm(insn, true);
1974 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1976 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1980 MI.addOperand(MCOperand::CreateReg(
1981 getRegisterEnum(B, ARM::DPRRegClassID,
1982 decodeVFPRm(insn, false))));
1988 // VMOVSR - A8.6.330
1989 // Rt => Rd; Sn => UInt(Vn:N)
// VMOVSR (A8.6.330): single-precision Sn destination, then GPR Rt source.
1990 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1991 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
1993 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1995 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
1996 decodeVFPRn(insn, true))));
1997 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2003 // VMOVDRR - A8.6.332
2004 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
2006 // VMOVRRS - A8.6.331
2007 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// VMOVDRR/VMOVSRR: the FP destination — a single Dm or the Sm/Sm+1 pair,
// chosen by the operand's register class — followed by GPR Rt and Rt2
// sources (the reverse direction of VFPConv3Frm).
2008 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
2009 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2011 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
2013 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2014 unsigned &OpIdx = NumOpsAdded;
2018 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
2019 unsigned Sm = decodeVFPRm(insn, true);
2020 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2022 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::SPRRegClassID,
2026 MI.addOperand(MCOperand::CreateReg(
2027 getRegisterEnum(B, ARM::DPRRegClassID,
2028 decodeVFPRm(insn, false))));
2032 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2034 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2040 // VFP Load/Store Instructions.
2041 // VLDRD, VLDRS, VSTRD, VSTRS
// VLDR/VSTR: Dd or Sd (chosen by opcode), the GPR base register, then the
// AM5 offset operand built from the U bit (+/-) and the imm8 field.
2042 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2043 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2045 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
2047 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS);
2048 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2050 // Extract Dd/Sd for operand 0.
2051 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2053 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID, RegD)));
2055 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2056 MI.addOperand(MCOperand::CreateReg(Base));
2058 // Next comes the AM5 Opcode.
2059 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2060 unsigned char Imm8 = insn & 0xFF;
2061 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
2068 // VFP Load/Store Multiple Instructions.
2069 // We have an optional write back reg, the base, and two predicate operands.
2070 // It is then followed by a reglist of either DPR(s) or SPR(s).
2072 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
// VLDM/VSTM: optional base writeback def (for the *_UPD opcodes), the base
// register, the two predicate operands, then a run of Regs consecutive
// SPR/DPR registers starting at RegD.  The register count comes from
// Inst{7-0} (halved for the DPR forms); malformed counts are rejected.
2073 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2074 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2076 assert(NumOps >= 4 && "VFPLdStMulFrm expects NumOps >= 4");
2078 unsigned &OpIdx = NumOpsAdded;
2082 unsigned Base = getRegisterEnum(B, ARM::GPRRegClassID, decodeRn(insn));
2084 // Writeback to base, if necessary.
2085 if (Opcode == ARM::VLDMDIA_UPD || Opcode == ARM::VLDMSIA_UPD ||
2086 Opcode == ARM::VLDMDDB_UPD || Opcode == ARM::VLDMSDB_UPD ||
2087 Opcode == ARM::VSTMDIA_UPD || Opcode == ARM::VSTMSIA_UPD ||
2088 Opcode == ARM::VSTMDDB_UPD || Opcode == ARM::VSTMSDB_UPD) {
2089 MI.addOperand(MCOperand::CreateReg(Base));
2093 MI.addOperand(MCOperand::CreateReg(Base));
2095 // Handling the two predicate operands before the reglist.
2096 int64_t CondVal = getCondField(insn);
2099 MI.addOperand(MCOperand::CreateImm(CondVal));
2100 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
2104 bool isSPVFP = (Opcode == ARM::VLDMSIA ||
2105 Opcode == ARM::VLDMSIA_UPD || Opcode == ARM::VLDMSDB_UPD ||
2106 Opcode == ARM::VSTMSIA ||
2107 Opcode == ARM::VSTMSIA_UPD || Opcode == ARM::VSTMSDB_UPD);
2108 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
2111 unsigned RegD = decodeVFPRd(insn, isSPVFP);
2113 // Fill the variadic part of reglist.
// Imm8 counts SPR registers; DPR forms transfer two words per register.
2114 unsigned char Imm8 = insn & 0xFF;
2115 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
2117 // Apply some sanity checks before proceeding.
2118 if (Regs == 0 || (RegD + Regs) > 32 || (!isSPVFP && Regs > 16))
2121 for (unsigned i = 0; i < Regs; ++i) {
2122 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClassID,
2130 // Misc. VFP Instructions.
2131 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
2132 // FCONSTD (DPR and a VFPf64Imm operand)
2133 // FCONSTS (SPR and a VFPf32Imm operand)
2134 // VMRS/VMSR (GPR operand)
// Misc VFP instructions (see comment block above): FMSTAT adds no operands;
// FCONSTD/FCONSTS add a DPR/SPR register plus the expanded FP immediate;
// VMRS/VMSR add a GPR.  The register class is taken from OpInfo[0].
2135 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2136 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2138 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2139 unsigned &OpIdx = NumOpsAdded;
2143 if (Opcode == ARM::FMSTAT)
2146 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
2148 unsigned RegEnum = 0;
2149 switch (OpInfo[0].RegClass) {
2150 case ARM::DPRRegClassID:
2151 RegEnum = getRegisterEnum(B, ARM::DPRRegClassID, decodeVFPRd(insn, false));
2153 case ARM::SPRRegClassID:
2154 RegEnum = getRegisterEnum(B, ARM::SPRRegClassID, decodeVFPRd(insn, true));
2156 case ARM::GPRRegClassID:
2157 RegEnum = getRegisterEnum(B, ARM::GPRRegClassID, decodeRd(insn));
2160 assert(0 && "Invalid reg class id");
2164 MI.addOperand(MCOperand::CreateReg(RegEnum));
2167 // Extract/decode the f64/f32 immediate.
2168 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2169 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2170 // The asm syntax specifies the floating point value, not the 8-bit literal.
// The 8-bit modified immediate is split across Inst{19-16} and Inst{3-0}.
2171 APInt immRaw = VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
2172 Opcode == ARM::FCONSTD ? 64 : 32);
2173 APFloat immFP = APFloat(immRaw, true);
2174 double imm = Opcode == ARM::FCONSTD ? immFP.convertToDouble() :
2175 immFP.convertToFloat();
2176 MI.addOperand(MCOperand::CreateFPImm(imm));
2184 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
2185 #include "ThumbDisassemblerCore.h"
2187 /////////////////////////////////////////////////////
2189 // Utility Functions For ARM Advanced SIMD //
2191 /////////////////////////////////////////////////////
2193 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
2194 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
2196 // A7.3 Register encoding
2198 // Extract/Decode NEON D/Vd:
2200 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2201 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2202 // handling it in the getRegisterEnum() utility function.
2203 // D = Inst{22}, Vd = Inst{15-12}
// Form the 5-bit NEON Rd index by concatenating D (Inst{22}) as the high bit
// with Vd (Inst{15-12}); quad/double adjustment happens in getRegisterEnum().
2204 static unsigned decodeNEONRd(uint32_t insn) {
2205 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2206 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2209 // Extract/Decode NEON N/Vn:
2211 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2212 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2213 // handling it in the getRegisterEnum() utility function.
2214 // N = Inst{7}, Vn = Inst{19-16}
// Form the 5-bit NEON Rn index by concatenating N (Inst{7}) as the high bit
// with Vn (Inst{19-16}); quad/double adjustment happens in getRegisterEnum().
2215 static unsigned decodeNEONRn(uint32_t insn) {
2216 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2217 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2220 // Extract/Decode NEON M/Vm:
2222 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2223 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2224 // handling it in the getRegisterEnum() utility function.
2225 // M = Inst{5}, Vm = Inst{3-0}
// Form the 5-bit NEON Rm index by concatenating M (Inst{5}) as the high bit
// with Vm (Inst{3-0}); quad/double adjustment happens in getRegisterEnum().
2226 static unsigned decodeNEONRm(uint32_t insn) {
2227 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2228 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2239 } // End of unnamed namespace
2241 // size field -> Inst{11-10}
2242 // index_align field -> Inst{7-4}
2244 // The Lane Index interpretation depends on the Data Size:
2245 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2246 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2247 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2249 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
// Extract the lane index from index_align (Inst{7-4}) for single-lane
// VLD/VST: drop the low align bit, then drop 'size' more bits, per the
// size-dependent table in the comment above.
2250 static unsigned decodeLaneIndex(uint32_t insn) {
2251 unsigned size = insn >> 10 & 3;
2252 assert((size == 0 || size == 1 || size == 2) &&
2253 "Encoding error: size should be either 0, 1, or 2");
2255 unsigned index_align = insn >> 4 & 0xF;
// (index_align >> 1) removes the align bit; >> size narrows to the index.
2256 return (index_align >> 1) >> size;
2259 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2260 // op = Inst{5}, cmode = Inst{11-8}
2261 // i = Inst{24} (ARM architecture)
2262 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2263 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
// Pack op (Inst{5}), cmode (Inst{11-8}) and the reassembled 8-bit literal
// i:imm3:imm4 into a single op:cmode:imm8 value; the printer expands it via
// AdvSIMDExpandImm later.  NOTE(review): the imm4 (Inst{3-0}) term of the
// Imm8 expression falls on an elided line in this extraction -- confirm
// against the full file.
2264 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2265 unsigned char op = (insn >> 5) & 1;
2266 unsigned char cmode = (insn >> 8) & 0xF;
2267 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2268 ((insn >> 16) & 7) << 4 |
2270 return (op << 12) | (cmode << 8) | Imm8;
2273 // A8.6.339 VMUL, VMULL (by scalar)
2274 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2275 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// Return Vm restricted per element size (see table above: 3 bits for ESize16,
// 4 bits for ESize32).  NOTE(review): the per-size switch body is elided in
// this extraction; only the unreachable-default assert is visible.
2276 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2283 assert(0 && "Unreachable code!");
2288 // A8.6.339 VMUL, VMULL (by scalar)
2289 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2290 // ESize32 => index = Inst{5} (M) D0-D15
// Return the scalar index for VMUL(L) by scalar: M:Vm<3> (two bits) for
// ESize16, M alone for ESize32; any other size asserts.
2291 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2294 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2296 return (insn >> 5) & 1;
2298 assert(0 && "Unreachable code!");
2303 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2304 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// Recover <fbits> for VCVT fixed-point: imm6 (Inst{21-16}) encodes 64-fbits.
2305 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2306 return 64 - ((insn >> 16) & 0x3F);
2309 // A8.6.302 VDUP (scalar)
2310 // ESize8 => index = Inst{19-17}
2311 // ESize16 => index = Inst{19-18}
2312 // ESize32 => index = Inst{19}
// Extract the VDUP (scalar) lane index from Inst{19-17}; the number of bits
// used shrinks as the element size grows (3/2/1 bits for 8/16/32).
2313 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2316 return (insn >> 17) & 7;
2318 return (insn >> 18) & 3;
2320 return (insn >> 19) & 1;
2322 assert(0 && "Unspecified element size!");
2327 // A8.6.328 VMOV (ARM core register to scalar)
2328 // A8.6.329 VMOV (scalar to ARM core register)
2329 // ESize8 => index = Inst{21:6-5}
2330 // ESize16 => index = Inst{21:6}
2331 // ESize32 => index = Inst{21}
// Extract the lane index for VMOV to/from scalar: opc1{21} concatenated with
// part of Inst{6-5}, with fewer low bits as the element size grows.
2332 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2335 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2337 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2339 return ((insn >> 21) & 1);
2341 assert(0 && "Unspecified element size!");
2346 // Imm6 = Inst{21-16}, L = Inst{7}
2348 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2350 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2351 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2352 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2353 // '1xxxxxx' => esize = 64; shift_amount = imm6
2355 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2357 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2358 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2359 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2360 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
// Decode the NEON shift amount from L:imm6 per the tables above: classify
// the element size from the leading-ones position of L:imm6, then derive the
// shift amount differently for left vs. right shifts.
// NOTE(review): the esize-classification branches are partly elided in this
// extraction (the L==1 => ESize64 case and some assignments are missing).
2362 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2363 ElemSize esize = ESizeNA;
2364 unsigned L = (insn >> 7) & 1;
2365 unsigned imm6 = (insn >> 16) & 0x3F;
2369 else if (imm6 >> 4 == 1)
2371 else if (imm6 >> 5 == 1)
2374 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// Left shift: amount = imm6 - esize (imm6 itself for 64-bit elements).
2379 return esize == ESize64 ? imm6 : (imm6 - esize);
// Right shift: amount = 2*esize - imm6 (esize - imm6 for 64-bit elements).
2381 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2385 // Imm4 = Inst{11-8}
// Extract the 4-bit immediate (Inst{11-8}), used e.g. as VEXT's byte index.
2386 static unsigned decodeN3VImm(uint32_t insn) {
2387 return (insn >> 8) & 0xF;
2391 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2393 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2395 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2397 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2399 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
// Worker for NEON load/store multiple-structure and single-lane forms.  The
// operand order differs between stores (register list first) and loads
// (addrmode6 first); the Store/DblSpaced/alignment parameters are computed by
// the caller (DisassembleNLdSt).  NOTE(review): this extraction elides many
// lines (OpIdx increments, branch structure, returns) -- the visible lines
// show the two symmetric store/load operand-building sequences.
2400 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2401 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced,
2402 unsigned alignment, BO B) {
2404 const TargetInstrDesc &TID = ARMInsts[Opcode];
2405 const TargetOperandInfo *OpInfo = TID.OpInfo;
2407 // At least one DPR register plus addressing mode #6.
2408 assert(NumOps >= 3 && "Expect >= 3 operands");
2410 unsigned &OpIdx = NumOpsAdded;
2414 // We have homogeneous NEON registers for Load/Store.
2415 unsigned RegClass = 0;
2417 // Double-spaced registers have increments of 2.
2418 unsigned Inc = DblSpaced ? 2 : 1;
2420 unsigned Rn = decodeRn(insn);
2421 unsigned Rm = decodeRm(insn);
2422 unsigned Rd = decodeNEONRd(insn);
2424 // A7.7.1 Advanced SIMD addressing mode.
2427 // LLVM Addressing Mode #6.
2428 unsigned RmEnum = 0;
2430 RmEnum = getRegisterEnum(B, ARM::GPRRegClassID, Rm);
// Store path: possible writeback GPR, addrmode6 (base + alignment imm),
// possible increment register, then the DPR/QPR list and optional lane index.
2433 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2434 // then possible lane index.
2435 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2436 "Reg operand expected");
2439 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2444 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2445 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2446 // addrmode6 := (ops GPR:$addr, i32imm)
2447 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2449 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2453 MI.addOperand(MCOperand::CreateReg(RmEnum));
2457 assert(OpIdx < NumOps &&
2458 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2459 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2460 "Reg operand expected");
// Emit the homogeneous register list; Rd advances by Inc (2 if double-spaced)
// on the elided lines of this loop.
2462 RegClass = OpInfo[OpIdx].RegClass;
2463 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2464 MI.addOperand(MCOperand::CreateReg(
2465 getRegisterEnum(B, RegClass, Rd)));
2470 // Handle possible lane index.
2471 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2472 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2473 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// Load path: DPR/QPR defs first, then writeback, addrmode6, increment reg,
// ignored TIED_TO source registers, and the optional lane index.
2478 // Consume the DPR/QPR's, possible WB, AddrMode6, possible incrment reg,
2479 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2480 RegClass = OpInfo[0].RegClass;
2482 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2483 MI.addOperand(MCOperand::CreateReg(
2484 getRegisterEnum(B, RegClass, Rd)));
2490 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2495 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2496 OpInfo[OpIdx + 1].RegClass < 0 && "Addrmode #6 Operands expected");
2497 // addrmode6 := (ops GPR:$addr, i32imm)
2498 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
2500 MI.addOperand(MCOperand::CreateImm(alignment)); // Alignment
2504 MI.addOperand(MCOperand::CreateReg(RmEnum));
// TIED_TO source registers are placeholders (reg 0); the constraint carries
// the real register identity.
2508 while (OpIdx < NumOps && (unsigned)OpInfo[OpIdx].RegClass == RegClass) {
2509 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2510 "Tied to operand expected");
2511 MI.addOperand(MCOperand::CreateReg(0));
2515 // Handle possible lane index.
2516 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2517 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2518 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2523 // Accessing registers past the end of the NEON register file is not
2531 // A8.6.308, A8.6.311, A8.6.314, A8.6.317.
// Validate index_align and compute the alignment (in bits) for single-lane
// VLD1/VLD2/VLD3/VLD4 (and VST counterparts), per the referenced A8.6
// sections; returns false for UNDEFINED index_align encodings.
// NOTE(review): this extraction elides the outer switch on 'elem', the
// 'bits' declaration, most alignment assignments and returns -- the visible
// lines are only the per-size index_align checks; confirm against the full
// file before relying on any specific branch.
2532 static bool Align4OneLaneInst(unsigned elem, unsigned size,
2533 unsigned index_align, unsigned & alignment) {
2541 return slice(index_align, 0, 0) == 0;
2542 else if (size == 1) {
2543 bits = slice(index_align, 1, 0);
2544 if (bits != 0 && bits != 1)
2549 } else if (size == 2) {
2550 bits = slice(index_align, 2, 0);
2551 if (bits != 0 && bits != 3)
2561 if (slice(index_align, 0, 0) == 1)
2565 if (slice(index_align, 0, 0) == 1)
2568 } else if (size == 2) {
2569 if (slice(index_align, 1, 1) != 0)
2571 if (slice(index_align, 0, 0) == 1)
2579 if (slice(index_align, 0, 0) != 0)
2583 if (slice(index_align, 0, 0) != 0)
2587 } else if (size == 2) {
2588 if (slice(index_align, 1, 0) != 0)
2596 if (slice(index_align, 0, 0) == 1)
2600 if (slice(index_align, 0, 0) == 1)
2603 } else if (size == 2) {
2604 bits = slice(index_align, 1, 0);
2618 // If L (Inst{21}) == 0, store instructions.
2619 // Find out about double-spaced-ness of the Opcode and pass it on to
2620 // DisassembleNLdSt0().
// Front end for NEON load/store disassembly: classifies the opcode by name
// (VLD1..VLD4 / VST1..VST4, LN single-lane, DUP to-all-lanes, or multiple
// structures), computes double-spacing and alignment, rejects UNDEFINED
// encodings, then delegates to DisassembleNLdSt0.  L (Inst{21}) == 0 means a
// store.  NOTE(review): lines are elided in this extraction (elem
// assignments, some returns and switch labels); comments below describe only
// what the visible lines establish.
2621 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2622 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2624 const StringRef Name = ARMInsts[Opcode].Name;
2625 bool DblSpaced = false;
2626 // 0 represents standard alignment, i.e., unaligned data access.
2627 unsigned alignment = 0;
// Number of elements per structure, taken from the opcode name prefix.
2629 unsigned elem = 0; // legal values: {1, 2, 3, 4}
2630 if (Name.startswith("VST1") || Name.startswith("VLD1"))
2633 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2636 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2639 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2642 if (Name.find("LN") != std::string::npos) {
2643 // To one lane instructions.
2644 // See, for example, 8.6.317 VLD4 (single 4-element structure to one lane.
2646 // Utility function takes number of elements, size, and index_align.
2647 if (!Align4OneLaneInst(elem,
2648 slice(insn, 11, 10),
2653 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2654 if (Name.endswith("16") || Name.endswith("16_UPD"))
2655 DblSpaced = slice(insn, 5, 5) == 1;
2657 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2658 if (Name.endswith("32") || Name.endswith("32_UPD"))
2659 DblSpaced = slice(insn, 6, 6) == 1;
2660 } else if (Name.find("DUP") != std::string::npos) {
2661 // Single element (or structure) to all lanes.
2662 // Inst{9-8} encodes the number of element(s) in the structure, with:
2663 // 0b00 (VLD1DUP) (for this, a bit makes sense only for data size 16 and 32.
2665 // 0b10 (VLD3DUP) (for this, a bit must be encoded as 0)
2668 // Inst{7-6} encodes the data size, with:
2669 // 0b00 => 8, 0b01 => 16, 0b10 => 32
2671 // Inst{4} (the a bit) encodes the align action (0: standard alignment)
// This 'elem' deliberately shadows the outer one: for DUP it comes from
// Inst{9-8}, not from the opcode name.
2672 unsigned elem = slice(insn, 9, 8) + 1;
2673 unsigned a = slice(insn, 4, 4);
2675 // 0b11 is not a valid encoding for Inst{7-6}.
2676 if (slice(insn, 7, 6) == 3)
2678 unsigned data_size = 8 << slice(insn, 7, 6);
2679 // For VLD1DUP, a bit makes sense only for data size of 16 and 32.
2680 if (a && data_size == 8)
2683 // Now we can calculate the alignment!
2685 alignment = elem * data_size;
2688 // A8.6.315 VLD3 (single 3-element structure to all lanes)
2689 // The a bit must be encoded as 0.
2694 // Multiple n-element structures with type encoded as Inst{11-8}.
2695 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2697 // Inst{5-4} encodes alignment.
2698 unsigned align = slice(insn, 5, 4);
2703 alignment = 64; break;
2705 alignment = 128; break;
2707 alignment = 256; break;
2710 unsigned type = slice(insn, 11, 8);
2711 // Reject UNDEFINED instructions based on type and align.
2712 // Plus set DblSpaced flag where appropriate.
2718 // A8.6.307 & A8.6.391
2719 if ((type == 7 && slice(align, 1, 1) == 1) ||
2720 (type == 10 && align == 3) ||
2721 (type == 6 && slice(align, 1, 1) == 1))
2725 // n == 2 && type == 0b1001 -> DblSpaced = true
2726 // A8.6.310 & A8.6.393
2727 if ((type == 8 || type == 9) && align == 3)
2729 DblSpaced = (type == 9);
2732 // n == 3 && type == 0b0101 -> DblSpaced = true
2733 // A8.6.313 & A8.6.395
2734 if (slice(insn, 7, 6) == 3 || slice(align, 1, 1) == 1)
2736 DblSpaced = (type == 5);
2739 // n == 4 && type == 0b0001 -> DblSpaced = true
2740 // A8.6.316 & A8.6.397
2741 if (slice(insn, 7, 6) == 3)
2743 DblSpaced = (type == 1);
// alignment is carried in bits up to here; the worker expects bytes.
2747 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2748 slice(insn, 21, 21) == 0, DblSpaced, alignment/8, B);
2755 // Qd/Dd imm src(=Qd/Dd)
// One NEON register plus a modified immediate (VMOV/VMVN/VBIC/VORR
// immediate forms): emits Qd/Dd, then the packed op:cmode:imm8 immediate,
// then -- for VBICi*/VORRi* -- the tied $src register (same as Dd).
2756 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2757 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2759 const TargetInstrDesc &TID = ARMInsts[Opcode];
2760 const TargetOperandInfo *OpInfo = TID.OpInfo;
2762 assert(NumOps >= 2 &&
2763 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2764 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2765 (OpInfo[1].RegClass < 0) &&
2766 "Expect 1 reg operand followed by 1 imm operand");
2768 // Qd/Dd = Inst{22:15-12} => NEON Rd
2769 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2770 decodeNEONRd(insn))));
// Element size is implied by the opcode; it parameterizes decodeN1VImm.
// NOTE(review): the esize assignments and some case labels (e.g. the i8/i64
// groups' bodies) fall on elided lines in this extraction.
2772 ElemSize esize = ESizeNA;
2775 case ARM::VMOVv16i8:
2778 case ARM::VMOVv4i16:
2779 case ARM::VMOVv8i16:
2780 case ARM::VMVNv4i16:
2781 case ARM::VMVNv8i16:
2782 case ARM::VBICiv4i16:
2783 case ARM::VBICiv8i16:
2784 case ARM::VORRiv4i16:
2785 case ARM::VORRiv8i16:
2788 case ARM::VMOVv2i32:
2789 case ARM::VMOVv4i32:
2790 case ARM::VMVNv2i32:
2791 case ARM::VMVNv4i32:
2792 case ARM::VBICiv2i32:
2793 case ARM::VBICiv4i32:
2794 case ARM::VORRiv2i32:
2795 case ARM::VORRiv4i32:
2798 case ARM::VMOVv1i64:
2799 case ARM::VMOVv2i64:
2803 assert(0 && "Unexpected opcode!");
2807 // One register and a modified immediate value.
2808 // Add the imm operand.
2809 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2813 // VBIC/VORRiv*i* variants have an extra $src = $Vd to be filled in.
2815 (OpInfo[2].RegClass == ARM::DPRRegClassID ||
2816 OpInfo[2].RegClass == ARM::QPRRegClassID)) {
2817 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[0].RegClass,
2818 decodeNEONRd(insn))));
2829 N2V_VectorConvert_Between_Float_Fixed
2831 } // End of unnamed namespace
2833 // Vector Convert [between floating-point and fixed-point]
2834 // Qd/Dd Qm/Dm [fbits]
2836 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2837 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2840 // Vector Move Long:
2843 // Vector Move Narrow:
// Two-register NEON worker (Vd, Vm, optional immediate): used by the plain
// two-register, VCVT fixed-point, and VDUP-lane front ends below.  Flag
// selects how the trailing immediate, if any, is decoded.
2847 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2848 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag, BO B) {
2850 const TargetInstrDesc &TID = ARMInsts[Opc];
2851 const TargetOperandInfo *OpInfo = TID.OpInfo;
2853 assert(NumOps >= 2 &&
2854 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2855 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2856 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2857 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2858 "Expect >= 2 operands and first 2 as reg operands");
2860 unsigned &OpIdx = NumOpsAdded;
2864 ElemSize esize = ESizeNA;
2865 if (Flag == N2V_VectorDupLane) {
2866 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2867 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2868 "Unexpected Opcode");
// Nested ternary: 8-bit for VDUPLN8*, 16-bit for VDUPLN16*, else 32-bit.
2869 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2870 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2874 // Qd/Dd = Inst{22:15-12} => NEON Rd
2875 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2876 decodeNEONRd(insn))));
// A TIED_TO def gets a placeholder register-0 operand.
2880 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2882 MI.addOperand(MCOperand::CreateReg(0));
2886 // Dm = Inst{5:3-0} => NEON Rm
2887 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2888 decodeNEONRm(insn))));
2891 // VZIP and others have two TIED_TO reg operands.
2893 while (OpIdx < NumOps &&
2894 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2895 // Add TIED_TO operand.
2896 MI.addOperand(MI.getOperand(Idx));
2900 // Add the imm operand, if required.
2901 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
2902 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Sentinel: decoding must overwrite this, or the assert fires.
2904 unsigned imm = 0xFFFFFFFF;
2906 if (Flag == N2V_VectorDupLane)
2907 imm = decodeNVLaneDupIndex(insn, esize);
2908 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2909 imm = decodeVCVTFractionBits(insn);
2911 assert(imm != 0xFFFFFFFF && "Internal error");
2912 MI.addOperand(MCOperand::CreateImm(imm));
// Plain two-register NEON form: thin wrapper over DisassembleNVdVmOptImm
// (the flag argument continues on an elided line).
2919 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2920 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2922 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
// VCVT between floating-point and fixed-point: wrapper that selects the
// fraction-bits immediate decoding.
2925 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2926 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2928 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2929 N2V_VectorConvert_Between_Float_Fixed, B);
// VDUP (scalar to all elements): wrapper that selects the dup-lane index
// decoding.
2931 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2932 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
2934 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2935 N2V_VectorDupLane, B);
2938 // Vector Shift [Accumulate] Instructions.
2939 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2941 // Vector Shift Left Long (with maximum shift count) Instructions.
2942 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
// Worker for NEON vector-shift-by-immediate forms: Qd/Dd, optional TIED_TO
// accumulator placeholder, Qm/Dm, then the shift amount.  LeftShift picks
// the decodeNVSAmt interpretation; the VSHLL special case (max shift count
// inferred from the opcode) is on elided lines.
2944 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2945 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift, BO B) {
2947 const TargetInstrDesc &TID = ARMInsts[Opcode];
2948 const TargetOperandInfo *OpInfo = TID.OpInfo;
2950 assert(NumOps >= 3 &&
2951 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2952 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2953 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2954 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2955 "Expect >= 3 operands and first 2 as reg operands");
2957 unsigned &OpIdx = NumOpsAdded;
2961 // Qd/Dd = Inst{22:15-12} => NEON Rd
2962 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2963 decodeNEONRd(insn))));
// Shift-accumulate variants (e.g. VSRA) tie the accumulator to Dd.
2966 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2968 MI.addOperand(MCOperand::CreateReg(0));
2972 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2973 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2974 "Reg operand expected");
2976 // Qm/Dm = Inst{5:3-0} => NEON Rm
2977 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
2978 decodeNEONRm(insn))));
2981 assert(OpInfo[OpIdx].RegClass < 0 && "Imm operand expected");
2983 // Add the imm operand.
2985 // VSHLL has maximum shift count as the imm, inferred from its size.
2989 Imm = decodeNVSAmt(insn, LeftShift);
3001 MI.addOperand(MCOperand::CreateImm(Imm));
3007 // Left shift instructions.
3008 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
3009 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
// Delegate with LeftShift == true (the trailing 'B);' is on an elided line).
3011 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true,
3014 // Right shift instructions have different shift amount interpretation.
3015 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
3016 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
// Delegate with LeftShift == false (the trailing 'B);' is on an elided line).
3018 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false,
3027 N3V_Multiply_By_Scalar
3029 } // End of unnamed namespace
3031 // NEON Three Register Instructions with Optional Immediate Operand
3033 // Vector Extract Instructions.
3034 // Qd/Dd Qn/Dn Qm/Dm imm4
3036 // Vector Shift (Register) Instructions.
3037 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
3039 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
3040 // Qd/Dd Qn/Dn RestrictedDm index
// Three-register NEON worker (Vd, Vn, Vm, optional immediate) shared by the
// plain three-register, vector-shift-register, VEXT, and
// multiply-by-scalar front ends below.  Flag controls operand order
// (shift-register forms swap n/m), whether the immediate is VEXT's imm4,
// and whether Dm is range-restricted.
3043 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
3044 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag, BO B) {
3046 const TargetInstrDesc &TID = ARMInsts[Opcode];
3047 const TargetOperandInfo *OpInfo = TID.OpInfo;
3049 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
3050 assert(NumOps >= 3 &&
3051 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
3052 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
3053 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
3054 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
3055 "Expect >= 3 operands and first 2 as reg operands");
3057 unsigned &OpIdx = NumOpsAdded;
// Flag-derived behavior switches, spelled out once up front.
3061 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
3062 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
3063 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
3064 ElemSize esize = ESizeNA;
3065 if (Flag == N3V_Multiply_By_Scalar) {
// size (Inst{21-20}) must be 0b01 (16-bit) or 0b10 (32-bit) here.
3066 unsigned size = (insn >> 20) & 3;
3067 if (size == 1) esize = ESize16;
3068 if (size == 2) esize = ESize32;
3069 assert (esize == ESize16 || esize == ESize32);
3072 // Qd/Dd = Inst{22:15-12} => NEON Rd
3073 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, OpInfo[OpIdx].RegClass,
3074 decodeNEONRd(insn))));
3077 // VABA, VABAL, VBSLd, VBSLq, ...
// Accumulating/select opcodes tie a source to Dd; placeholder reg 0.
3078 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
3080 MI.addOperand(MCOperand::CreateReg(0));
3084 // Dn = Inst{7:19-16} => NEON Rn
3086 // Dm = Inst{5:3-0} => NEON Rm
// Second register: Rn normally, Rm for the swapped shift-register order.
3087 MI.addOperand(MCOperand::CreateReg(
3088 getRegisterEnum(B, OpInfo[OpIdx].RegClass,
3089 VdVnVm ? decodeNEONRn(insn)
3090 : decodeNEONRm(insn))));
3093 // Special case handling for VMOVDneon and VMOVQ because they are marked as
3095 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
3098 // Dm = Inst{5:3-0} => NEON Rm
3100 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
3102 // Dn = Inst{7:19-16} => NEON Rn
// Third register mirrors the second: Rm (possibly restricted) or Rn.
3103 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
3104 : decodeNEONRm(insn))
3105 : decodeNEONRn(insn);
3107 MI.addOperand(MCOperand::CreateReg(
3108 getRegisterEnum(B, OpInfo[OpIdx].RegClass, m)));
3111 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass < 0
3112 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
3113 // Add the imm operand.
3116 Imm = decodeN3VImm(insn);
3117 else if (IsDmRestricted)
3118 Imm = decodeRestrictedDmIndex(insn, esize);
3120 assert(0 && "Internal error: unreachable code!");
3124 MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register NEON form: thin wrapper over DisassembleNVdVnVmOptImm
// (the flag argument continues on an elided line).
3131 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3132 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3134 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// Vector shift (register) form: note the swapped m/n operand order handled
// by the N3V_VectorShift flag.
3137 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
3138 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3140 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3141 N3V_VectorShift, B);
// VEXT (vector extract): three registers plus the imm4 byte index.
3143 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode,
3144 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3146 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3147 N3V_VectorExtract, B);
// VMUL/VMULL (by scalar): restricted Dm plus a lane index.
3149 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
3150 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3152 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
3153 N3V_Multiply_By_Scalar, B);
3156 // Vector Table Lookup
3158 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
3159 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
3160 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
3161 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
// Table lookup: Dd, optional tied source (VTBX), the list of Len table
// registers starting at Dn, then the index vector Dm.  Len comes from
// Inst{9-8} + 1 (see the table in the comment below).
3162 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3163 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3165 const TargetInstrDesc &TID = ARMInsts[Opcode];
3166 const TargetOperandInfo *OpInfo = TID.OpInfo;
3167 if (!OpInfo) return false;
3169 assert(NumOps >= 3 &&
3170 OpInfo[0].RegClass == ARM::DPRRegClassID &&
3171 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3172 OpInfo[2].RegClass == ARM::DPRRegClassID &&
3173 "Expect >= 3 operands and first 3 as reg operands");
3175 unsigned &OpIdx = NumOpsAdded;
3179 unsigned Rn = decodeNEONRn(insn);
3181 // {Dn} encoded as len = 0b00
3182 // {Dn Dn+1} encoded as len = 0b01
3183 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
3184 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
3185 unsigned Len = slice(insn, 9, 8) + 1;
3187 // Dd (the destination vector)
3188 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3189 decodeNEONRd(insn))));
3192 // Process tied_to operand constraint.
// VTBX reuses the destination as a source; mirror the already-built operand.
3194 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
3195 MI.addOperand(MI.getOperand(Idx));
3199 // Do the <list> now.
// Each iteration emits the next consecutive table register (Rn advances on
// an elided line of this loop).
3200 for (unsigned i = 0; i < Len; ++i) {
3201 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
3202 "Reg operand expected");
3203 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3208 // Dm (the index vector)
3209 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
3210 "Reg operand (index vector) expected");
3211 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3212 decodeNEONRm(insn))));
3218 // Vector Get Lane (move scalar to ARM core register) Instructions.
3219 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
// VGETLN (move scalar to ARM core register): emits Rt, Dn, then the lane
// index whose width depends on the element size implied by the opcode.
3220 static bool DisassembleNGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3221 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3223 const TargetInstrDesc &TID = ARMInsts[Opcode];
3224 const TargetOperandInfo *OpInfo = TID.OpInfo;
3225 if (!OpInfo) return false;
3227 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
3228 OpInfo[0].RegClass == ARM::GPRRegClassID &&
3229 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3230 OpInfo[2].RegClass < 0 &&
3231 "Expect >= 3 operands with one dst operand");
// Element size from opcode: i32 => 32, s16/u16 => 16, else (s8/u8) 8.
3234 Opcode == ARM::VGETLNi32 ? ESize32
3235 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
3238 // Rt = Inst{15-12} => ARM Rd
3239 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3242 // Dn = Inst{7:19-16} => NEON Rn
3243 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3244 decodeNEONRn(insn))));
3246 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3252 // Vector Set Lane (move ARM core register to scalar) Instructions.
3253 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
// VSETLN (move ARM core register to scalar): emits Dd, the tied source
// placeholder, Rt, then the lane index.
3254 static bool DisassembleNSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3255 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3257 const TargetInstrDesc &TID = ARMInsts[Opcode];
3258 const TargetOperandInfo *OpInfo = TID.OpInfo;
3259 if (!OpInfo) return false;
3261 assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
3262 OpInfo[0].RegClass == ARM::DPRRegClassID &&
3263 OpInfo[1].RegClass == ARM::DPRRegClassID &&
3264 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
3265 OpInfo[2].RegClass == ARM::GPRRegClassID &&
3266 OpInfo[3].RegClass < 0 &&
3267 "Expect >= 3 operands with one dst operand");
// Element size from opcode: i8 => 8, i16 => 16, else (i32) 32.
3270 Opcode == ARM::VSETLNi8 ? ESize8
3271 : (Opcode == ARM::VSETLNi16 ? ESize16
3274 // Dd = Inst{7:19-16} => NEON Rn
3275 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::DPRRegClassID,
3276 decodeNEONRn(insn))));
// TIED_TO source Dd: placeholder register 0.
3279 MI.addOperand(MCOperand::CreateReg(0));
3281 // Rt = Inst{15-12} => ARM Rd
3282 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3285 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
3291 // Vector Duplicate Instructions (from ARM core register to all elements).
3292 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
// VDUP (ARM core register to all elements): emits Qd/Dd then Rt.  Note the
// destination uses the N/Vn field here (see A8.6.303 remark above).
3293 static bool DisassembleNDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3294 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3296 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3298 assert(NumOps >= 2 &&
3299 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
3300 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
3301 OpInfo[1].RegClass == ARM::GPRRegClassID &&
3302 "Expect >= 2 operands and first 2 as reg operand");
3304 unsigned RegClass = OpInfo[0].RegClass;
3306 // Qd/Dd = Inst{7:19-16} => NEON Rn
3307 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, RegClass,
3308 decodeNEONRn(insn))));
3310 // Rt = Inst{15-12} => ARM Rd
3311 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
// True for the preload-hint opcodes (PLD/PLDW/PLI, imm12 and reg-shifted
// forms); the returns of the switch fall on elided lines in this extraction.
3318 static inline bool PreLoadOpcode(unsigned Opcode) {
3320 case ARM::PLDi12: case ARM::PLDrs:
3321 case ARM::PLDWi12: case ARM::PLDWrs:
3322 case ARM::PLIi12: case ARM::PLIrs:
// Disassemble PLD/PLDW/PLI: base register, then either a signed imm12
// offset (addrmode_imm12 forms) or a shifted register operand encoded as an
// AM2 opcode (ldst_so_reg forms).
3329 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3330 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3332 // Preload Data/Instruction requires either 2 or 3 operands.
3333 // PLDi12, PLDWi12, PLIi12: addrmode_imm12
3334 // PLDrs, PLDWrs, PLIrs: ldst_so_reg
3336 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3339 if (Opcode == ARM::PLDi12 || Opcode == ARM::PLDWi12
3340 || Opcode == ARM::PLIi12) {
3341 unsigned Imm12 = slice(insn, 11, 0);
3342 bool Negative = getUBit(insn) == 0;
3344 // A8.6.118 PLD (literal) PLDWi12 with Rn=PC is transformed to PLDi12.
3345 if (Opcode == ARM::PLDWi12 && slice(insn, 19, 16) == 0xF) {
3346 DEBUG(errs() << "Rn == '1111': PLDWi12 morphed to PLDi12\n");
3347 MI.setOpcode(ARM::PLDi12);
3350 // -0 is represented specially. All other values are as normal.
// Offset carries the U-bit sign; the special -0 encoding is handled on the
// elided line following the check.
3351 int Offset = Negative ? -1 * Imm12 : Imm12;
3352 if (Imm12 == 0 && Negative)
3355 MI.addOperand(MCOperand::CreateImm(Offset));
// Register-shifted form: Rm, then the AM2-packed add/sub + shift operand.
3358 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(B, ARM::GPRRegClassID,
3361 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
3363 // Inst{6-5} encodes the shift opcode.
3364 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
3365 // Inst{11-7} encodes the imm5 shift amount.
3366 unsigned ShImm = slice(insn, 11, 7);
3368 // A8.4.1. Possible rrx or shift amount of 32...
3369 getImmShiftSE(ShOp, ShImm);
3370 MI.addOperand(MCOperand::CreateImm(
3371 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleMiscFrm - Dispatches the miscellaneous-format instructions:
// memory barriers (DMB/DSB with their Inst{3-0} option), SWP/SWPB
// (delegated to DisassembleLdStExFrm), SETEND, the CPS1p/CPS2p/CPS3p
// variants, DBG, BKPT, and the preload hints (delegated to
// DisassemblePreLoadFrm). Asserts on anything else.
3378 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3379 unsigned short NumOps, unsigned &NumOpsAdded, BO B) {
3381 if (Opcode == ARM::DMB || Opcode == ARM::DSB) {
3382 // Inst{3-0} encodes the memory barrier option for the variants.
3383 unsigned opt = slice(insn, 3, 0);
// Only the architecturally defined barrier options are accepted.
3385 case ARM_MB::SY: case ARM_MB::ST:
3386 case ARM_MB::ISH: case ARM_MB::ISHST:
3387 case ARM_MB::NSH: case ARM_MB::NSHST:
3388 case ARM_MB::OSH: case ARM_MB::OSHST:
3389 MI.addOperand(MCOperand::CreateImm(opt));
3408 // SWP, SWPB: Rd Rm Rn
3409 // Delegate to DisassembleLdStExFrm()....
3410 return DisassembleLdStExFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
3415 if (Opcode == ARM::SETEND) {
// SETEND: the E bit, Inst{9}, selects big- vs little-endian data accesses.
3417 MI.addOperand(MCOperand::CreateImm(slice(insn, 9, 9)));
3421 // FIXME: To enable correct asm parsing and disasm of CPS we need 3 different
3422 // opcodes which match the same real instruction. This is needed since there's
3423 // no current handling of optional arguments. Fix here when a better handling
3424 // of optional arguments is implemented.
3425 if (Opcode == ARM::CPS3p) { // M = 1
3426 // Let's reject these impossible imod values by returning false:
3429 // AsmPrinter cannot handle imod=0b00, plus (imod=0b00,M=1,iflags!=0) is an
3430 // invalid combination, so we just check for imod=0b00 here.
3431 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3433 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3434 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3435 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3439 if (Opcode == ARM::CPS2p) { // mode = 0, M = 0
3440 // Let's reject these impossible imod values by returning false:
3441 // 1. (imod=0b00,M=0)
3443 if (slice(insn, 19, 18) == 0 || slice(insn, 19, 18) == 1)
3445 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 18))); // imod
3446 MI.addOperand(MCOperand::CreateImm(slice(insn, 8, 6))); // iflags
3450 if (Opcode == ARM::CPS1p) { // imod = 0, iflags = 0, M = 1
3451 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0))); // mode
3456 // DBG has its option specified in Inst{3-0}.
3457 if (Opcode == ARM::DBG) {
3458 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3463 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3464 if (Opcode == ARM::BKPT) {
3465 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3466 slice(insn, 3, 0)));
3471 if (PreLoadOpcode(Opcode))
3472 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded, B);
// Reaching here means an opcode was routed to the Misc format by mistake.
3474 assert(0 && "Unexpected misc instruction!");
3478 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3479 /// We divide the disassembly task into different categories, with each one
3480 /// corresponding to a specific instruction encoding format. There could be
3481 /// exceptions when handling a specific format, and that is why the Opcode is
3482 /// also present in the function prototype.
///
/// NOTE: this table is indexed directly by (unsigned)ARMFormat in the
/// ARMBasicMCBuilder constructor, so the entry order must stay in sync with
/// the ARMFormat enumeration.
3483 static const DisassembleFP FuncPtrs[] = {
3487 &DisassembleBrMiscFrm,
3489 &DisassembleDPSoRegFrm,
3492 &DisassembleLdMiscFrm,
3493 &DisassembleStMiscFrm,
3494 &DisassembleLdStMulFrm,
3495 &DisassembleLdStExFrm,
3496 &DisassembleArithMiscFrm,
3499 &DisassembleVFPUnaryFrm,
3500 &DisassembleVFPBinaryFrm,
3501 &DisassembleVFPConv1Frm,
3502 &DisassembleVFPConv2Frm,
3503 &DisassembleVFPConv3Frm,
3504 &DisassembleVFPConv4Frm,
3505 &DisassembleVFPConv5Frm,
3506 &DisassembleVFPLdStFrm,
3507 &DisassembleVFPLdStMulFrm,
3508 &DisassembleVFPMiscFrm,
3509 &DisassembleThumbFrm,
3510 &DisassembleMiscFrm,
3511 &DisassembleNGetLnFrm,
3512 &DisassembleNSetLnFrm,
3513 &DisassembleNDupFrm,
3515 // VLD and VST (including one lane) Instructions.
3518 // A7.4.6 One register and a modified immediate value
3519 // 1-Register Instructions with imm.
3520 // LLVM only defines VMOVv instructions.
3521 &DisassembleN1RegModImmFrm,
3523 // 2-Register Instructions with no imm.
3524 &DisassembleN2RegFrm,
3526 // 2-Register Instructions with imm (vector convert float/fixed point).
3527 &DisassembleNVCVTFrm,
3529 // 2-Register Instructions with imm (vector dup lane).
3530 &DisassembleNVecDupLnFrm,
3532 // Vector Shift Left Instructions.
3533 &DisassembleN2RegVecShLFrm,
3535 // Vector Shift Right Instructions, which has different interpretation of the
3536 // shift amount from the imm6 field.
3537 &DisassembleN2RegVecShRFrm,
3539 // 3-Register Data-Processing Instructions.
3540 &DisassembleN3RegFrm,
3542 // Vector Shift (Register) Instructions.
3543 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3544 &DisassembleN3RegVecShFrm,
3546 // Vector Extract Instructions.
3547 &DisassembleNVecExtractFrm,
3549 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3550 // By Scalar Instructions.
3551 &DisassembleNVecMulScalarFrm,
3553 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3554 // values in a table and generate a new vector.
3555 &DisassembleNVTBLFrm,
3560 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3561 /// The general idea is to set the Opcode for the MCInst, followed by adding
3562 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3563 /// to the Format-specific disassemble function for disassembly, followed by
3564 /// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
3565 /// which follow the Dst/Src Operands.
3566 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3567 // Stage 1 sets the Opcode.
3568 MI.setOpcode(Opcode);
3569 // If the number of operands is zero, we're done!
3573 // Stage 2 calls the format-specific disassemble function to build the operand
3577 unsigned NumOpsAdded = 0;
// Disasm was selected from FuncPtrs by the constructor based on Format.
3578 bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
// Fail if the format handler rejected the encoding or flagged an error.
3580 if (!OK || this->Err != 0) return false;
3581 if (NumOpsAdded >= NumOps)
3584 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3585 // FIXME: Should this be done selectively?
3586 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
3589 // A8.3 Conditional execution
3590 // A8.3.1 Pseudocode details of conditional execution
3591 // Condition bits '111x' indicate the instruction is always executed.
// CondCode - Translates the 4-bit condition field into the value used for
// the MCInst predicate operand; the 0xF encoding ("always", per A8.3) is
// special-cased before the ordinary field value is used.
3592 static uint32_t CondCode(uint32_t CondField) {
3593 if (CondField == 0xF)
3598 /// DoPredicateOperands - DoPredicateOperands process the predicate operands
3599 /// of some Thumb instructions which come before the reglist operands. It
3600 /// returns true if the two predicate operands have been processed.
3601 bool ARMBasicMCBuilder::DoPredicateOperands(MCInst& MI, unsigned Opcode,
3602 uint32_t /* insn */, unsigned short NumOpsRemaining) {
3604 assert(NumOpsRemaining > 0 && "Invalid argument");
3606 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// Idx is the position of the next operand to be added to MI.
3607 unsigned Idx = MI.getNumOperands();
3609 // First, we check whether this instr specifies the PredicateOperand through
3610 // a pair of TargetOperandInfos with isPredicate() property.
3611 if (NumOpsRemaining >= 2 &&
3612 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3613 OpInfo[Idx].RegClass < 0 &&
3614 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3616 // If we are inside an IT block, get the IT condition bits maintained via
3617 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3620 MI.addOperand(MCOperand::CreateImm(GetITCond()));
// Outside an IT block the predicate defaults to AL (always execute).
3622 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
// Second half of the predicate operand pair: the CCR register (CPSR).
3623 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3630 /// TryPredicateAndSBitModifier - TryPredicateAndSBitModifier tries to process
3631 /// the possible Predicate and SBitModifier, to build the remaining MCOperand
/// constituents left over after the format-specific disassembly (stage 2 of
/// BuildIt).
3633 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3634 uint32_t insn, unsigned short NumOpsRemaining) {
3636 assert(NumOpsRemaining > 0 && "Invalid argument");
3638 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
// The instruction name is used below to distinguish Thumb ("t"-prefixed)
// opcodes, whose condition fields live in different bit positions.
3639 const std::string &Name = ARMInsts[Opcode].Name;
3640 unsigned Idx = MI.getNumOperands();
3641 uint64_t TSFlags = ARMInsts[Opcode].TSFlags;
3643 // First, we check whether this instr specifies the PredicateOperand through
3644 // a pair of TargetOperandInfos with isPredicate() property.
3645 if (NumOpsRemaining >= 2 &&
3646 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3647 OpInfo[Idx].RegClass < 0 &&
3648 OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3650 // If we are inside an IT block, get the IT condition bits maintained via
3651 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
3654 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3656 if (Name.length() > 1 && Name[0] == 't') {
3657 // Thumb conditional branch instructions have their cond field embedded,
3661 // Check for undefined encodings.
3663 if (Name == "t2Bcc") {
// t2Bcc: cond is Inst{25-22}; 14/15 are not valid conditional encodings.
3664 if ((cond = slice(insn, 25, 22)) >= 14)
3666 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
3667 } else if (Name == "tBcc") {
// tBcc: cond is Inst{11-8}; 14 is an undefined encoding here.
3668 if ((cond = slice(insn, 11, 8)) == 14)
3670 MI.addOperand(MCOperand::CreateImm(CondCode(cond)));
// Other Thumb instructions (outside IT blocks) are unconditional: AL.
3672 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3674 // ARM instructions get their condition field from Inst{31-28}.
3675 // We should reject Inst{31-28} = 0b1111 as invalid encoding.
3676 if (!isNEONDomain(TSFlags) && getCondField(insn) == 0xF)
3678 MI.addOperand(MCOperand::CreateImm(CondCode(getCondField(insn))));
// Second half of the predicate operand pair: the CCR register (CPSR).
3681 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3683 NumOpsRemaining -= 2;
3686 if (NumOpsRemaining == 0)
3689 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3690 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
// S bit set => the instruction defines CPSR; otherwise add a zero reg.
3691 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3695 if (NumOpsRemaining == 0)
3701 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3702 /// after BuildIt is finished.
/// With a session (SP) attached, this maintains the Thumb IT-block state:
/// a t2IT instruction (re)initializes it from Inst{7-0}; otherwise, while
/// inside an IT block, the state is advanced.
3703 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// Without a session pointer there is no IT state to update.
3706 if (!SP) return Status;
3708 if (Opcode == ARM::t2IT)
// Inst{7-0} holds the IT firstcond/mask; a bad encoding fails the build.
3709 Status = SP->InitIT(slice(insn, 7, 0)) ? Status : false;
3710 else if (InITBlock())
3716 /// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
/// The constructor selects the format-specific disassemble function by
/// indexing FuncPtrs with the format value; SP (session pointer) and Err
/// start out cleared.
3717 ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
3719 : Opcode(opc), Format(format), NumOps(num), SP(0), Err(0) {
3720 unsigned Idx = (unsigned)format;
// The last FuncPtrs slot is not a valid format; see CreateMCBuilder.
3721 assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
3722 Disasm = FuncPtrs[Idx];
3725 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3726 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3727 /// Return NULL if it fails to create/return a proper builder. API clients
3728 /// are responsible for freeing up of the allocated memory. Cacheing can be
3729 /// performed by the API clients to improve performance.
3730 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3731 // For "Unknown format", fail by returning a NULL pointer.
// Guard here mirrors the assert in the ARMBasicMCBuilder constructor, so
// release builds fail gracefully instead of indexing past FuncPtrs.
3732 if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1)) {
3733 DEBUG(errs() << "Unknown format\n");
// NumOps comes from the static ARMInsts table generated by TableGen.
3737 return new ARMBasicMCBuilder(Opcode, Format,
3738 ARMInsts[Opcode].getNumOperands());
3741 /// tryAddingSymbolicOperand - tryAddingSymbolicOperand tries to add a symbolic
3742 /// operand in place of the immediate Value in the MCInst. The immediate
3743 /// Value has had any PC adjustment made by the caller. If the getOpInfo()
3744 /// function was set as part of the setupBuilderForSymbolicDisassembly() call
3745 /// then that function is called to get any symbolic information at the
3746 /// builder's Address for this instruction. If that returns non-zero then the
3747 /// symbolic information it returns is used to create an MCExpr and that is
3748 /// added as an operand to the MCInst. This function returns true if it adds
3749 /// an operand to the MCInst and false otherwise.
3750 bool ARMBasicMCBuilder::tryAddingSymbolicOperand(uint64_t Value,
3756 struct LLVMOpInfo1 SymbolicOp;
3757 SymbolicOp.Value = Value;
3758 if (!GetOpInfo(DisInfo, Address, 0 /* Offset */, InstSize, 1, &SymbolicOp))
3761 const MCExpr *Add = NULL;
3762 if (SymbolicOp.AddSymbol.Present) {
3763 if (SymbolicOp.AddSymbol.Name) {
3764 StringRef Name(SymbolicOp.AddSymbol.Name);
3765 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3766 Add = MCSymbolRefExpr::Create(Sym, *Ctx);
3768 Add = MCConstantExpr::Create(SymbolicOp.AddSymbol.Value, *Ctx);
3772 const MCExpr *Sub = NULL;
3773 if (SymbolicOp.SubtractSymbol.Present) {
3774 if (SymbolicOp.SubtractSymbol.Name) {
3775 StringRef Name(SymbolicOp.SubtractSymbol.Name);
3776 MCSymbol *Sym = Ctx->GetOrCreateSymbol(Name);
3777 Sub = MCSymbolRefExpr::Create(Sym, *Ctx);
3779 Sub = MCConstantExpr::Create(SymbolicOp.SubtractSymbol.Value, *Ctx);
3783 const MCExpr *Off = NULL;
3784 if (SymbolicOp.Value != 0)
3785 Off = MCConstantExpr::Create(SymbolicOp.Value, *Ctx);
3791 LHS = MCBinaryExpr::CreateSub(Add, Sub, *Ctx);
3793 LHS = MCUnaryExpr::CreateMinus(Sub, *Ctx);
3795 Expr = MCBinaryExpr::CreateAdd(LHS, Off, *Ctx);
3800 Expr = MCBinaryExpr::CreateAdd(Add, Off, *Ctx);
3806 if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_HI16)
3807 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateUpper16(Expr, *Ctx)));
3808 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_ARM_LO16)
3809 MI.addOperand(MCOperand::CreateExpr(ARMMCExpr::CreateLower16(Expr, *Ctx)));
3810 else if (SymbolicOp.VariantKind == LLVMDisassembler_VariantKind_None)
3811 MI.addOperand(MCOperand::CreateExpr(Expr));
3813 assert("bad SymbolicOp.VariantKind");