1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder, Builder Factory,
12 // as well as the Algorithm to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #include "ARMDisassemblerCore.h"
17 #include "ARMAddressingModes.h"
19 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
20 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
21 /// describing the operand info for each ARMInsts[i].
23 /// Together with an instruction's encoding format, we can take advantage of the
24 /// NumOperands and the OpInfo fields of the target instruction description in
25 /// the quest to build out the MCOperand list for an MCInst.
27 /// The general guideline is that with a known format, the number of dst and src
28 /// operands are well-known. The dst is built first, followed by the src
29 /// operand(s). The operands not yet used at this point are for the Implicit
30 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
31 /// defined with two components:
33 /// def pred { // Operand PredicateOperand
34 /// ValueType Type = OtherVT;
35 /// string PrintMethod = "printPredicateOperand";
36 /// string AsmOperandLowerMethod = ?;
37 /// dag MIOperandInfo = (ops i32imm, CCR);
38 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
39 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
42 /// which is manifested by the TargetOperandInfo[] of:
44 /// { 0, 0|(1<<TOI::Predicate), 0 },
45 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
47 /// So the first predicate MCOperand corresponds to the immediate part of the
48 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
49 /// corresponds to a register kind of ARM::CPSR.
51 /// For the Defs part, in the simple case of only cc_out:$s, we have:
53 /// def cc_out { // Operand OptionalDefOperand
54 /// ValueType Type = OtherVT;
55 /// string PrintMethod = "printSBitModifierOperand";
56 /// string AsmOperandLowerMethod = ?;
57 /// dag MIOperandInfo = (ops CCR);
58 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
59 /// dag DefaultOps = (ops (i32 zero_reg));
62 /// which is manifested by the one TargetOperandInfo of:
64 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
/// And this maps to one MCOperand with the register kind of ARM::CPSR.
67 #include "ARMGenInstrInfo.inc"
71 const char *ARMUtils::OpcodeName(unsigned Opcode) {
72 return ARMInsts[Opcode].Name;
75 // Return the register enum Based on RegClass and the raw register number.
76 // For DRegPair, see comments below.
78 static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister,
79 bool DRegPair = false) {
81 if (DRegPair && RegClassID == ARM::QPRRegClassID) {
82 // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
83 // in the ARM Architecture Manual as far as I understand it (A8.6.307).
84 // Therefore, we morph the RegClassID to be the sub register class and don't
85 // subsequently transform the RawRegister encoding when calculating RegNum.
87 // See also ARMinstPrinter::printOperand() wrt "dregpair" modifier part
88 // where this workaround is meant for.
89 RegClassID = ARM::DPRRegClassID;
92 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
94 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
100 switch (RegClassID) {
101 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
102 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
103 case ARM::DPR_VFP2RegClassID:
105 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
106 case ARM::QPR_VFP2RegClassID:
108 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
112 switch (RegClassID) {
113 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
114 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
115 case ARM::DPR_VFP2RegClassID:
117 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
118 case ARM::QPR_VFP2RegClassID:
120 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
124 switch (RegClassID) {
125 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
126 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
127 case ARM::DPR_VFP2RegClassID:
129 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
130 case ARM::QPR_VFP2RegClassID:
132 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
136 switch (RegClassID) {
137 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
138 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
139 case ARM::DPR_VFP2RegClassID:
141 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
142 case ARM::QPR_VFP2RegClassID:
144 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
148 switch (RegClassID) {
149 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
150 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
151 case ARM::DPR_VFP2RegClassID:
153 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
154 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
158 switch (RegClassID) {
159 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
160 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
161 case ARM::DPR_VFP2RegClassID:
163 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
164 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
168 switch (RegClassID) {
169 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
170 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
171 case ARM::DPR_VFP2RegClassID:
173 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
174 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
178 switch (RegClassID) {
179 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
180 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
181 case ARM::DPR_VFP2RegClassID:
183 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
184 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
188 switch (RegClassID) {
189 case ARM::GPRRegClassID: return ARM::R8;
190 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
191 case ARM::QPRRegClassID: return ARM::Q8;
192 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
196 switch (RegClassID) {
197 case ARM::GPRRegClassID: return ARM::R9;
198 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
199 case ARM::QPRRegClassID: return ARM::Q9;
200 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
204 switch (RegClassID) {
205 case ARM::GPRRegClassID: return ARM::R10;
206 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
207 case ARM::QPRRegClassID: return ARM::Q10;
208 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
212 switch (RegClassID) {
213 case ARM::GPRRegClassID: return ARM::R11;
214 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
215 case ARM::QPRRegClassID: return ARM::Q11;
216 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
220 switch (RegClassID) {
221 case ARM::GPRRegClassID: return ARM::R12;
222 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
223 case ARM::QPRRegClassID: return ARM::Q12;
224 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
228 switch (RegClassID) {
229 case ARM::GPRRegClassID: return ARM::SP;
230 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
231 case ARM::QPRRegClassID: return ARM::Q13;
232 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
236 switch (RegClassID) {
237 case ARM::GPRRegClassID: return ARM::LR;
238 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
239 case ARM::QPRRegClassID: return ARM::Q14;
240 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
244 switch (RegClassID) {
245 case ARM::GPRRegClassID: return ARM::PC;
246 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
247 case ARM::QPRRegClassID: return ARM::Q15;
248 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
252 switch (RegClassID) {
253 case ARM::DPRRegClassID: return ARM::D16;
254 case ARM::SPRRegClassID: return ARM::S16;
258 switch (RegClassID) {
259 case ARM::DPRRegClassID: return ARM::D17;
260 case ARM::SPRRegClassID: return ARM::S17;
264 switch (RegClassID) {
265 case ARM::DPRRegClassID: return ARM::D18;
266 case ARM::SPRRegClassID: return ARM::S18;
270 switch (RegClassID) {
271 case ARM::DPRRegClassID: return ARM::D19;
272 case ARM::SPRRegClassID: return ARM::S19;
276 switch (RegClassID) {
277 case ARM::DPRRegClassID: return ARM::D20;
278 case ARM::SPRRegClassID: return ARM::S20;
282 switch (RegClassID) {
283 case ARM::DPRRegClassID: return ARM::D21;
284 case ARM::SPRRegClassID: return ARM::S21;
288 switch (RegClassID) {
289 case ARM::DPRRegClassID: return ARM::D22;
290 case ARM::SPRRegClassID: return ARM::S22;
294 switch (RegClassID) {
295 case ARM::DPRRegClassID: return ARM::D23;
296 case ARM::SPRRegClassID: return ARM::S23;
300 switch (RegClassID) {
301 case ARM::DPRRegClassID: return ARM::D24;
302 case ARM::SPRRegClassID: return ARM::S24;
306 switch (RegClassID) {
307 case ARM::DPRRegClassID: return ARM::D25;
308 case ARM::SPRRegClassID: return ARM::S25;
312 switch (RegClassID) {
313 case ARM::DPRRegClassID: return ARM::D26;
314 case ARM::SPRRegClassID: return ARM::S26;
318 switch (RegClassID) {
319 case ARM::DPRRegClassID: return ARM::D27;
320 case ARM::SPRRegClassID: return ARM::S27;
324 switch (RegClassID) {
325 case ARM::DPRRegClassID: return ARM::D28;
326 case ARM::SPRRegClassID: return ARM::S28;
330 switch (RegClassID) {
331 case ARM::DPRRegClassID: return ARM::D29;
332 case ARM::SPRRegClassID: return ARM::S29;
336 switch (RegClassID) {
337 case ARM::DPRRegClassID: return ARM::D30;
338 case ARM::SPRRegClassID: return ARM::S30;
342 switch (RegClassID) {
343 case ARM::DPRRegClassID: return ARM::D31;
344 case ARM::SPRRegClassID: return ARM::S31;
348 assert(0 && "Invalid (RegClassID, RawRegister) combination");
352 ///////////////////////////////
354 // Utility Functions //
356 ///////////////////////////////
358 // Extract/Decode Rd: Inst{15-12}.
359 static inline unsigned decodeRd(uint32_t insn) {
360 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
363 // Extract/Decode Rn: Inst{19-16}.
364 static inline unsigned decodeRn(uint32_t insn) {
365 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
368 // Extract/Decode Rm: Inst{3-0}.
369 static inline unsigned decodeRm(uint32_t insn) {
370 return (insn & ARMII::GPRRegMask);
373 // Extract/Decode Rs: Inst{11-8}.
374 static inline unsigned decodeRs(uint32_t insn) {
375 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
378 static inline unsigned getCondField(uint32_t insn) {
379 return (insn >> ARMII::CondShift);
382 static inline unsigned getIBit(uint32_t insn) {
383 return (insn >> ARMII::I_BitShift) & 1;
386 static inline unsigned getAM3IBit(uint32_t insn) {
387 return (insn >> ARMII::AM3_I_BitShift) & 1;
390 static inline unsigned getPBit(uint32_t insn) {
391 return (insn >> ARMII::P_BitShift) & 1;
394 static inline unsigned getUBit(uint32_t insn) {
395 return (insn >> ARMII::U_BitShift) & 1;
398 static inline unsigned getPUBits(uint32_t insn) {
399 return (insn >> ARMII::U_BitShift) & 3;
402 static inline unsigned getSBit(uint32_t insn) {
403 return (insn >> ARMII::S_BitShift) & 1;
406 static inline unsigned getWBit(uint32_t insn) {
407 return (insn >> ARMII::W_BitShift) & 1;
410 static inline unsigned getDBit(uint32_t insn) {
411 return (insn >> ARMII::D_BitShift) & 1;
414 static inline unsigned getNBit(uint32_t insn) {
415 return (insn >> ARMII::N_BitShift) & 1;
418 static inline unsigned getMBit(uint32_t insn) {
419 return (insn >> ARMII::M_BitShift) & 1;
422 // See A8.4 Shifts applied to a register.
423 // A8.4.2 Register controlled shifts.
425 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
426 // into llvm enums for shift opcode. The API clients should pass in the value
427 // encoded with two bits, so the assert stays to signal a wrong API usage.
429 // A8-12: DecodeRegShift()
430 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
432 default: assert(0 && "No such value"); return ARM_AM::no_shift;
433 case 0: return ARM_AM::lsl;
434 case 1: return ARM_AM::lsr;
435 case 2: return ARM_AM::asr;
436 case 3: return ARM_AM::ror;
440 // See A8.4 Shifts applied to a register.
441 // A8.4.1 Constant shifts.
443 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
444 // encodings into the intended ShiftOpc and shift amount.
446 // A8-11: DecodeImmShift()
447 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
448 // If type == 0b11 and imm5 == 0, we have an rrx, instead.
449 if (ShOp == ARM_AM::ror && ShImm == 0)
451 // If (lsr or asr) and imm5 == 0, shift amount is 32.
452 if ((ShOp == ARM_AM::lsr || ShOp == ARM_AM::asr) && ShImm == 0)
456 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
457 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
458 // clients should pass in the value encoded with two bits, so the assert stays
459 // to signal a wrong API usage.
460 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
462 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
463 case 1: return ARM_AM::ia; // P=0 U=1
464 case 3: return ARM_AM::ib; // P=1 U=1
465 case 0: return ARM_AM::da; // P=0 U=0
466 case 2: return ARM_AM::db; // P=1 U=0
470 ////////////////////////////////////////////
472 // Disassemble function definitions //
474 ////////////////////////////////////////////
476 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
477 /// instr into a list of MCOperands in the appropriate order, with possible dst,
478 /// followed by possible src(s).
480 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
481 /// the CPSR, is factored into ARMBasicMCBuilder's class method named
482 /// TryPredicateAndSBitModifier.
484 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
485 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
487 if (Opcode == ARM::Int_MemBarrierV7 || Opcode == ARM::Int_SyncBarrierV7)
490 assert(0 && "Unexpected pseudo instruction!");
494 // Multiply Instructions.
495 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
496 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
498 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
499 // Rd{19-16} Rn{3-0} Rm{11-8}
501 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
502 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
504 // The mapping of the multiply registers to the "regular" ARM registers, where
505 // there are convenience decoder functions, is:
511 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
512 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
514 const TargetInstrDesc &TID = ARMInsts[Opcode];
515 unsigned short NumDefs = TID.getNumDefs();
516 const TargetOperandInfo *OpInfo = TID.OpInfo;
517 unsigned &OpIdx = NumOpsAdded;
521 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
523 && OpInfo[0].RegClass == ARM::GPRRegClassID
524 && OpInfo[1].RegClass == ARM::GPRRegClassID
525 && OpInfo[2].RegClass == ARM::GPRRegClassID
526 && "Expect three register operands");
528 // Instructions with two destination registers have RdLo{15-12} first.
530 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
531 "Expect 4th register operand");
532 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
537 // The destination register: RdHi{19-16} or Rd{19-16}.
538 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
541 // The two src regsiters: Rn{3-0}, then Rm{11-8}.
542 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
544 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
548 // Many multiply instructions (e.g., MLA) have three src registers.
549 // The third register operand is Ra{15-12}.
550 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
551 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
559 // Helper routines for disassembly of coprocessor instructions.
561 static bool LdStCopOpcode(unsigned Opcode) {
562 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
563 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
567 static bool CoprocessorOpcode(unsigned Opcode) {
568 if (LdStCopOpcode(Opcode))
574 case ARM::CDP: case ARM::CDP2:
575 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
576 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
580 static inline unsigned GetCoprocessor(uint32_t insn) {
581 return slice(insn, 11, 8);
583 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
584 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
586 static inline unsigned GetCopOpc2(uint32_t insn) {
587 return slice(insn, 7, 5);
589 static inline unsigned GetCopOpc(uint32_t insn) {
590 return slice(insn, 7, 4);
// Most of the operands are in immediate forms, except Rd and Rn, which are ARM
// GPR registers.
//
// Operand orderings per opcode family:
//
// CDP, CDP2:                cop opc1 CRd CRn CRm opc2
// MCR, MCR2, MRC, MRC2:     cop opc1 Rd CRn CRm opc2
// MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
// LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
// STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
// LDC_OPTION: cop CRd Rn imm8
// STC_OPTION: cop CRd Rn imm8
//
// NOTE(review): several lines of this function appear to have been dropped in
// extraction (the branch selecting the load/store form, argument-list
// continuations, the epilogue/return and closing braces). The code below is
// kept verbatim; verify against upstream LLVM before relying on it.
static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded) {

  // Every coprocessor instruction built here carries at least five operands.
  assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");

  unsigned &OpIdx = NumOpsAdded;
  // MCRR/MRRC (and *2 variants) encode a single 4-bit opc field.
  bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
                    Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
  // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
  bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
  bool LdStCop = LdStCopOpcode(Opcode);

  // Operand 0: the coprocessor number, Inst{11-8}.
  MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));

  // Unindex if P:W = 0b00 --> _OPTION variant
  unsigned PW = getPBit(insn) << 1 | getWBit(insn);

  // Load/store form: CRd (as an immediate), base register Rn, then either an
  // AM2 offset or the raw imm8 option field.
  MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  MI.addOperand(MCOperand::CreateReg(0));
  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  // Offset is imm8 scaled by 4 (imm8:00), signed by the U bit.
  unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
  MI.addOperand(MCOperand::CreateImm(Offset));

  MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));

  // Non-load/store form: opc (or opc1), then Rd/CRd, Rn/CRn, CRm, opc2.
  MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
                                     : GetCopOpc1(insn, NoGPR)));

  MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
                      : MCOperand::CreateReg(
                          getRegisterEnum(ARM::GPRRegClassID,

  MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
                              getRegisterEnum(ARM::GPRRegClassID,
                          : MCOperand::CreateImm(decodeRn(insn)));

  MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));

  MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
// Branch Instructions.
// BLr9: SignExtend(Imm24:'00', 32)
// Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
// SMC: ZeroExtend(imm4, 32)
// SVC: ZeroExtend(Imm24, 32)
//
// Various coprocessor instructions are assigned BrFrm arbitrarily.
// Delegates to DisassembleCoprocessor() helper function.
//
// MSR/MSRsys: Rm mask=Inst{19-16}
// MSRi/MSRsysi: so_imm
// SRSW/SRS: addrmode4:$addr mode_imm
// RFEW/RFE: addrmode4:$addr Rn
//
// NOTE(review): operand-list continuations, the "NumOpsAdded = ...;
// return true;" epilogues, closing braces and the declaration of Imm32 appear
// to have been dropped in extraction. The code below is kept verbatim; verify
// against upstream LLVM before relying on it.
static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  // Coprocessor instructions routed to BrFrm are handled by the dedicated
  // helper.
  if (CoprocessorOpcode(Opcode))
    return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded);

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;

  // MRS and MRSsys take one GPR reg Rd.
  if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // BXJ takes one GPR reg Rm.
  if (Opcode == ARM::BXJ) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // MSR and MSRsys take one GPR reg Rm, followed by the mask.
  if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
    assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));

  // MSRi and MSRsysi take one so_imm operand, followed by the mask.
  if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
    // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
    // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
    // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
    unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
    unsigned Imm = insn & 0xFF;
    MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));

  // SRSW and SRS requires addrmode4:$addr for ${addr:submode}, followed by the
  // mode immediate (Inst{4-0}).
  if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
      Opcode == ARM::RFEW || Opcode == ARM::RFE) {
    // ARMInstPrinter::printAddrMode4Operand() prints special mode string
    // if the base register is SP; so don't set ARM::SP.
    MI.addOperand(MCOperand::CreateReg(0));
    ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));

    // SRS takes the mode immediate; RFE takes the base register Rn.
    if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
      MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Only the plain branch/call/system-call opcodes remain at this point.
  assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
          || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
         "Unexpected Opcode");

  assert(NumOps >= 1 && OpInfo[0].RegClass == 0 && "Reg operand expected");

  if (Opcode == ARM::SMC) {
    // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
    Imm32 = slice(insn, 3, 0);
  } else if (Opcode == ARM::SVC) {
    // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
    Imm32 = slice(insn, 23, 0);
    // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
    unsigned Imm26 = slice(insn, 23, 0) << 2;
    //Imm32 = signextend<signed int, 26>(Imm26);
    Imm32 = SignExtend32<26>(Imm26);

  // When executing an ARM instruction, PC reads as the address of the current
  // instruction plus 8. The assembler subtracts 8 from the difference
  // between the branch instruction and the target address, disassembler has
  // to add 8 to compensate.

  MI.addOperand(MCOperand::CreateImm(Imm32));
// Misc. Branch Instructions.
// BR_JTadd, BR_JTr, BR_JTm
//
// NOTE(review): operand-list continuations, the "OpIdx = ...; return true;"
// epilogues and closing braces appear to have been dropped in extraction.
// The code below is kept verbatim; verify against upstream LLVM before
// relying on it.
static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // BX_RET has only two predicate operands, do an early return.
  if (Opcode == ARM::BX_RET)

  // BLXr9 and BRIND take one GPR reg.
  if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
    assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
  if (Opcode == ARM::BR_JTadd) {
    // InOperandList with GPR:$target and GPR:$idx regs.

    assert(NumOps == 4 && "Expect 4 operands");
    // The target and index registers.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

  // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
  if (Opcode == ARM::BR_JTr) {
    // InOperandList with GPR::$target reg.

    assert(NumOps == 3 && "Expect 3 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

  // BR_JTm is an LDR with Rt = PC.
  if (Opcode == ARM::BR_JTm) {
    // This is the reg/reg form, with base reg followed by +/- reg shift imm.
    // See also ARMAddressingModes.h (Addressing Mode #2).

    assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;

    // Disassemble the offset reg (Rm), shift type, and immediate shift length.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));

    // Fill in the two remaining imm operands to signify build completion.
    MI.addOperand(MCOperand::CreateImm(0));
    MI.addOperand(MCOperand::CreateImm(0));

  assert(0 && "Unexpected BrMiscFrm Opcode");
885 static inline uint32_t getBFCInvMask(uint32_t insn) {
886 uint32_t lsb = slice(insn, 11, 7);
887 uint32_t msb = slice(insn, 20, 16);
889 assert(lsb <= msb && "Encoding error: lsb > msb");
890 for (uint32_t i = lsb; i <= msb; ++i)
895 static inline bool SaturateOpcode(unsigned Opcode) {
897 case ARM::SSATlsl: case ARM::SSATasr: case ARM::SSAT16:
898 case ARM::USATlsl: case ARM::USATasr: case ARM::USAT16:
905 static inline unsigned decodeSaturatePos(unsigned Opcode, uint32_t insn) {
909 return slice(insn, 20, 16) + 1;
911 return slice(insn, 19, 16) + 1;
914 return slice(insn, 20, 16);
916 return slice(insn, 19, 16);
918 assert(0 && "Invalid opcode passed in");
// A major complication is the fact that some of the saturating add/subtract
// operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
// They are QADD, QDADD, QDSUB, and QSUB.
//
// NOTE(review): several lines of this function appear to have been dropped in
// extraction (OpIdx initialization, argument-list continuations, early
// returns and closing braces). The code below is kept verbatim; verify
// against upstream LLVM before relying on it.
static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  unsigned short NumDefs = TID.getNumDefs();
  bool isUnary = isUnaryDP(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Now disassemble the src operands.

  // SSAT/SSAT16/USAT/USAT16 has imm operand after Rd.
  if (SaturateOpcode(Opcode)) {
    MI.addOperand(MCOperand::CreateImm(decodeSaturatePos(Opcode, insn)));
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    // The 16-bit variants carry no shift amount.
    if (Opcode == ARM::SSAT16 || Opcode == ARM::USAT16) {

    // For SSAT operand reg (Rm) has been disassembled above.
    // Now disassemble the shift amount.

    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShAmt = slice(insn, 11, 7);

    // A8.6.183. Possible ASR shift amount of 32...
    if (Opcode == ARM::SSATasr && ShAmt == 0)

    MI.addOperand(MCOperand::CreateImm(ShAmt));

  // Special-case handling of BFC/BFI/SBFX/UBFX.
  if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
    // TIED_TO operand skipped for BFC and Inst{3-0} (Reg) for BFI.
    MI.addOperand(MCOperand::CreateReg(Opcode == ARM::BFC ? 0
                                       : getRegisterEnum(ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateImm(getBFCInvMask(insn)));

  if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
    // Source reg, then lsb (Inst{11-7}) and width (Inst{20-16} + 1).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
    MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));

  // True if the source operands come in the swapped Rd Rm Rn order.
  bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
               Opcode == ARM::QDSUB || Opcode == ARM::QSUB);

  // BinaryDP has an Rn operand.
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(ARM::GPRRegClassID,
                                    RmRn ? decodeRm(insn) : decodeRn(insn))));

  // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));

  // Now disassemble operand 2.
  if (OpIdx >= NumOps)

  if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
    // We have a reg/reg form.
    // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
    // routed here as well.
    // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(ARM::GPRRegClassID,
                                    RmRn? decodeRn(insn) : decodeRm(insn))));
  } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
    // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
    MI.addOperand(MCOperand::CreateImm(Imm16));
    // We have a reg/imm form.
    // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
    // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
    // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
    assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
    unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
    unsigned Imm = insn & 0xFF;
    MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Disassembles a data-processing instruction whose second operand is a
// shifted register (so_reg): [Rm, Rs, shift] for register-controlled shifts
// or [Rm, reg0, shift_imm] for constant shifts.
//
// NOTE(review): the "OpIdx = 0;" initialization, the if/else selecting
// register-controlled vs. constant shifts, argument continuations, returns
// and closing braces appear to have been dropped in extraction. The code
// below is kept verbatim; verify against upstream LLVM before relying on it.
static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  unsigned short NumDefs = TID.getNumDefs();
  bool isUnary = isUnaryDP(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // Disassemble register def if there is one.
  if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Disassemble the src operands.
  if (OpIdx >= NumOps)

  // BinaryDP has an Rn operand.
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
  if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
    MI.addOperand(MCOperand::CreateReg(0));

  // Disassemble operand 2, which consists of three components.
  if (OpIdx + 2 >= NumOps)

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+2].RegClass == 0) &&
         "Expect 3 reg operands");

  // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
  unsigned Rs = slice(insn, 4, 4);

  // The shifted register Rm, Inst{3-0}.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    // Register-controlled shifts: [Rm, Rs, shift].
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
    // Constant shifts: [Rm, reg0, shift_imm].
    MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Disassembles a word/byte load/store (Addressing Mode #2) instruction.
// Operand order: [writeback base for pre/post store] dst/src GPR
// [writeback base for pre/post load] base GPR, then either +/-imm12 (I bit
// clear: reg0 + AM2 imm opc) or +/-Rm with an imm5 shift (I bit set).
// isStore selects between the load and store operand layouts.
1123 static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1124 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
1126 const TargetInstrDesc &TID = ARMInsts[Opcode];
1127 unsigned short NumDefs = TID.getNumDefs();
1128 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1129 const TargetOperandInfo *OpInfo = TID.OpInfo;
1130 unsigned &OpIdx = NumOpsAdded;
1134 assert(((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)))
1135 && "Invalid arguments");
1137 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1138 if (isPrePost && isStore) {
1139 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1140 "Reg operand expected");
1141 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1146 // Disassemble the dst/src operand.
1147 if (OpIdx >= NumOps)
1150 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1151 "Reg operand expected");
1152 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1156 // After dst of a pre- and post-indexed load is the address base writeback.
1157 if (isPrePost && !isStore) {
1158 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1159 "Reg operand expected");
1160 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1165 // Disassemble the base operand.
1166 if (OpIdx >= NumOps)
1169 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1170 "Reg operand expected");
1171 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1172 && "Index mode or tied_to operand expected");
1173 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1177 // For reg/reg form, base reg is followed by +/- reg shop imm.
1178 // For immediate form, it is followed by +/- imm12.
1179 // See also ARMAddressingModes.h (Addressing Mode #2).
1180 if (OpIdx + 1 >= NumOps)
1183 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1184 (OpInfo[OpIdx+1].RegClass == 0) &&
1185 "Expect 1 reg operand followed by 1 imm operand");
1187 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1188 if (getIBit(insn) == 0) {
1189 MI.addOperand(MCOperand::CreateReg(0));
1191 // Disassemble the 12-bit immediate offset.
1192 unsigned Imm12 = slice(insn, 11, 0);
1193 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
1194 MI.addOperand(MCOperand::CreateImm(Offset));
1196 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
1197 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1199 // Inst{6-5} encodes the shift opcode.
1200 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1201 // Inst{11-7} encodes the imm5 shift amount.
1202 unsigned ShImm = slice(insn, 11, 7);
1204 // A8.4.1. Possible rrx or shift amount of 32...
1205 getImmShiftSE(ShOp, ShImm);
1206 MI.addOperand(MCOperand::CreateImm(
1207 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Thin wrapper: load form delegates to the common LdSt routine (isStore=false).
1214 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1215 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1216 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
// Thin wrapper: store form delegates to the common LdSt routine (isStore=true).
1219 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1220 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1221 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
// Returns true for the doubleword LDRD/STRD opcodes (all index modes), which
// transfer a pair of consecutive registers and need a second Rd+1 operand.
1224 static bool HasDualReg(unsigned Opcode) {
1228 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1229 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// Disassembles a misc load/store (Addressing Mode #3: LDRH/LDRSB/LDRD etc.)
// instruction. Same skeleton as DisassembleLdStFrm, plus a second Rd+1
// operand for the dual-register LDRD/STRD opcodes, and an AM3 offset:
// either +/-imm8 (Imm4H:Imm4L) or +/-Rm, selected by the AM3 I bit.
1234 static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1235 unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {
1237 const TargetInstrDesc &TID = ARMInsts[Opcode];
1238 unsigned short NumDefs = TID.getNumDefs();
1239 bool isPrePost = isPrePostLdSt(TID.TSFlags);
1240 const TargetOperandInfo *OpInfo = TID.OpInfo;
1241 unsigned &OpIdx = NumOpsAdded;
1245 assert(((!isStore && NumDefs > 0) || (isStore && (NumDefs == 0 || isPrePost)))
1246 && "Invalid arguments");
1248 // Operand 0 of a pre- and post-indexed store is the address base writeback.
1249 if (isPrePost && isStore) {
1250 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1251 "Reg operand expected");
1252 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1257 bool DualReg = HasDualReg(Opcode);
1259 // Disassemble the dst/src operand.
1260 if (OpIdx >= NumOps)
1263 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1264 "Reg operand expected");
1265 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1269 // Fill in LDRD and STRD's second operand.
1271 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1272 decodeRd(insn) + 1)));
1276 // After dst of a pre- and post-indexed load is the address base writeback.
1277 if (isPrePost && !isStore) {
1278 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1279 "Reg operand expected");
1280 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1285 // Disassemble the base operand.
1286 if (OpIdx >= NumOps)
1289 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1290 "Reg operand expected");
1291 assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
1292 && "Index mode or tied_to operand expected");
1293 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1297 // For reg/reg form, base reg is followed by +/- reg.
1298 // For immediate form, it is followed by +/- imm8.
1299 // See also ARMAddressingModes.h (Addressing Mode #3).
1300 if (OpIdx + 1 >= NumOps)
1303 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1304 (OpInfo[OpIdx+1].RegClass == 0) &&
1305 "Expect 1 reg operand followed by 1 imm operand");
1307 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1308 if (getAM3IBit(insn) == 1) {
1309 MI.addOperand(MCOperand::CreateReg(0));
1311 // Disassemble the 8-bit immediate offset.
1312 unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
1313 unsigned Imm4L = insn & 0xF;
1314 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
1315 MI.addOperand(MCOperand::CreateImm(Offset));
1317 // Disassemble the offset reg (Rm).
1318 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1320 unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
1321 MI.addOperand(MCOperand::CreateImm(Offset));
// Thin wrapper: misc load form delegates to LdStMisc (isStore=false).
1328 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1329 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1330 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
// Thin wrapper: misc store form delegates to LdStMisc (isStore=true).
1333 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1334 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1335 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
1338 // The algorithm for disassembly of LdStMulFrm is different from others because
1339 // it explicitly populates the two predicate operands after operand 0 (the base)
1340 // and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
1341 // reglist with each affected register encoded as an MCOperand.
// Disassembles LDM/STM: [writeback base for _UPD opcodes] base reg, AM4
// submode imm, the two predicate operands (cond imm + CPSR reg), then one
// reg operand per set bit in the low-16-bit register list.
// Note 0xF in the cond field is mapped to 0xE (AL) here.
1342 static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1343 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1345 assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");
1347 unsigned &OpIdx = NumOpsAdded;
1351 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1353 // Writeback to base, if necessary.
1354 if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
1355 MI.addOperand(MCOperand::CreateReg(Base));
1359 MI.addOperand(MCOperand::CreateReg(Base));
1361 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1362 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
1364 // Handling the two predicate operands before the reglist.
1365 int64_t CondVal = insn >> ARMII::CondShift;
1366 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1367 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1371 // Fill the variadic part of reglist.
1372 unsigned RegListBits = insn & ((1 << 16) - 1);
1373 for (unsigned i = 0; i < 16; ++i) {
1374 if ((RegListBits >> i) & 1) {
1375 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1384 // LDREX, LDREXB, LDREXH: Rd Rn
1385 // LDREXD: Rd Rd+1 Rn
1386 // STREX, STREXB, STREXH: Rd Rm Rn
1387 // STREXD: Rd Rm Rm+1 Rn
1389 // SWP, SWPB: Rd Rm Rn
// Disassembles exclusive load/store (LDREX*/STREX*) and SWP/SWPB.
// Inst{20} == 0 means store. Operand order per the table in the comment
// above: dst, [src Rm for stores], [Rm+1 / Rd+1 for the doubleword forms],
// then the Rn pointer operand.
1390 static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1391 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1393 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1394 unsigned &OpIdx = NumOpsAdded;
1399 && OpInfo[0].RegClass == ARM::GPRRegClassID
1400 && OpInfo[1].RegClass == ARM::GPRRegClassID
1401 && "Expect 2 reg operands");
1403 bool isStore = slice(insn, 20, 20) == 0;
1404 bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);
1406 // Add the destination operand.
1407 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1411 // Store register Exclusive needs a source operand.
1413 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1418 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1419 decodeRm(insn)+1)))
1423 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1424 decodeRd(insn)+1)));
1428 // Finally add the pointer operand.
1429 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1436 // Misc. Arithmetic Instructions.
1438 // PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
1439 // RBIT, REV, REV16, REVSH: Rd Rm
// Disassembles misc arithmetic instructions (PKHBT/PKHTB, RBIT, REV*).
// Adds Rd, [Rn for the 3-reg PKH forms], Rm, and — when a trailing
// non-predicate imm operand exists — the imm5 LSL/ASR shift amount from
// Inst{11-7}.
1440 static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1441 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1443 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1444 unsigned &OpIdx = NumOpsAdded;
1449 && OpInfo[0].RegClass == ARM::GPRRegClassID
1450 && OpInfo[1].RegClass == ARM::GPRRegClassID
1451 && "Expect 2 reg operands");
1453 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1455 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1460 assert(NumOps >= 4 && "Expect >= 4 operands");
1461 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1466 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1470 // If there is still an operand info left which is an immediate operand, add
1471 // an additional imm5 LSL/ASR operand.
1472 if (ThreeReg && OpInfo[OpIdx].RegClass == 0
1473 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1474 // Extract the 5-bit immediate field Inst{11-7}.
1475 unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
1476 MI.addOperand(MCOperand::CreateImm(ShiftAmt));
1483 // Extend instructions.
1484 // SXT* and UXT*: Rd [Rn] Rm [rot_imm].
1485 // The 2nd operand register is Rn and the 3rd operand register is Rm for the
1486 // three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// Disassembles the SXT*/UXT* extend instructions: Rd, [Rn for the 3-reg
// form], Rm, plus an optional rotate-amount operand (Inst{11-10} * 8,
// i.e., rotation by 0/8/16/24 bits).
1487 static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1488 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1490 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1491 unsigned &OpIdx = NumOpsAdded;
1496 && OpInfo[0].RegClass == ARM::GPRRegClassID
1497 && OpInfo[1].RegClass == ARM::GPRRegClassID
1498 && "Expect 2 reg operands");
1500 bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;
1502 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1507 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1512 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1516 // If there is still an operand info left which is an immediate operand, add
1517 // an additional rotate immediate operand.
1518 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
1519 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1520 // Extract the 2-bit rotate field Inst{11-10}.
1521 unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
1522 // Rotation by 8, 16, or 24 bits.
1523 MI.addOperand(MCOperand::CreateImm(rot << 3));
1530 /////////////////////////////////////
1532 // Utility Functions For VFP //
1534 /////////////////////////////////////
1536 // Extract/Decode Dd/Sd:
1538 // SP => d = UInt(Vd:D)
1539 // DP => d = UInt(D:Vd)
// Decode the VFP destination register: Sd = Vd:D for single precision,
// Dd = D:Vd for double precision (D bit becomes bit 4).
1540 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1541 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1542 : (decodeRd(insn) | getDBit(insn) << 4);
1545 // Extract/Decode Dn/Sn:
1547 // SP => n = UInt(Vn:N)
1548 // DP => n = UInt(N:Vn)
// Decode the VFP first-source register, same scheme with the N bit.
1549 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1550 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1551 : (decodeRn(insn) | getNBit(insn) << 4);
1554 // Extract/Decode Dm/Sm:
1556 // SP => m = UInt(Vm:M)
1557 // DP => m = UInt(M:Vm)
// Decode the VFP second-source register, same scheme with the M bit.
1558 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1559 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1560 : (decodeRm(insn) | getMBit(insn) << 4);
// Expand an 8-bit VFP modified-immediate into an N-bit (32 or 64) IEEE
// float bit pattern: sign from byte<7>, exponent from ~byte<6> replicated,
// fraction from byte<5:0>.
1565 static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
1566 assert(N == 32 || N == 64);
1569 unsigned bit6 = slice(byte, 6, 6);
1571 Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
1573 Result |= 0x1f << 25;
1575 Result |= 0x1 << 30;
1577 Result = (uint64_t)slice(byte, 7, 7) << 63 |
1578 (uint64_t)slice(byte, 5, 0) << 48;
// NOTE(review): 0xffL/0x1L are `long`, which is 32 bits on LP32/LLP64
// targets; shifting by 54/62 would overflow there. 0xffULL/0x1ULL would
// be portable — confirm against the supported host targets.
1580 Result |= 0xffL << 54;
1582 Result |= 0x1L << 62;
1588 // VFP Unary Format Instructions:
1590 // VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
1591 // VCVTDS, VCVTSD: converts between double-precision and single-precision
1592 // The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
// Disassembles a VFP unary instruction: one SPR/DPR destination (class taken
// from the operand info) and, except for the compare-with-zero opcodes which
// return early, one SPR/DPR source register.
1593 static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1594 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1596 assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");
1598 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1599 unsigned &OpIdx = NumOpsAdded;
1603 unsigned RegClass = OpInfo[OpIdx].RegClass;
1604 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1605 "Reg operand expected");
1606 bool isSP = (RegClass == ARM::SPRRegClassID);
1608 MI.addOperand(MCOperand::CreateReg(
1609 getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));
1612 // Early return for compare with zero instructions.
1613 if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
1614 || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)
// Source may differ in precision from the dst (e.g., VCVTDS/VCVTSD), so
// re-read the register class for the second operand.
1617 RegClass = OpInfo[OpIdx].RegClass;
1618 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1619 "Reg operand expected");
1620 isSP = (RegClass == ARM::SPRRegClassID);
1622 MI.addOperand(MCOperand::CreateReg(
1623 getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
1629 // All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
1630 // Some of them have operand constraints which tie the first operand in the
1631 // InOperandList to that of the dst. As far as asm printing is concerned, this
1632 // tied_to operand is simply skipped.
// Disassembles a VFP binary instruction with homogeneous Rd/Rn/Rm registers:
// dst, [placeholder reg0 for a tied_to constraint], Rn, Rm.
1633 static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1634 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1636 assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");
1638 const TargetInstrDesc &TID = ARMInsts[Opcode];
1639 const TargetOperandInfo *OpInfo = TID.OpInfo;
1640 unsigned &OpIdx = NumOpsAdded;
1644 unsigned RegClass = OpInfo[OpIdx].RegClass;
1645 assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
1646 "Reg operand expected");
1647 bool isSP = (RegClass == ARM::SPRRegClassID);
1649 MI.addOperand(MCOperand::CreateReg(
1650 getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));
1653 // Skip tied_to operand constraint.
1654 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
1655 assert(NumOps >= 4 && "Expect >=4 operands");
1656 MI.addOperand(MCOperand::CreateReg(0));
1660 MI.addOperand(MCOperand::CreateReg(
1661 getRegisterEnum(RegClass, decodeVFPRn(insn, isSP))));
1664 MI.addOperand(MCOperand::CreateReg(
1665 getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
1671 // A8.6.295 vcvt (floating-point <-> integer)
1672 // Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
1673 // FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
1675 // A8.6.297 vcvt (floating-point and fixed-point)
1676 // Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// Disassembles VCVT between floating-point and integer (A8.6.295) or
// fixed-point (A8.6.297). Fixed-point form: Dd|Sd, tied copy of operand 0,
// and #fbits = (16|32) - UInt(imm4:i). FP<->int form: the integer side is
// always single precision; Inst{18} picks the to-integer direction.
1677 static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1678 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1680 assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");
1682 const TargetInstrDesc &TID = ARMInsts[Opcode];
1683 const TargetOperandInfo *OpInfo = TID.OpInfo;
1685 bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
1686 bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
1687 unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1691 assert(NumOps >= 3 && "Expect >= 3 operands");
1692 int size = slice(insn, 7, 7) == 0 ? 16 : 32;
1693 int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
1694 MI.addOperand(MCOperand::CreateReg(
1695 getRegisterEnum(RegClassID,
1696 decodeVFPRd(insn, SP))));
1698 assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
1699 "Tied to operand expected");
1700 MI.addOperand(MI.getOperand(0));
1702 assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
1703 !OpInfo[2].isOptionalDef() && "Imm operand expected");
1704 MI.addOperand(MCOperand::CreateImm(fbits));
1709 // The Rd (destination) and Rm (source) bits have different interpretations
1710 // depending on whether each side is single precision.
1712 if (slice(insn, 18, 18) == 1) { // to_integer operation
1713 d = decodeVFPRd(insn, true /* Is Single Precision */);
1714 MI.addOperand(MCOperand::CreateReg(
1715 getRegisterEnum(ARM::SPRRegClassID, d)));
1716 m = decodeVFPRm(insn, SP);
1717 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, m)));
1719 d = decodeVFPRd(insn, SP);
1720 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, d)));
1721 m = decodeVFPRm(insn, true /* Is Single Precision */);
1722 MI.addOperand(MCOperand::CreateReg(
1723 getRegisterEnum(ARM::SPRRegClassID, m)));
1731 // VMOVRS - A8.6.330
1732 // Rt => Rd; Sn => UInt(Vn:N)
// VMOVRS: GPR dst (Rd) followed by the single-precision source Sn.
1733 static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1734 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1736 assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");
1738 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1740 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1741 decodeVFPRn(insn, true))));
1746 // VMOVRRD - A8.6.332
1747 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1749 // VMOVRRS - A8.6.331
1750 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// VMOVRRD / VMOVRRS: two GPR destinations (Rt, Rt2) followed by either a
// pair of consecutive SPRs (Sm, Sm+1) or a single DPR, per the operand info.
1751 static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1752 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1754 assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");
1756 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1757 unsigned &OpIdx = NumOpsAdded;
1759 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1761 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1765 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1766 unsigned Sm = decodeVFPRm(insn, true);
1767 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1769 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1773 MI.addOperand(MCOperand::CreateReg(
1774 getRegisterEnum(ARM::DPRRegClassID,
1775 decodeVFPRm(insn, false))));
1781 // VMOVSR - A8.6.330
1782 // Rt => Rd; Sn => UInt(Vn:N)
// VMOVSR: single-precision dst Sn followed by the GPR source Rd
// (reverse direction of Conv2).
1783 static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1784 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1786 assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");
1788 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1789 decodeVFPRn(insn, true))));
1790 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1796 // VMOVDRR - A8.6.332
1797 // Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
1799 // VMOVRRS - A8.6.331
1800 // Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
// VMOVDRR-style form: the FP side first (SPR pair Sm/Sm+1 or one DPR,
// per operand info), then the two GPR sources (reverse of Conv3).
1801 static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
1802 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1804 assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");
1806 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1807 unsigned &OpIdx = NumOpsAdded;
1811 if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
1812 unsigned Sm = decodeVFPRm(insn, true);
1813 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1815 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
1819 MI.addOperand(MCOperand::CreateReg(
1820 getRegisterEnum(ARM::DPRRegClassID,
1821 decodeVFPRm(insn, false))));
1825 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1827 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1833 // VFP Load/Store Instructions.
1834 // VLDRD, VLDRS, VSTRD, VSTRS
// Disassembles VLDR/VSTR: Dd/Sd data register, GPR base (Rn), then the
// Addressing Mode #5 operand built from the U bit and the imm8 offset.
1835 static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1836 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1838 assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");
1840 bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS) ? true : false;
1841 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1843 // Extract Dd/Sd for operand 0.
1844 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1846 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, RegD)));
1848 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1849 MI.addOperand(MCOperand::CreateReg(Base));
1851 // Next comes the AM5 Opcode.
1852 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
1853 unsigned char Imm8 = insn & 0xFF;
1854 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
1861 // VFP Load/Store Multiple Instructions.
1862 // This is similar to the algorithm for LDM/STM in that operand 0 (the base) and
1863 // operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
1864 // followed by a reglist of either DPR(s) or SPR(s).
1866 // VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
// Disassembles VLDM/VSTM: [writeback base for _UPD opcodes] base reg,
// AM5 submode+imm8 operand, the two predicate operands, then the DPR/SPR
// reglist starting at Dd/Sd. imm8 counts SPR slots, so DPR count is Imm8/2.
1867 static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1868 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1870 assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");
1872 unsigned &OpIdx = NumOpsAdded;
1876 unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
1878 // Writeback to base, if necessary.
1879 if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
1880 Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
1881 MI.addOperand(MCOperand::CreateReg(Base));
1885 MI.addOperand(MCOperand::CreateReg(Base));
1887 // Next comes the AM5 Opcode.
1888 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
1889 unsigned char Imm8 = insn & 0xFF;
1890 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, Imm8)));
1892 // Handling the two predicate operands before the reglist.
1893 int64_t CondVal = insn >> ARMII::CondShift;
1894 MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
1895 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
1899 bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
1900 Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD) ? true : false;
1901 unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;
1904 unsigned RegD = decodeVFPRd(insn, isSPVFP);
1906 // Fill the variadic part of reglist.
1907 unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
1908 for (unsigned i = 0; i < Regs; ++i) {
1909 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID,
1917 // Misc. VFP Instructions.
1918 // FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
1919 // FCONSTD (DPR and a VFPf64Imm operand)
1920 // FCONSTS (SPR and a VFPf32Imm operand)
1921 // VMRS/VMSR (GPR operand)
// Disassembles misc VFP instructions: FMSTAT (no operands, early return),
// FCONSTD/FCONSTS (DPR/SPR dst + unexpanded imm), VMRS/VMSR (GPR).
// The dst register class is taken from OpInfo[0].
1922 static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1923 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1925 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
1926 unsigned &OpIdx = NumOpsAdded;
1930 if (Opcode == ARM::FMSTAT)
1933 assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");
1935 unsigned RegEnum = 0;
1936 switch (OpInfo[0].RegClass) {
1937 case ARM::DPRRegClassID:
1938 RegEnum = getRegisterEnum(ARM::DPRRegClassID, decodeVFPRd(insn, false));
1940 case ARM::SPRRegClassID:
1941 RegEnum = getRegisterEnum(ARM::SPRRegClassID, decodeVFPRd(insn, true));
1943 case ARM::GPRRegClassID:
1944 RegEnum = getRegisterEnum(ARM::GPRRegClassID, decodeRd(insn));
1947 assert(0 && "Invalid reg class id");
1951 MI.addOperand(MCOperand::CreateReg(RegEnum));
1954 // Extract/decode the f64/f32 immediate.
1955 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
1956 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
1957 // The asm syntax specifies the before-expanded <imm>.
1958 // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
1959 // Opcode == ARM::FCONSTD ? 64 : 32)
1960 MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
1967 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
1968 #include "ThumbDisassemblerCore.h"
1970 /////////////////////////////////////////////////////
1972 // Utility Functions For ARM Advanced SIMD //
1974 /////////////////////////////////////////////////////
1976 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1977 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1979 // A7.3 Register encoding
1981 // Extract/Decode NEON D/Vd:
1983 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
1984 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
1985 // handling it in the getRegisterEnum() utility function.
1986 // D = Inst{22}, Vd = Inst{15-12}
// Decode NEON Dd: D:Vd = Inst{22}:Inst{15-12} (D bit becomes bit 4).
// Quadword adjustment happens in getRegisterEnum(), not here.
1987 static unsigned decodeNEONRd(uint32_t insn) {
1988 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
1989 | (insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask;
1992 // Extract/Decode NEON N/Vn:
1994 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
1995 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
1996 // handling it in the getRegisterEnum() utility function.
1997 // N = Inst{7}, Vn = Inst{19-16}
// Decode NEON Dn: N:Vn = Inst{7}:Inst{19-16}.
1998 static unsigned decodeNEONRn(uint32_t insn) {
1999 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2000 | (insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask;
2003 // Extract/Decode NEON M/Vm:
2005 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2006 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2007 // handling it in the getRegisterEnum() utility function.
2008 // M = Inst{5}, Vm = Inst{3-0}
// Decode NEON Dm: M:Vm = Inst{5}:Inst{3-0}.
2009 static unsigned decodeNEONRm(uint32_t insn) {
2010 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2011 | (insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask;
2022 } // End of unnamed namespace
2024 // size field -> Inst{11-10}
2025 // index_align field -> Inst{7-4}
2027 // The Lane Index interpretation depends on the Data Size:
2028 // 8 (encoded as size = 0b00) -> Index = index_align[3:1]
2029 // 16 (encoded as size = 0b01) -> Index = index_align[3:2]
2030 // 32 (encoded as size = 0b10) -> Index = index_align[3]
2032 // Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
// Decode the lane index for single-element NEON ld/st: drop the alignment
// bit (index_align>>1) and then `size` more low bits, per the table above.
2033 static unsigned decodeLaneIndex(uint32_t insn) {
2034 unsigned size = insn >> 10 & 3;
2035 assert((size == 0 || size == 1 || size == 2) &&
2036 "Encoding error: size should be either 0, 1, or 2");
2038 unsigned index_align = insn >> 4 & 0xF;
2039 return (index_align >> 1) >> size;
2042 // imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
2043 // op = Inst{5}, cmode = Inst{11-8}
2044 // i = Inst{24} (ARM architecture)
2045 // imm3 = Inst{18-16}, imm4 = Inst{3-0}
2046 // Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
// Expand the NEON modified immediate (AdvSIMDExpandImm): Imm8 comes from
// i:imm3:imm4; cmode selects how Imm8 is placed/replicated into Imm64.
2047 static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
2048 unsigned char cmode = (insn >> 8) & 0xF;
2049 unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
2050 ((insn >> 16) & 7) << 4 |
2059 Imm64 = Imm8 << 8*(cmode >> 1 & 1);
2063 Imm64 = (Imm8 << 8) | 0xFF;
2064 else if (cmode == 13)
2065 Imm64 = (Imm8 << 16) | 0xFFFF;
2067 // Imm8 to be shifted left by how many bytes...
2068 Imm64 = Imm8 << 8*(cmode >> 1 & 3);
2073 for (unsigned i = 0; i < 8; ++i)
2074 if ((Imm8 >> i) & 1)
// NOTE(review): `0xFF << 8*i` is an int shift; for i >= 4 it overflows
// 32-bit int before widening to Imm64 — 0xFFULL would be safe. Confirm
// whether the 64-bit replication case can reach i >= 4 here.
2075 Imm64 |= 0xFF << 8*i;
2079 assert(0 && "Unreachable code!");
2086 // A8.6.339 VMUL, VMULL (by scalar)
2087 // ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
2088 // ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
// Decode the restricted Dm for by-scalar multiplies: Vm<2:0> for 16-bit
// elements (D0-D7), Vm<3:0> for 32-bit elements (D0-D15).
2089 static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
2096 assert(0 && "Unreachable code!");
2101 // A8.6.339 VMUL, VMULL (by scalar)
2102 // ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
2103 // ESize32 => index = Inst{5} (M) D0-D15
// Decode the scalar index paired with decodeRestrictedDm, from M (and
// Vm<3> for 16-bit elements).
2104 static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
2107 return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
2109 return (insn >> 5) & 1;
2111 assert(0 && "Unreachable code!");
2116 // A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
2117 // (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
// Recover <fbits> from the imm6 field: fbits = 64 - imm6.
2118 static unsigned decodeVCVTFractionBits(uint32_t insn) {
2119 return 64 - ((insn >> 16) & 0x3F);
2122 // A8.6.302 VDUP (scalar)
2123 // ESize8 => index = Inst{19-17}
2124 // ESize16 => index = Inst{19-18}
2125 // ESize32 => index = Inst{19}
// Decode the VDUP (scalar) lane index from Inst{19-17}, narrowed per the
// element size as listed in the table above.
2126 static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
2129 return (insn >> 17) & 7;
2131 return (insn >> 18) & 3;
2133 return (insn >> 19) & 1;
2135 assert(0 && "Unspecified element size!");
2140 // A8.6.328 VMOV (ARM core register to scalar)
2141 // A8.6.329 VMOV (scalar to ARM core register)
2142 // ESize8 => index = Inst{21:6-5}
2143 // ESize16 => index = Inst{21:6}
2144 // ESize32 => index = Inst{21}
// Decode the VMOV core-reg<->scalar lane index from opc1:opc2 bits
// (Inst{21}, Inst{6-5}), widths per the table above.
2145 static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
2148 return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
2150 return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
2152 return ((insn >> 21) & 1);
2154 assert(0 && "Unspecified element size!");
2159 // Imm6 = Inst{21-16}, L = Inst{7}
2161 // LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
2163 // '0001xxx' => esize = 8; shift_amount = imm6 - 8
2164 // '001xxxx' => esize = 16; shift_amount = imm6 - 16
2165 // '01xxxxx' => esize = 32; shift_amount = imm6 - 32
2166 // '1xxxxxx' => esize = 64; shift_amount = imm6
2168 // LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
2170 // '0001xxx' => esize = 8; shift_amount = 16 - imm6
2171 // '001xxxx' => esize = 16; shift_amount = 32 - imm6
2172 // '01xxxxx' => esize = 32; shift_amount = 64 - imm6
2173 // '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
// Decodes the shift amount of a NEON vector shift from L:imm6, using the
// position of imm6's leading one bit to determine the element size.
2175 static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
2176 ElemSize esize = ESizeNA;
2177 unsigned L = (insn >> 7) & 1;
2178 unsigned imm6 = (insn >> 16) & 0x3F;
// Classify imm6 by its leading one to pick esize.  (The L == 1 / ESize64
// branch and the imm6 >> 3 test for ESize8 are elided from this excerpt.)
2182 else if (imm6 >> 4 == 1)
2184 else if (imm6 >> 5 == 1)
2187 assert(0 && "Wrong encoding of Inst{7:21-16}!");
// Left shift: amount = imm6 - esize (imm6 itself for 64-bit elements).
2192 return esize == ESize64 ? imm6 : (imm6 - esize);
// Right shift: amount = 2*esize - imm6 (64 - imm6 for 64-bit elements;
// note esize's numeric value is 64 in the ESize64 arm).
2194 return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
2198 // Imm4 = Inst{11-8}
static unsigned decodeN3VImm(uint32_t insn) {
  // The three-register-form immediate operand lives in Inst{11-8}.
  const unsigned imm4 = (insn >> 8) & 0xF;
  return imm4;
}
2204 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
2206 // D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
2208 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
2210 // Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
2212 // Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
// Builds the MCOperand list for NEON structured load/store (VLD*/VST*).
// Stores list the register operands first, loads list the address first,
// per the operand layouts sketched above.
2213 static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
2214 unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced) {
2216 const TargetInstrDesc &TID = ARMInsts[Opcode];
2217 const TargetOperandInfo *OpInfo = TID.OpInfo;
2219 // At least one DPR register plus addressing mode #6.
2220 assert(NumOps >= 3 && "Expect >= 3 operands");
// OpIdx aliases NumOpsAdded so every operand append is counted for the
// caller automatically.
2222 unsigned &OpIdx = NumOpsAdded;
2226 // We have homogeneous NEON registers for Load/Store.
2227 unsigned RegClass = 0;
2229 // Double-spaced registers have increments of 2.
2230 unsigned Inc = DblSpaced ? 2 : 1;
2232 unsigned Rn = decodeRn(insn);
2233 unsigned Rm = decodeRm(insn);
2234 unsigned Rd = decodeNEONRd(insn);
2236 // A7.7.1 Advanced SIMD addressing mode.
2239 // LLVM Addressing Mode #6.
2240 unsigned RmEnum = 0;
2242 RmEnum = getRegisterEnum(ARM::GPRRegClassID, Rm);
// ---- Store path.  (The if (Store) guard and several closing braces are
// elided from this excerpt.)
2245 // Consume possible WB, AddrMode6, possible increment reg, the DPR/QPR's,
2246 // then possible lane index.
2247 assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
2248 "Reg operand expected");
2251 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2256 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2257 OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
2258 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2260 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2264 MI.addOperand(MCOperand::CreateReg(RmEnum));
2268 assert(OpIdx < NumOps &&
2269 (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2270 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2271 "Reg operand expected");
2273 RegClass = OpInfo[OpIdx].RegClass;
2274 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
// VST1q* opcodes take a single Q register operand (the true flag selects
// the Q-register variant of getRegisterEnum).
2275 if (Opcode >= ARM::VST1q16 && Opcode <= ARM::VST1q8)
2276 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
2278 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
2283 // Handle possible lane index.
2284 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2285 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2286 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// ---- Load path.
2291 // Consume the DPR/QPR's, possible WB, AddrMode6, possible increment reg,
2292 // possible TIED_TO DPR/QPR's (ignored), then possible lane index.
2293 RegClass = OpInfo[0].RegClass;
2295 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
2296 if (Opcode >= ARM::VLD1q16 && Opcode <= ARM::VLD1q8)
2297 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
2299 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
2305 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2310 assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
2311 OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
2312 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2314 MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
2318 MI.addOperand(MCOperand::CreateReg(RmEnum));
// TIED_TO destination registers contribute placeholder operands only.
2322 while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
2323 assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
2324 "Tied to operand expected");
2325 MI.addOperand(MCOperand::CreateReg(0));
2329 // Handle possible lane index.
2330 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2331 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2332 MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
2341 // If L (Inst{21}) == 0, store instructions.
2342 // Find out about double-spaced-ness of the Opcode and pass it on to
2343 // DisassembleNLdSt0().
// Entry point for NEON load/store forms: derives DblSpaced from the opcode
// name plus encoding bits, then delegates to DisassembleNLdSt0().
2344 static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
2345 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2347 const StringRef Name = ARMInsts[Opcode].Name;
2348 bool DblSpaced = false;
2350 if (Name.find("LN") != std::string::npos) {
2351 // To one lane instructions.
2352 // See, for example, A8.6.317 VLD4 (single 4-element structure to one lane).
2354 // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
2355 if (Name.endswith("16") || Name.endswith("16_UPD"))
2356 DblSpaced = slice(insn, 5, 5) == 1;
2358 // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
2359 if (Name.endswith("32") || Name.endswith("32_UPD"))
2360 DblSpaced = slice(insn, 6, 6) == 1;
// Non-lane (multiple-structure) forms: spacing is implied by the type
// field.  (The else introducing this branch is elided from this excerpt.)
2363 // Multiple n-element structures with type encoded as Inst{11-8}.
2364 // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
2366 // n == 2 && type == 0b1001 -> DblSpaced = true
2367 if (Name.startswith("VST2") || Name.startswith("VLD2"))
2368 DblSpaced = slice(insn, 11, 8) == 9;
2370 // n == 3 && type == 0b0101 -> DblSpaced = true
2371 if (Name.startswith("VST3") || Name.startswith("VLD3"))
2372 DblSpaced = slice(insn, 11, 8) == 5;
2374 // n == 4 && type == 0b0001 -> DblSpaced = true
2375 if (Name.startswith("VST4") || Name.startswith("VLD4"))
2376 DblSpaced = slice(insn, 11, 8) == 1;
// L == 0 (Inst{21}) means a store, hence Store = (L == 0) below.
2379 return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
2380 slice(insn, 21, 21) == 0, DblSpaced);
// One register plus a modified-immediate value (A7.4.6); LLVM only defines
// the VMOVv* opcodes for this format.  Operands: Qd/Dd, imm.
2385 static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
2386 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2388 const TargetInstrDesc &TID = ARMInsts[Opcode];
2389 const TargetOperandInfo *OpInfo = TID.OpInfo;
2391 assert(NumOps >= 2 &&
2392 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2393 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2394 (OpInfo[1].RegClass == 0) &&
2395 "Expect 1 reg operand followed by 1 imm operand");
2397 // Qd/Dd = Inst{22:15-12} => NEON Rd
2398 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[0].RegClass,
2399 decodeNEONRd(insn))));
// Element size is implied by the opcode.  (The switch (Opcode) line and
// the esize assignments between the case groups are elided here.)
2401 ElemSize esize = ESizeNA;
2404 case ARM::VMOVv16i8:
2407 case ARM::VMOVv4i16:
2408 case ARM::VMOVv8i16:
2411 case ARM::VMOVv2i32:
2412 case ARM::VMOVv4i32:
2415 case ARM::VMOVv1i64:
2416 case ARM::VMOVv2i64:
2419 assert(0 && "Unreachable code!");
2423 // One register and a modified immediate value.
2424 // Add the imm operand.
2425 MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2435 N2V_VectorConvert_Between_Float_Fixed
2437 } // End of unnamed namespace
2439 // Vector Convert [between floating-point and fixed-point]
2440 // Qd/Dd Qm/Dm [fbits]
2442 // Vector Duplicate Lane (from scalar to all elements) Instructions.
2443 // VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
2446 // Vector Move Long:
2449 // Vector Move Narrow:
// Common worker for two-register NEON forms: builds Qd/Dd, optional
// TIED_TO placeholder, Qm/Dm, then an optional immediate whose meaning is
// selected by Flag (lane index or VCVT fraction bits).
2453 static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
2454 unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag = N2V_None) {
2456 const TargetInstrDesc &TID = ARMInsts[Opc];
2457 const TargetOperandInfo *OpInfo = TID.OpInfo;
2459 assert(NumOps >= 2 &&
2460 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2461 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2462 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2463 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2464 "Expect >= 2 operands and first 2 as reg operands");
2466 unsigned &OpIdx = NumOpsAdded;
2470 ElemSize esize = ESizeNA;
2471 if (Flag == N2V_VectorDupLane) {
2472 // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
2473 assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
2474 "Unexpected Opcode");
// (The ESize32 fallback arm of this conditional expression is elided.)
2475 esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
2476 : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
2480 // Qd/Dd = Inst{22:15-12} => NEON Rd
2481 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2482 decodeNEONRd(insn))));
// A TIED_TO destination gets a placeholder register operand.
2486 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2488 MI.addOperand(MCOperand::CreateReg(0));
2492 // Dm = Inst{5:3-0} => NEON Rm
2493 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2494 decodeNEONRm(insn))));
2497 // VZIP and others have two TIED_TO reg operands.
2499 while (OpIdx < NumOps &&
2500 (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2501 // Add TIED_TO operand.
2502 MI.addOperand(MI.getOperand(Idx));
2506 // Add the imm operand, if required.
2507 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2508 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
// Sentinel so the assert below catches a Flag that supplies no immediate.
2510 unsigned imm = 0xFFFFFFFF;
2512 if (Flag == N2V_VectorDupLane)
2513 imm = decodeNVLaneDupIndex(insn, esize);
2514 if (Flag == N2V_VectorConvert_Between_Float_Fixed)
2515 imm = decodeVCVTFractionBits(insn);
2517 assert(imm != 0xFFFFFFFF && "Internal error");
2518 MI.addOperand(MCOperand::CreateImm(imm));
// Plain two-register form: delegate with the default N2V_None flag (no
// trailing immediate is decoded).
2525 static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2526 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2528 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded);
// VCVT between floating-point and fixed-point: the flag makes the worker
// decode the fbits immediate from imm6.
2530 static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2531 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2533 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2534 N2V_VectorConvert_Between_Float_Fixed);
// VDUP (scalar) lane form: the flag makes the worker decode the lane index.
2536 static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
2537 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
// (The continuation line carrying the N2V_VectorDupLane flag argument is
// elided from this excerpt — confirm upstream.)
2539 return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
2543 // Vector Shift [Accumulate] Instructions.
2544 // Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
2546 // Vector Shift Left Long (with maximum shift count) Instructions.
2547 // VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
// Common worker for immediate vector shifts; LeftShift selects how the
// shift amount is recovered from L:imm6 (see decodeNVSAmt).
2549 static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
2550 unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift) {
2552 const TargetInstrDesc &TID = ARMInsts[Opcode];
2553 const TargetOperandInfo *OpInfo = TID.OpInfo;
2555 assert(NumOps >= 3 &&
2556 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2557 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2558 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2559 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2560 "Expect >= 3 operands and first 2 as reg operands");
2562 unsigned &OpIdx = NumOpsAdded;
2566 // Qd/Dd = Inst{22:15-12} => NEON Rd
2567 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2568 decodeNEONRd(insn))));
// Accumulate forms tie the destination; add a placeholder for it.
2571 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2573 MI.addOperand(MCOperand::CreateReg(0));
2577 assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
2578 OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
2579 "Reg operand expected");
2581 // Qm/Dm = Inst{5:3-0} => NEON Rm
2582 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2583 decodeNEONRm(insn))));
2586 assert(OpInfo[OpIdx].RegClass == 0 && "Imm operand expected");
2588 // Add the imm operand.
2590 // VSHLL has maximum shift count as the imm, inferred from its size.
// (The Imm declaration and the VSHLL special-case branch are elided from
// this excerpt; the general case decodes the amount from L:imm6.)
2594 Imm = decodeNVSAmt(insn, LeftShift);
2606 MI.addOperand(MCOperand::CreateImm(Imm));
2612 // Left shift instructions.
// Thin wrapper: left-shift interpretation of the imm6 field.
2613 static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
2614 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2616 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true);
2618 // Right shift instructions have different shift amount interpretation.
// Thin wrapper: right-shift interpretation (amount = 2*esize - imm6).
2619 static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
2620 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2622 return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false);
2630 N3V_Multiply_By_Scalar
2632 } // End of unnamed namespace
2634 // NEON Three Register Instructions with Optional Immediate Operand
2636 // Vector Extract Instructions.
2637 // Qd/Dd Qn/Dn Qm/Dm imm4
2639 // Vector Shift (Register) Instructions.
2640 // Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
2642 // Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
2643 // Qd/Dd Qn/Dn RestrictedDm index
// Common worker for three-register NEON forms.  Flag selects the operand
// order (VdVnVm vs. shift's VdVmVn), whether an imm4 follows (VEXT), and
// whether Dm/index use the restricted by-scalar encodings.
2646 static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
2647 unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag = N3V_None) {
2649 const TargetInstrDesc &TID = ARMInsts[Opcode];
2650 const TargetOperandInfo *OpInfo = TID.OpInfo;
2652 // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
2653 assert(NumOps >= 3 &&
2654 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2655 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2656 (OpInfo[1].RegClass == ARM::DPRRegClassID ||
2657 OpInfo[1].RegClass == ARM::QPRRegClassID) &&
2658 "Expect >= 3 operands and first 2 as reg operands");
2660 unsigned &OpIdx = NumOpsAdded;
2664 bool VdVnVm = Flag == N3V_VectorShift ? false : true;
2665 bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
2666 bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
2667 ElemSize esize = ESizeNA;
2668 if (Flag == N3V_Multiply_By_Scalar) {
// size = Inst{21-20}: only 16- and 32-bit elements are valid by-scalar.
2669 unsigned size = (insn >> 20) & 3;
2670 if (size == 1) esize = ESize16;
2671 if (size == 2) esize = ESize32;
2672 assert (esize == ESize16 || esize == ESize32);
2675 // Qd/Dd = Inst{22:15-12} => NEON Rd
2676 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
2677 decodeNEONRd(insn))));
2680 // VABA, VABAL, VBSLd, VBSLq, ...
// A TIED_TO destination gets a placeholder register operand.
2681 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
2683 MI.addOperand(MCOperand::CreateReg(0));
2687 // Dn = Inst{7:19-16} => NEON Rn
2689 // Dm = Inst{5:3-0} => NEON Rm
2690 MI.addOperand(MCOperand::CreateReg(
2691 getRegisterEnum(OpInfo[OpIdx].RegClass,
2692 VdVnVm ? decodeNEONRn(insn)
2693 : decodeNEONRm(insn))));
2696 // Special case handling for VMOVDneon and VMOVQ because they are marked as
// (Continuation of this comment and the early return are elided here.)
2698 if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
2701 // Dm = Inst{5:3-0} => NEON Rm
2703 // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
2705 // Dn = Inst{7:19-16} => NEON Rn
2706 unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
2707 : decodeNEONRm(insn))
2708 : decodeNEONRn(insn);
2710 MI.addOperand(MCOperand::CreateReg(
2711 getRegisterEnum(OpInfo[OpIdx].RegClass, m)));
2714 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
2715 && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
2716 // Add the imm operand.
// (The Imm declaration and the if (IsImm4) guard are elided here.)
2719 Imm = decodeN3VImm(insn);
2720 else if (IsDmRestricted)
2721 Imm = decodeRestrictedDmIndex(insn, esize);
2723 assert(0 && "Internal error: unreachable code!");
2727 MI.addOperand(MCOperand::CreateImm(Imm));
// Plain three-register form: delegate with the default N3V_None flag.
2734 static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2735 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2737 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded);
// Vector shift (register) form: operand order is Vd, Vm, Vn.  (The
// continuation line carrying the N3V_VectorShift flag is elided here.)
2739 static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
2740 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2742 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// VEXT form: a trailing imm4 is decoded.  (The continuation line carrying
// the N3V_VectorExtract flag is elided here.)
2745 static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2746 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2748 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// Multiply by scalar form: Dm and the lane index use restricted encodings.
2751 static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
2752 uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2754 return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
2755 N3V_Multiply_By_Scalar);
2758 // Vector Table Lookup
2760 // VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
2761 // VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
2762 // VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
2763 // VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
// Builds operands for VTBL/VTBX: destination, optional tied copy, the
// table register list {Dn..Dn+Len-1}, then the index vector Dm.
2764 static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2765 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2767 const TargetInstrDesc &TID = ARMInsts[Opcode];
2768 const TargetOperandInfo *OpInfo = TID.OpInfo;
2770 assert(NumOps >= 3 &&
2771 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2772 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2773 OpInfo[2].RegClass == ARM::DPRRegClassID &&
2774 "Expect >= 3 operands and first 3 as reg operands");
2776 unsigned &OpIdx = NumOpsAdded;
2780 unsigned Rn = decodeNEONRn(insn);
2782 // {Dn} encoded as len = 0b00
2783 // {Dn Dn+1} encoded as len = 0b01
2784 // {Dn Dn+1 Dn+2 } encoded as len = 0b10
2785 // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
2786 unsigned Len = slice(insn, 9, 8) + 1;
2788 // Dd (the destination vector)
2789 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2790 decodeNEONRd(insn))));
2793 // Process tied_to operand constraint.
2795 if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
2796 MI.addOperand(MI.getOperand(Idx));
2800 // Do the <list> now.
2801 for (unsigned i = 0; i < Len; ++i) {
2802 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2803 "Reg operand expected");
// (The operand append for Rn + i and the loop's closing brace are elided
// from this excerpt.)
2804 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2809 // Dm (the index vector)
2810 assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
2811 "Reg operand (index vector) expected");
2812 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2813 decodeNEONRm(insn))));
// Generic NEON format entry.  Every NEON instruction is dispatched through
// one of the more specific N* handlers above, so reaching this is a bug.
2819 static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2820 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2821 assert(0 && "Unreachable code!");
2825 // Vector Get Lane (move scalar to ARM core register) Instructions.
2826 // VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
// Builds operands for VGETLN*: core register Rt, source Dn, lane index.
2827 static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2828 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2830 const TargetInstrDesc &TID = ARMInsts[Opcode];
2831 unsigned short NumDefs = TID.getNumDefs();
2832 const TargetOperandInfo *OpInfo = TID.OpInfo;
2834 assert(NumDefs == 1 && NumOps >= 3 &&
2835 OpInfo[0].RegClass == ARM::GPRRegClassID &&
2836 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2837 OpInfo[2].RegClass == 0 &&
2838 "Expect >= 3 operands with one dst operand");
// Element size is implied by the opcode; 8-bit is the remaining fallback.
// (The "ElemSize esize =" line and the ESize8 arm are elided here.)
2841 Opcode == ARM::VGETLNi32 ? ESize32
2842 : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
2845 // Rt = Inst{15-12} => ARM Rd
2846 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2849 // Dn = Inst{7:19-16} => NEON Rn
2850 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2851 decodeNEONRn(insn))));
2853 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2859 // Vector Set Lane (move ARM core register to scalar) Instructions.
2860 // VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
// Builds operands for VSETLN*: destination Dd, tied placeholder, source
// core register Rt, lane index.
2861 static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2862 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2864 const TargetInstrDesc &TID = ARMInsts[Opcode];
2865 unsigned short NumDefs = TID.getNumDefs();
2866 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NOTE(review): the guard asserts NumOps >= 3 yet OpInfo[3] is
// dereferenced below — arguably it should require NumOps >= 4; confirm
// against the instruction descriptions.
2868 assert(NumDefs == 1 && NumOps >= 3 &&
2869 OpInfo[0].RegClass == ARM::DPRRegClassID &&
2870 OpInfo[1].RegClass == ARM::DPRRegClassID &&
2871 TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
2872 OpInfo[2].RegClass == ARM::GPRRegClassID &&
2873 OpInfo[3].RegClass == 0 &&
2874 "Expect >= 3 operands with one dst operand");
// Element size is implied by the opcode; 32-bit is the remaining fallback.
// (The "ElemSize esize =" line and the ESize32 arm are elided here.)
2877 Opcode == ARM::VSETLNi8 ? ESize8
2878 : (Opcode == ARM::VSETLNi16 ? ESize16
2881 // Dd = Inst{7:19-16} => NEON Rn
2882 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
2883 decodeNEONRn(insn))));
// Placeholder for the TIED_TO copy of Dd.
2886 MI.addOperand(MCOperand::CreateReg(0));
2888 // Rt = Inst{15-12} => ARM Rd
2889 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2892 MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
2898 // Vector Duplicate Instructions (from ARM core register to all elements).
2899 // VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
// Builds operands for VDUP (core register): destination Qd/Dd then Rt.
2900 static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2901 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2903 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
2905 assert(NumOps >= 2 &&
2906 (OpInfo[0].RegClass == ARM::DPRRegClassID ||
2907 OpInfo[0].RegClass == ARM::QPRRegClassID) &&
2908 OpInfo[1].RegClass == ARM::GPRRegClassID &&
2909 "Expect >= 2 operands and first 2 as reg operand");
2911 unsigned RegClass = OpInfo[0].RegClass;
2913 // Qd/Dd = Inst{7:19-16} => NEON Rn
2914 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,
2915 decodeNEONRn(insn))));
2917 // Rt = Inst{15-12} => ARM Rd
// (The decodeRd continuation line and the function tail are elided here.)
2918 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Returns true for the DMB/DSB/ISB encodings: Inst{31-20} == 0xf57 with
// op = Inst{7-4} in [4, 6].  (The return statements are elided from this
// excerpt.)
2928 static inline bool MemBarrierInstr(uint32_t insn) {
2929 unsigned op7_4 = slice(insn, 7, 4);
2930 if (slice(insn, 31, 20) == 0xf57 && (op7_4 >= 4 && op7_4 <= 6))
// Returns true for the preload opcodes handled by DisassemblePreLoadFrm.
// (The switch keyword and the return true/false lines are elided from this
// excerpt.)
2936 static inline bool PreLoadOpcode(unsigned Opcode) {
2938 case ARM::PLDi: case ARM::PLDr:
2939 case ARM::PLDWi: case ARM::PLDWr:
2940 case ARM::PLIi: case ARM::PLIr:
2947 static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2948 unsigned short NumOps, unsigned &NumOpsAdded) {
2950 // Preload Data/Instruction requires either 2 or 4 operands.
2951 // PLDi, PLDWi, PLIi: Rn [+/-]imm12 add = (U == '1')
2952 // PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
2954 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2957 if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
2958 unsigned Imm12 = slice(insn, 11, 0);
2959 bool Negative = getUBit(insn) == 0;
2960 int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
2961 MI.addOperand(MCOperand::CreateImm(Offset));
2964 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2967 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
2969 // Inst{6-5} encodes the shift opcode.
2970 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
2971 // Inst{11-7} encodes the imm5 shift amount.
2972 unsigned ShImm = slice(insn, 11, 7);
2974 // A8.4.1. Possible rrx or shift amount of 32...
2975 getImmShiftSE(ShOp, ShImm);
2976 MI.addOperand(MCOperand::CreateImm(
2977 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// Dispatches the grab-bag "misc" format: memory barriers, CPS, DBG, BKPT
// and the preload instructions.  (Several return statements and the
// barrier-handling body are elided from this excerpt.)
2984 static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
2985 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
2987 if (MemBarrierInstr(insn))
3005 // CPS has a singleton $opt operand that contains the following information:
3006 // opt{4-0} = mode from Inst{4-0}
3007 // opt{5} = changemode from Inst{17}
3008 // opt{8-6} = AIF from Inst{8-6}
3009 // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
3010 if (Opcode == ARM::CPS) {
3011 unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
3012 slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
3013 MI.addOperand(MCOperand::CreateImm(Option));
3018 // DBG has its option specified in Inst{3-0}.
3019 if (Opcode == ARM::DBG) {
3020 MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
3025 // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
3026 if (Opcode == ARM::BKPT) {
3027 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
3028 slice(insn, 3, 0)));
3033 if (PreLoadOpcode(Opcode))
3034 return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded);
3036 assert(0 && "Unexpected misc instruction!");
// Thumb misc instructions are handled inside DisassembleThumbFrm; this
// table slot should never be dispatched.
3040 static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
3041 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
3043 assert(0 && "Unexpected thumb misc. instruction!");
3047 /// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
3048 /// We divide the disassembly task into different categories, with each one
3049 /// corresponding to a specific instruction encoding format. There could be
3050 /// exceptions when handling a specific format, and that is why the Opcode is
3051 /// also present in the function prototype.
// NOTE(review): entry order must match the ARMFormat enum, since the table
// is indexed by Format in GetInstance(); several entries are elided from
// this excerpt.
3052 static const DisassembleFP FuncPtrs[] = {
3056 &DisassembleBrMiscFrm,
3058 &DisassembleDPSoRegFrm,
3061 &DisassembleLdMiscFrm,
3062 &DisassembleStMiscFrm,
3063 &DisassembleLdStMulFrm,
3064 &DisassembleLdStExFrm,
3065 &DisassembleArithMiscFrm,
3067 &DisassembleVFPUnaryFrm,
3068 &DisassembleVFPBinaryFrm,
3069 &DisassembleVFPConv1Frm,
3070 &DisassembleVFPConv2Frm,
3071 &DisassembleVFPConv3Frm,
3072 &DisassembleVFPConv4Frm,
3073 &DisassembleVFPConv5Frm,
3074 &DisassembleVFPLdStFrm,
3075 &DisassembleVFPLdStMulFrm,
3076 &DisassembleVFPMiscFrm,
3077 &DisassembleThumbFrm,
3078 &DisassembleNEONFrm,
3079 &DisassembleNEONGetLnFrm,
3080 &DisassembleNEONSetLnFrm,
3081 &DisassembleNEONDupFrm,
3082 &DisassembleMiscFrm,
3083 &DisassembleThumbMiscFrm,
3085 // VLD and VST (including one lane) Instructions.
3088 // A7.4.6 One register and a modified immediate value
3089 // 1-Register Instructions with imm.
3090 // LLVM only defines VMOVv instructions.
3091 &DisassembleN1RegModImmFrm,
3093 // 2-Register Instructions with no imm.
3094 &DisassembleN2RegFrm,
3096 // 2-Register Instructions with imm (vector convert float/fixed point).
3097 &DisassembleNVCVTFrm,
3099 // 2-Register Instructions with imm (vector dup lane).
3100 &DisassembleNVecDupLnFrm,
3102 // Vector Shift Left Instructions.
3103 &DisassembleN2RegVecShLFrm,
3105 // Vector Shift Right Instructions, which has different interpretation of the
3106 // shift amount from the imm6 field.
3107 &DisassembleN2RegVecShRFrm,
3109 // 3-Register Data-Processing Instructions.
3110 &DisassembleN3RegFrm,
3112 // Vector Shift (Register) Instructions.
3113 // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
3114 &DisassembleN3RegVecShFrm,
3116 // Vector Extract Instructions.
3117 &DisassembleNVecExtractFrm,
3119 // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
3120 // By Scalar Instructions.
3121 &DisassembleNVecMulScalarFrm,
3123 // Vector Table Lookup uses byte indexes in a control vector to look up byte
3124 // values in a table and generate a new vector.
3125 &DisassembleNVTBLFrm,
3130 /// Algorithms - Algorithms stores a map from Format to ARMAlgorithm*.
// NOTE(review): raw owning pointers in a global vector, populated lazily
// by ARMAlgorithm::GetInstance() and freed by ARMAlgorithm::DoCleanup().
3131 static std::vector<ARMAlgorithm*> Algorithms;
3133 /// DoCleanup - Do cleanup of Algorithms upon exit.
3134 void ARMAlgorithm::DoCleanup() {
// Frees every lazily-allocated ARMAlgorithm entry (see GetInstance()).
3135 for (unsigned i = 0; i < array_lengthof(FuncPtrs); ++i)
// (A line between the loop header and the delete is elided from this
// excerpt — presumably a null-entry guard; confirm upstream.)
3137 delete Algorithms[i];
3140 /// GetInstance - GetInstance returns an instance of ARMAlgorithm given the
3141 /// encoding Format. API clients should not free up the returned instance.
// NOTE(review): the lazy init below has no visible locking, so it is not
// thread-safe — confirm all callers run single-threaded.
3142 ARMAlgorithm *ARMAlgorithm::GetInstance(ARMFormat Format) {
3143 /// Init the first time.
3144 if (Algorithms.size() == 0) {
3145 Algorithms.resize(array_lengthof(FuncPtrs));
3146 for (unsigned i = 0, num = array_lengthof(FuncPtrs); i < num; ++i)
// (The guard choosing between these two assignments is elided from this
// excerpt; NULL entries presumably correspond to null FuncPtrs slots.)
3148 Algorithms[i] = new ARMAlgorithm(FuncPtrs[i]);
3150 Algorithms[i] = NULL;
3152 // Register cleanup routine.
// Table is indexed directly by the Format enum value.
3155 return Algorithms[Format];
3158 /// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
3159 /// The general idea is to set the Opcode for the MCInst, followed by adding
3160 /// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
3161 /// to the Algo (ARM Disassemble Algorithm) object to perform Format-specific
3162 /// disassembly, followed by class method TryPredicateAndSBitModifier() to do
3163 /// PredicateOperand and OptionalDefOperand which follow the Dst/Src Operands.
3164 bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
3165 // Stage 1 sets the Opcode.
3166 MI.setOpcode(Opcode);
3167 // If the number of operands is zero, we're done!
// (The early return for NumOps == 0 is elided from this excerpt.)
3171 // Stage 2 calls the ARM Disassembly Algorithm to build the operand list.
3172 unsigned NumOpsAdded = 0;
3173 bool OK = Algo.Solve(MI, Opcode, insn, NumOps, NumOpsAdded, this);
3175 if (!OK) return false;
// All operands accounted for — nothing left for stage 3.
3176 if (NumOpsAdded >= NumOps)
3179 // Stage 3 deals with operands unaccounted for after stage 2 is finished.
3180 // FIXME: Should this be done selectively?
3181 return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
// Appends the trailing predicate (cond imm + CCR reg) and, if present, the
// optional-def CPSR operand that follow the Dst/Src operands built by the
// format-specific handlers.
3184 bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
3185 uint32_t insn, unsigned short NumOpsRemaining) {
3187 assert(NumOpsRemaining > 0 && "Invalid argument");
3189 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
3190 const std::string &Name = ARMInsts[Opcode].Name;
// Idx is the position of the next not-yet-built operand in OpInfo.
3191 unsigned Idx = MI.getNumOperands();
3193 // First, we check whether this instr specifies the PredicateOperand through
3194 // a pair of TargetOperandInfos with isPredicate() property.
3195 if (NumOpsRemaining >= 2 &&
3196 OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
3197 OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
3199 // If we are inside an IT block, get the IT condition bits maintained via
3200 // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
// (The InITBlock() guard for this call is elided from this excerpt.)
3203 MI.addOperand(MCOperand::CreateImm(GetITCond()));
3205 if (Name.length() > 1 && Name[0] == 't') {
3206 // Thumb conditional branch instructions have their cond field embedded,
// t2Bcc/tBcc carry the condition in the instruction word itself; other
// Thumb instructions are unconditional (AL) outside an IT block.
3210 if (Name == "t2Bcc")
3211 MI.addOperand(MCOperand::CreateImm(slice(insn, 25, 22)));
3212 else if (Name == "tBcc")
3213 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 8)));
3215 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3217 // ARM Instructions. Check condition field.
3218 int64_t CondVal = getCondField(insn);
// (The 0xF special-case guard between these two appends is elided here.)
3220 MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
3222 MI.addOperand(MCOperand::CreateImm(CondVal));
// The second half of the predicate operand pair is the CCR register.
3225 MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
3227 NumOpsRemaining -= 2;
3228 if (NumOpsRemaining == 0)
3232 // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
3233 if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
3234 MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
3238 if (NumOpsRemaining == 0)
3244 /// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
3245 /// after BuildIt is finished.
3246 bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
// SP is the session/state pointer; without one there is no IT-block state
// to maintain.
3249 if (!SP) return Status;
// t2IT starts a new IT block: latch its mask/cond bits; otherwise advance
// the existing IT state.  (The InITBlock() action line is elided here.)
3251 if (Opcode == ARM::t2IT)
3252 SP->InitIT(slice(insn, 7, 0));
3253 else if (InITBlock())
3259 /// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
3260 /// infrastructure of an MCInst given the Opcode and Format of the instr.
3261 /// Return NULL if it fails to create/return a proper builder. API clients
3262 /// are responsible for freeing up of the allocated memory. Cacheing can be
3263 /// performed by the API clients to improve performance.
3264 ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
3266 ARMAlgorithm *Algo = ARMAlgorithm::GetInstance(Format);
3270 return new ARMBasicMCBuilder(Opcode, Format,
3271 ARMInsts[Opcode].getNumOperands(), *Algo);