1 //===- ARMDisassemblerCore.cpp - ARM disassembler helpers -------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file is part of the ARM Disassembler.
11 // It contains code to represent the core concepts of Builder and DisassembleFP
12 // to solve the problem of disassembling an ARM instr.
14 //===----------------------------------------------------------------------===//
16 #include "ARMDisassemblerCore.h"
17 #include "ARMAddressingModes.h"
18 #include "llvm/Support/raw_ostream.h"
20 /// ARMGenInstrInfo.inc - ARMGenInstrInfo.inc contains the static const
21 /// TargetInstrDesc ARMInsts[] definition and the TargetOperandInfo[]'s
22 /// describing the operand info for each ARMInsts[i].
24 /// Together with an instruction's encoding format, we can take advantage of the
25 /// NumOperands and the OpInfo fields of the target instruction description in
26 /// the quest to build out the MCOperand list for an MCInst.
28 /// The general guideline is that with a known format, the number of dst and src
29 /// operands are well-known. The dst is built first, followed by the src
30 /// operand(s). The operands not yet used at this point are for the Implicit
31 /// Uses and Defs by this instr. For the Uses part, the pred:$p operand is
32 /// defined with two components:
34 /// def pred { // Operand PredicateOperand
35 /// ValueType Type = OtherVT;
36 /// string PrintMethod = "printPredicateOperand";
37 /// string AsmOperandLowerMethod = ?;
38 /// dag MIOperandInfo = (ops i32imm, CCR);
39 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
40 /// dag DefaultOps = (ops (i32 14), (i32 zero_reg));
43 /// which is manifested by the TargetOperandInfo[] of:
45 /// { 0, 0|(1<<TOI::Predicate), 0 },
46 /// { ARM::CCRRegClassID, 0|(1<<TOI::Predicate), 0 }
48 /// So the first predicate MCOperand corresponds to the immediate part of the
49 /// ARM condition field (Inst{31-28}), and the second predicate MCOperand
50 /// corresponds to a register kind of ARM::CPSR.
52 /// For the Defs part, in the simple case of only cc_out:$s, we have:
54 /// def cc_out { // Operand OptionalDefOperand
55 /// ValueType Type = OtherVT;
56 /// string PrintMethod = "printSBitModifierOperand";
57 /// string AsmOperandLowerMethod = ?;
58 /// dag MIOperandInfo = (ops CCR);
59 /// AsmOperandClass ParserMatchClass = ImmAsmOperand;
60 /// dag DefaultOps = (ops (i32 zero_reg));
63 /// which is manifested by the one TargetOperandInfo of:
65 /// { ARM::CCRRegClassID, 0|(1<<TOI::OptionalDef), 0 }
67 /// And this maps to one MCOperand with the register kind of ARM::CPSR.
68 #include "ARMGenInstrInfo.inc"
// Map an ARM opcode enum value to its mnemonic string by indexing the
// tablegen-generated ARMInsts[] instruction-descriptor table.
// NOTE(review): this listing appears to drop lines (e.g. the closing brace
// of this function is missing); confirm against the complete source file.
72 const char *ARMUtils::OpcodeName(unsigned Opcode) {
73 return ARMInsts[Opcode].Name;
76 // Return the register enum based on RegClass and the raw register number.
77 // For DRegPair, see comments below.
// Translate a (RegClassID, RawRegister) pair taken from the instruction
// encoding into the LLVM register enum value (e.g. ARM::R0, ARM::D8,
// ARM::S15). Raw register 13/14/15 in the GPR class map to SP/LR/PC.
// NOTE(review): the outer dispatch on RawRegister (its case labels, several
// return statements for D/Q registers, and closing braces) appears to have
// been dropped from this listing -- confirm against the complete source.
79 static unsigned getRegisterEnum(unsigned RegClassID, unsigned RawRegister,
80 bool DRegPair = false) {
82 if (DRegPair && RegClassID == ARM::QPRRegClassID) {
83 // LLVM expects { Dd, Dd+1 } to form a super register; this is not specified
84 // in the ARM Architecture Manual as far as I understand it (A8.6.307).
85 // Therefore, we morph the RegClassID to be the sub register class and don't
86 // subsequently transform the RawRegister encoding when calculating RegNum.
88 // See also ARMInstPrinter::printOperand() wrt "dregpair" modifier part
89 // where this workaround is meant for.
90 RegClassID = ARM::DPRRegClassID;
// A Q register spans two D registers, so QPR raw numbers are halved.
93 // See also decodeNEONRd(), decodeNEONRn(), decodeNEONRm().
95 RegClassID == ARM::QPRRegClassID ? RawRegister >> 1 : RawRegister;
// Raw registers 0..7: every register class (GPR/tGPR/DPR/QPR/SPR) is valid.
101 switch (RegClassID) {
102 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R0;
103 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
104 case ARM::DPR_VFP2RegClassID:
106 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
107 case ARM::QPR_VFP2RegClassID:
109 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S0;
113 switch (RegClassID) {
114 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R1;
115 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
116 case ARM::DPR_VFP2RegClassID:
118 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
119 case ARM::QPR_VFP2RegClassID:
121 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S1;
125 switch (RegClassID) {
126 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R2;
127 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
128 case ARM::DPR_VFP2RegClassID:
130 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
131 case ARM::QPR_VFP2RegClassID:
133 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S2;
137 switch (RegClassID) {
138 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R3;
139 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
140 case ARM::DPR_VFP2RegClassID:
142 case ARM::QPRRegClassID: case ARM::QPR_8RegClassID:
143 case ARM::QPR_VFP2RegClassID:
145 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S3;
// Raw registers 4..7: the QPR_8 sub-class is no longer listed (Q4+ are
// outside the low-8 subset).
149 switch (RegClassID) {
150 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R4;
151 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
152 case ARM::DPR_VFP2RegClassID:
154 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q4;
155 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S4;
159 switch (RegClassID) {
160 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R5;
161 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
162 case ARM::DPR_VFP2RegClassID:
164 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q5;
165 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S5;
169 switch (RegClassID) {
170 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R6;
171 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
172 case ARM::DPR_VFP2RegClassID:
174 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q6;
175 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S6;
179 switch (RegClassID) {
180 case ARM::GPRRegClassID: case ARM::tGPRRegClassID: return ARM::R7;
181 case ARM::DPRRegClassID: case ARM::DPR_8RegClassID:
182 case ARM::DPR_VFP2RegClassID:
184 case ARM::QPRRegClassID: case ARM::QPR_VFP2RegClassID: return ARM::Q7;
185 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S7;
// Raw registers 8..15: tGPR (Thumb low registers) and the *_8 sub-classes
// for D/Q no longer apply.
189 switch (RegClassID) {
190 case ARM::GPRRegClassID: return ARM::R8;
191 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D8;
192 case ARM::QPRRegClassID: return ARM::Q8;
193 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S8;
197 switch (RegClassID) {
198 case ARM::GPRRegClassID: return ARM::R9;
199 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D9;
200 case ARM::QPRRegClassID: return ARM::Q9;
201 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S9;
205 switch (RegClassID) {
206 case ARM::GPRRegClassID: return ARM::R10;
207 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D10;
208 case ARM::QPRRegClassID: return ARM::Q10;
209 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S10;
213 switch (RegClassID) {
214 case ARM::GPRRegClassID: return ARM::R11;
215 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D11;
216 case ARM::QPRRegClassID: return ARM::Q11;
217 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S11;
221 switch (RegClassID) {
222 case ARM::GPRRegClassID: return ARM::R12;
223 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D12;
224 case ARM::QPRRegClassID: return ARM::Q12;
225 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S12;
// Raw register 13 maps to SP in the GPR class.
229 switch (RegClassID) {
230 case ARM::GPRRegClassID: return ARM::SP;
231 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D13;
232 case ARM::QPRRegClassID: return ARM::Q13;
233 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S13;
// Raw register 14 maps to LR in the GPR class.
237 switch (RegClassID) {
238 case ARM::GPRRegClassID: return ARM::LR;
239 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D14;
240 case ARM::QPRRegClassID: return ARM::Q14;
241 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S14;
// Raw register 15 maps to PC in the GPR class.
245 switch (RegClassID) {
246 case ARM::GPRRegClassID: return ARM::PC;
247 case ARM::DPRRegClassID: case ARM::DPR_VFP2RegClassID: return ARM::D15;
248 case ARM::QPRRegClassID: return ARM::Q15;
249 case ARM::SPRRegClassID: case ARM::SPR_8RegClassID: return ARM::S15;
// Raw registers 16..31 exist only as D registers (VFPv3/NEON upper bank)
// and S registers.
253 switch (RegClassID) {
254 case ARM::DPRRegClassID: return ARM::D16;
255 case ARM::SPRRegClassID: return ARM::S16;
259 switch (RegClassID) {
260 case ARM::DPRRegClassID: return ARM::D17;
261 case ARM::SPRRegClassID: return ARM::S17;
265 switch (RegClassID) {
266 case ARM::DPRRegClassID: return ARM::D18;
267 case ARM::SPRRegClassID: return ARM::S18;
271 switch (RegClassID) {
272 case ARM::DPRRegClassID: return ARM::D19;
273 case ARM::SPRRegClassID: return ARM::S19;
277 switch (RegClassID) {
278 case ARM::DPRRegClassID: return ARM::D20;
279 case ARM::SPRRegClassID: return ARM::S20;
283 switch (RegClassID) {
284 case ARM::DPRRegClassID: return ARM::D21;
285 case ARM::SPRRegClassID: return ARM::S21;
289 switch (RegClassID) {
290 case ARM::DPRRegClassID: return ARM::D22;
291 case ARM::SPRRegClassID: return ARM::S22;
295 switch (RegClassID) {
296 case ARM::DPRRegClassID: return ARM::D23;
297 case ARM::SPRRegClassID: return ARM::S23;
301 switch (RegClassID) {
302 case ARM::DPRRegClassID: return ARM::D24;
303 case ARM::SPRRegClassID: return ARM::S24;
307 switch (RegClassID) {
308 case ARM::DPRRegClassID: return ARM::D25;
309 case ARM::SPRRegClassID: return ARM::S25;
313 switch (RegClassID) {
314 case ARM::DPRRegClassID: return ARM::D26;
315 case ARM::SPRRegClassID: return ARM::S26;
319 switch (RegClassID) {
320 case ARM::DPRRegClassID: return ARM::D27;
321 case ARM::SPRRegClassID: return ARM::S27;
325 switch (RegClassID) {
326 case ARM::DPRRegClassID: return ARM::D28;
327 case ARM::SPRRegClassID: return ARM::S28;
331 switch (RegClassID) {
332 case ARM::DPRRegClassID: return ARM::D29;
333 case ARM::SPRRegClassID: return ARM::S29;
337 switch (RegClassID) {
338 case ARM::DPRRegClassID: return ARM::D30;
339 case ARM::SPRRegClassID: return ARM::S30;
343 switch (RegClassID) {
344 case ARM::DPRRegClassID: return ARM::D31;
345 case ARM::SPRRegClassID: return ARM::S31;
// Any combination not handled above is an encoding/decoder bug.
349 assert(0 && "Invalid (RegClassID, RawRegister) combination");
353 ///////////////////////////////
355 // Utility Functions //
357 ///////////////////////////////
359 // Extract/Decode Rd: Inst{15-12}.
360 static inline unsigned decodeRd(uint32_t insn) {
// Shift the Rd field down and mask it to a 4-bit GPR number.
361 return (insn >> ARMII::RegRdShift) & ARMII::GPRRegMask;
364 // Extract/Decode Rn: Inst{19-16}.
365 static inline unsigned decodeRn(uint32_t insn) {
// Shift the Rn field down and mask it to a 4-bit GPR number.
366 return (insn >> ARMII::RegRnShift) & ARMII::GPRRegMask;
369 // Extract/Decode Rm: Inst{3-0}.
370 static inline unsigned decodeRm(uint32_t insn) {
// Rm occupies the low bits, so no shift is needed -- just mask.
371 return (insn & ARMII::GPRRegMask);
374 // Extract/Decode Rs: Inst{11-8}.
375 static inline unsigned decodeRs(uint32_t insn) {
// Shift the Rs field down and mask it to a 4-bit GPR number.
376 return (insn >> ARMII::RegRsShift) & ARMII::GPRRegMask;
// Extract the condition field. Note: no mask is applied, so this relies on
// CondShift leaving only the condition bits in the result (presumably the
// top nibble, Inst{31-28} -- confirm against ARMII definitions).
379 static inline unsigned getCondField(uint32_t insn) {
380 return (insn >> ARMII::CondShift);
// Extract the single I bit (immediate-vs-register form discriminator).
383 static inline unsigned getIBit(uint32_t insn) {
384 return (insn >> ARMII::I_BitShift) & 1;
// Extract the single I bit used by addressing mode 3 encodings.
387 static inline unsigned getAM3IBit(uint32_t insn) {
388 return (insn >> ARMII::AM3_I_BitShift) & 1;
// Extract the single P (pre-index) bit.
391 static inline unsigned getPBit(uint32_t insn) {
392 return (insn >> ARMII::P_BitShift) & 1;
// Extract the single U (add-vs-subtract offset) bit.
395 static inline unsigned getUBit(uint32_t insn) {
396 return (insn >> ARMII::U_BitShift) & 1;
// Extract P and U together as a 2-bit value (P above U; see
// getAMSubModeForBits, which documents P(24) and U(23)).
399 static inline unsigned getPUBits(uint32_t insn) {
400 return (insn >> ARMII::U_BitShift) & 3;
// Extract the single S (set condition flags) bit.
403 static inline unsigned getSBit(uint32_t insn) {
404 return (insn >> ARMII::S_BitShift) & 1;
// Extract the single W (writeback) bit.
407 static inline unsigned getWBit(uint32_t insn) {
408 return (insn >> ARMII::W_BitShift) & 1;
// Extract the single D bit.
411 static inline unsigned getDBit(uint32_t insn) {
412 return (insn >> ARMII::D_BitShift) & 1;
// Extract the single N bit.
415 static inline unsigned getNBit(uint32_t insn) {
416 return (insn >> ARMII::N_BitShift) & 1;
// Extract the single M bit.
419 static inline unsigned getMBit(uint32_t insn) {
420 return (insn >> ARMII::M_BitShift) & 1;
423 // See A8.4 Shifts applied to a register.
424 // A8.4.2 Register controlled shifts.
426 // getShiftOpcForBits - getShiftOpcForBits translates from the ARM encoding bits
427 // into llvm enums for shift opcode. The API clients should pass in the value
428 // encoded with two bits, so the assert stays to signal a wrong API usage.
430 // A8-12: DecodeRegShift()
// Translate the 2-bit shift-type encoding into an ARM_AM shift opcode:
// 0=LSL, 1=LSR, 2=ASR, 3=ROR. Any wider value asserts (API misuse).
// NOTE(review): the "switch (bits) {" opener and closing braces appear to
// have been dropped from this listing; confirm against the full source.
431 static inline ARM_AM::ShiftOpc getShiftOpcForBits(unsigned bits) {
433 default: assert(0 && "No such value"); return ARM_AM::no_shift;
434 case 0: return ARM_AM::lsl;
435 case 1: return ARM_AM::lsr;
436 case 2: return ARM_AM::asr;
437 case 3: return ARM_AM::ror;
441 // See A8.4 Shifts applied to a register.
442 // A8.4.1 Constant shifts.
444 // getImmShiftSE - getImmShiftSE translates from the raw ShiftOpc and raw Imm5
445 // encodings into the intended ShiftOpc and shift amount.
447 // A8-11: DecodeImmShift()
// Normalize a raw (ShiftOpc, imm5) pair in place per ARM A8-11
// DecodeImmShift(): ROR #0 really means RRX, and LSR/ASR #0 mean a shift
// amount of 32.
// NOTE(review): the assignment statements for each case (e.g. setting
// ShOp to rrx, setting ShImm to 32) appear dropped from this listing.
448 static inline void getImmShiftSE(ARM_AM::ShiftOpc &ShOp, unsigned &ShImm) {
449 // If type == 0b11 and imm5 == 0, we have an rrx, instead.
450 if (ShOp == ARM_AM::ror && ShImm == 0)
452 // If (lsr or asr) and imm5 == 0, shift amount is 32.
453 if ((ShOp == ARM_AM::lsr || ShOp == ARM_AM::asr) && ShImm == 0)
457 // getAMSubModeForBits - getAMSubModeForBits translates from the ARM encoding
458 // bits Inst{24-23} (P(24) and U(23)) into llvm enums for AMSubMode. The API
459 // clients should pass in the value encoded with two bits, so the assert stays
460 // to signal a wrong API usage.
// Translate the 2-bit P:U encoding (P above U) into the load/store-multiple
// addressing submode: IA, IB, DA, or DB. Wider values assert (API misuse).
// NOTE(review): the "switch (bits) {" opener and closing braces appear to
// have been dropped from this listing; confirm against the full source.
461 static inline ARM_AM::AMSubMode getAMSubModeForBits(unsigned bits) {
463 default: assert(0 && "No such value"); return ARM_AM::bad_am_submode;
464 case 1: return ARM_AM::ia; // P=0 U=1
465 case 3: return ARM_AM::ib; // P=1 U=1
466 case 0: return ARM_AM::da; // P=0 U=0
467 case 2: return ARM_AM::db; // P=1 U=0
471 ////////////////////////////////////////////
473 // Disassemble function definitions //
475 ////////////////////////////////////////////
477 /// There is a separate Disassemble*Frm function entry for disassembly of an ARM
478 /// instr into a list of MCOperands in the appropriate order, with possible dst,
479 /// followed by possible src(s).
481 /// The processing of the predicate, and the 'S' modifier bit, if MI modifies
482 /// the CPSR, is factored into ARMBasicMCBuilder's method named
483 /// TryPredicateAndSBitModifier.
// Handle pseudo instructions. Only the V7 memory/sync barrier pseudos are
// accepted (they carry no operands); anything else asserts.
485 static bool DisassemblePseudo(MCInst &MI, unsigned Opcode, uint32_t insn,
486 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
488 if (Opcode == ARM::Int_MemBarrierV7 || Opcode == ARM::Int_SyncBarrierV7)
// NOTE(review): the success return for the barrier case and the final
// return appear dropped from this listing; confirm against the full source.
491 assert(0 && "Unexpected pseudo instruction!");
495 // Multiply Instructions.
496 // MLA, MLS, SMLABB, SMLABT, SMLATB, SMLATT, SMLAWB, SMLAWT, SMMLA, SMMLS:
497 // Rd{19-16} Rn{3-0} Rm{11-8} Ra{15-12}
499 // MUL, SMMUL, SMULBB, SMULBT, SMULTB, SMULTT, SMULWB, SMULWT:
500 // Rd{19-16} Rn{3-0} Rm{11-8}
502 // SMLAL, SMULL, UMAAL, UMLAL, UMULL, SMLALBB, SMLALBT, SMLALTB, SMLALTT:
503 // RdLo{15-12} RdHi{19-16} Rn{3-0} Rm{11-8}
505 // The mapping of the multiply registers to the "regular" ARM registers, where
506 // there are convenience decoder functions, is:
// Build the MCOperand list for multiply-format instructions: destination
// register(s) first (RdLo before RdHi for long multiplies), then the source
// registers Rn and Rm, and finally the optional accumulator Ra.
// NOTE(review): several lines (operand-count guard, decode* arguments to
// getRegisterEnum, closing braces, return) appear dropped from this listing.
512 static bool DisassembleMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
513 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
515 const TargetInstrDesc &TID = ARMInsts[Opcode];
516 unsigned short NumDefs = TID.getNumDefs();
517 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NumOpsAdded is aliased so every MI.addOperand() below is reported back
// to the caller through this reference.
518 unsigned &OpIdx = NumOpsAdded;
522 assert(NumDefs > 0 && "NumDefs should be greater than 0 for MulFrm");
524 && OpInfo[0].RegClass == ARM::GPRRegClassID
525 && OpInfo[1].RegClass == ARM::GPRRegClassID
526 && OpInfo[2].RegClass == ARM::GPRRegClassID
527 && "Expect three register operands");
529 // Instructions with two destination registers have RdLo{15-12} first.
531 assert(NumOps >= 4 && OpInfo[3].RegClass == ARM::GPRRegClassID &&
532 "Expect 4th register operand");
533 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
538 // The destination register: RdHi{19-16} or Rd{19-16}.
539 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
542 // The two src registers: Rn{3-0}, then Rm{11-8}.
543 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
545 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
549 // Many multiply instructions (e.g., MLA) have three src registers.
550 // The third register operand is Ra{15-12}.
551 if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
552 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
560 // Helper routines for disassembly of coprocessor instructions.
// Return whether Opcode is a coprocessor load/store (LDC*/STC* family),
// tested via the contiguous tablegen enum ranges.
// NOTE(review): the "return true;"/"return false;" lines and closing brace
// appear dropped from this listing; confirm against the full source.
562 static bool LdStCopOpcode(unsigned Opcode) {
563 if ((Opcode >= ARM::LDC2L_OFFSET && Opcode <= ARM::LDC_PRE) ||
564 (Opcode >= ARM::STC2L_OFFSET && Opcode <= ARM::STC_PRE))
// Return whether Opcode is any coprocessor instruction: a coprocessor
// load/store, or one of CDP/MCR/MRC/MCRR/MRRC and their *2 variants.
// NOTE(review): the "switch (Opcode)" opener, return statements, and
// closing braces appear dropped from this listing.
568 static bool CoprocessorOpcode(unsigned Opcode) {
569 if (LdStCopOpcode(Opcode))
575 case ARM::CDP: case ARM::CDP2:
576 case ARM::MCR: case ARM::MCR2: case ARM::MRC: case ARM::MRC2:
577 case ARM::MCRR: case ARM::MCRR2: case ARM::MRRC: case ARM::MRRC2:
// Coprocessor number: Inst{11-8}.
581 static inline unsigned GetCoprocessor(uint32_t insn) {
582 return slice(insn, 11, 8);
// opc1 field: CDP/CDP2 use the wider Inst{23-20}; others use Inst{23-21}.
584 static inline unsigned GetCopOpc1(uint32_t insn, bool CDP) {
585 return CDP ? slice(insn, 23, 20) : slice(insn, 23, 21);
// opc2 field: Inst{7-5}.
587 static inline unsigned GetCopOpc2(uint32_t insn) {
588 return slice(insn, 7, 5);
// opc field used by MCRR/MRRC-style instructions: Inst{7-4}.
590 static inline unsigned GetCopOpc(uint32_t insn) {
591 return slice(insn, 7, 4);
593 // Most of the operands are in immediate forms, except Rd and Rn, which are ARM
596 // CDP, CDP2: cop opc1 CRd CRn CRm opc2
598 // MCR, MCR2, MRC, MRC2: cop opc1 Rd CRn CRm opc2
600 // MCRR, MCRR2, MRRC, MRRC2: cop opc Rd Rn CRm
602 // LDC_OFFSET, LDC_PRE, LDC_POST: cop CRd Rn R0 [+/-]imm8:00
604 // STC_OFFSET, STC_PRE, STC_POST: cop CRd Rn R0 [+/-]imm8:00
608 // LDC_OPTION: cop CRd Rn imm8
610 // STC_OPTION: cop CRd Rn imm8
// Build the MCOperand list for coprocessor instructions. Operand order per
// the comment block above: coprocessor number first, then (depending on the
// opcode family) CRd/Rn/addressing-mode operands for LDC/STC, or
// opc1/Rd/Rn(CRn)/CRm/opc2 for CDP/MCR/MRC/MCRR/MRRC.
// NOTE(review): the control-flow statements selecting between the LdStCop
// and register-move paths (and several closing braces / returns) appear
// dropped from this listing; confirm against the full source.
613 static bool DisassembleCoprocessor(MCInst &MI, unsigned Opcode, uint32_t insn,
614 unsigned short NumOps, unsigned &NumOpsAdded) {
616 assert(NumOps >= 5 && "Num of operands >= 5 for coprocessor instr");
618 unsigned &OpIdx = NumOpsAdded;
// MCRR/MRRC family uses a single 4-bit opc field instead of opc1/opc2.
619 bool OneCopOpc = (Opcode == ARM::MCRR || Opcode == ARM::MCRR2 ||
620 Opcode == ARM::MRRC || Opcode == ARM::MRRC2);
621 // CDP/CDP2 has no GPR operand; the opc1 operand is also wider (Inst{23-20}).
622 bool NoGPR = (Opcode == ARM::CDP || Opcode == ARM::CDP2);
623 bool LdStCop = LdStCopOpcode(Opcode);
627 MI.addOperand(MCOperand::CreateImm(GetCoprocessor(insn)));
630 // Unindex if P:W = 0b00 --> _OPTION variant
631 unsigned PW = getPBit(insn) << 1 | getWBit(insn);
// LDC/STC path: CRd is an immediate (coprocessor register number), base Rn
// is a real GPR.
633 MI.addOperand(MCOperand::CreateImm(decodeRd(insn)));
635 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Placeholder offset register, then the AM2 opcode encoding +/- imm8:00.
639 MI.addOperand(MCOperand::CreateReg(0));
640 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
641 unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, slice(insn, 7, 0) << 2,
643 MI.addOperand(MCOperand::CreateImm(Offset));
// _OPTION variant: raw 8-bit option immediate instead of a scaled offset.
646 MI.addOperand(MCOperand::CreateImm(slice(insn, 7, 0)));
// Register-move / CDP path: opc (or opc1), then Rd/CRd, Rn/CRn, CRm, opc2.
650 MI.addOperand(MCOperand::CreateImm(OneCopOpc ? GetCopOpc(insn)
651 : GetCopOpc1(insn, NoGPR)));
653 MI.addOperand(NoGPR ? MCOperand::CreateImm(decodeRd(insn))
654 : MCOperand::CreateReg(
655 getRegisterEnum(ARM::GPRRegClassID,
658 MI.addOperand(OneCopOpc ? MCOperand::CreateReg(
659 getRegisterEnum(ARM::GPRRegClassID,
661 : MCOperand::CreateImm(decodeRn(insn)));
663 MI.addOperand(MCOperand::CreateImm(decodeRm(insn)));
668 MI.addOperand(MCOperand::CreateImm(GetCopOpc2(insn)));
676 // Branch Instructions.
677 // BLr9: SignExtend(Imm24:'00', 32)
678 // Bcc, BLr9_pred: SignExtend(Imm24:'00', 32) Pred0 Pred1
679 // SMC: ZeroExtend(imm4, 32)
680 // SVC: ZeroExtend(Imm24, 32)
682 // Various coprocessor instructions are assigned BrFrm arbitrarily.
683 // Delegates to DisassembleCoprocessor() helper function.
686 // MSR/MSRsys: Rm mask=Inst{19-16}
688 // MSRi/MSRsysi: so_imm
689 // SRSW/SRS: addrmode4:$addr mode_imm
690 // RFEW/RFE: addrmode4:$addr Rn
// Build the MCOperand list for branch-format instructions and the various
// system instructions (MRS/MSR/BXJ/SRS/RFE) that share BrFrm; coprocessor
// opcodes routed here are delegated to DisassembleCoprocessor().
// NOTE(review): several returns / NumOpsAdded updates / closing braces
// appear dropped from this listing; confirm against the full source.
691 static bool DisassembleBrFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
692 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
694 if (CoprocessorOpcode(Opcode))
695 return DisassembleCoprocessor(MI, Opcode, insn, NumOps, NumOpsAdded);
697 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
698 if (!OpInfo) return false;
700 // MRS and MRSsys take one GPR reg Rd.
701 if (Opcode == ARM::MRS || Opcode == ARM::MRSsys) {
702 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
703 "Reg operand expected");
704 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
709 // BXJ takes one GPR reg Rm.
710 if (Opcode == ARM::BXJ) {
711 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
712 "Reg operand expected");
713 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
718 // MSR and MSRsys take one GPR reg Rm, followed by the mask.
719 if (Opcode == ARM::MSR || Opcode == ARM::MSRsys) {
720 assert(NumOps >= 1 && OpInfo[0].RegClass == ARM::GPRRegClassID &&
721 "Reg operand expected");
722 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
724 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
728 // MSRi and MSRsysi take one so_imm operand, followed by the mask.
729 if (Opcode == ARM::MSRi || Opcode == ARM::MSRsysi) {
730 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
731 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
732 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
733 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
734 unsigned Imm = insn & 0xFF;
735 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
736 MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 16)));
740 // SRSW and SRS require addrmode4:$addr for ${addr:submode}, followed by the
741 // mode immediate (Inst{4-0}).
742 if (Opcode == ARM::SRSW || Opcode == ARM::SRS ||
743 Opcode == ARM::RFEW || Opcode == ARM::RFE) {
744 // ARMInstPrinter::printAddrMode4Operand() prints special mode string
745 // if the base register is SP; so don't set ARM::SP.
746 MI.addOperand(MCOperand::CreateReg(0));
747 ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
748 MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));
// SRS* carry the mode immediate; RFE* carry the base register Rn instead.
750 if (Opcode == ARM::SRSW || Opcode == ARM::SRS)
751 MI.addOperand(MCOperand::CreateImm(slice(insn, 4, 0)));
753 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// From here on only the plain branch/SMC/SVC opcodes remain.
759 assert((Opcode == ARM::Bcc || Opcode == ARM::BLr9 || Opcode == ARM::BLr9_pred
760 || Opcode == ARM::SMC || Opcode == ARM::SVC) &&
761 "Unexpected Opcode");
763 assert(NumOps >= 1 && OpInfo[0].RegClass == 0 && "Reg operand expected");
766 if (Opcode == ARM::SMC) {
767 // ZeroExtend(imm4, 32) where imm4 = Inst{3-0}.
768 Imm32 = slice(insn, 3, 0);
769 } else if (Opcode == ARM::SVC) {
770 // ZeroExtend(imm24, 32) where imm24 = Inst{23-0}.
771 Imm32 = slice(insn, 23, 0);
773 // SignExtend(imm24:'00', 32) where imm24 = Inst{23-0}.
774 unsigned Imm26 = slice(insn, 23, 0) << 2;
775 //Imm32 = signextend<signed int, 26>(Imm26);
776 Imm32 = SignExtend32<26>(Imm26);
778 // When executing an ARM instruction, PC reads as the address of the current
779 // instruction plus 8. The assembler subtracts 8 from the difference
780 // between the branch instruction and the target address, disassembler has
781 // to add 8 to compensate.
785 MI.addOperand(MCOperand::CreateImm(Imm32));
791 // Misc. Branch Instructions.
792 // BR_JTadd, BR_JTr, BR_JTm
// Build the MCOperand list for miscellaneous branch instructions: BX_RET,
// BLXr9/BRIND (branch through register), and the jump-table branches
// BR_JTadd / BR_JTr / BR_JTm. Unknown opcodes assert at the bottom.
// NOTE(review): several returns / decode* arguments / closing braces appear
// dropped from this listing; confirm against the full source.
795 static bool DisassembleBrMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
796 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
798 const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
799 if (!OpInfo) return false;
801 unsigned &OpIdx = NumOpsAdded;
805 // BX_RET has only two predicate operands, do an early return.
806 if (Opcode == ARM::BX_RET)
809 // BLXr9 and BRIND take one GPR reg.
810 if (Opcode == ARM::BLXr9 || Opcode == ARM::BRIND) {
811 assert(NumOps >= 1 && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
812 "Reg operand expected");
813 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
819 // BR_JTadd is an ADD with Rd = PC, (Rn, Rm) as the target and index regs.
820 if (Opcode == ARM::BR_JTadd) {
821 // InOperandList with GPR:$target and GPR:$idx regs.
823 assert(NumOps == 4 && "Expect 4 operands");
824 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
826 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
829 // Fill in the two remaining imm operands to signify build completion.
830 MI.addOperand(MCOperand::CreateImm(0));
831 MI.addOperand(MCOperand::CreateImm(0));
837 // BR_JTr is a MOV with Rd = PC, and Rm as the source register.
838 if (Opcode == ARM::BR_JTr) {
839 // InOperandList with GPR::$target reg.
841 assert(NumOps == 3 && "Expect 3 operands");
842 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
845 // Fill in the two remaining imm operands to signify build completion.
846 MI.addOperand(MCOperand::CreateImm(0));
847 MI.addOperand(MCOperand::CreateImm(0));
853 // BR_JTm is an LDR with Rt = PC.
854 if (Opcode == ARM::BR_JTm) {
855 // This is the reg/reg form, with base reg followed by +/- reg shift imm.
856 // See also ARMAddressingModes.h (Addressing Mode #2).
858 assert(NumOps == 5 && getIBit(insn) == 1 && "Expect 5 operands && I-bit=1");
859 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
862 ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
864 // Disassemble the offset reg (Rm), shift type, and immediate shift length.
865 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
867 // Inst{6-5} encodes the shift opcode.
868 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
869 // Inst{11-7} encodes the imm5 shift amount.
870 unsigned ShImm = slice(insn, 11, 7);
872 // A8.4.1. Possible rrx or shift amount of 32...
873 getImmShiftSE(ShOp, ShImm);
874 MI.addOperand(MCOperand::CreateImm(
875 ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
877 // Fill in the two remaining imm operands to signify build completion.
878 MI.addOperand(MCOperand::CreateImm(0));
879 MI.addOperand(MCOperand::CreateImm(0));
885 assert(0 && "Unexpected BrMiscFrm Opcode");
// Compute the inverted field mask for BFC/BFI from lsb = Inst{11-7} and
// msb = Inst{20-16}; reports an encoding error (lsb > msb) via errs().
// NOTE(review): the lsb>msb guard's "if"/return, the mask-accumulation body
// of the loop, the final inversion/assignment to `mask`, and the success
// return appear dropped from this listing; confirm against the full source.
889 static inline bool getBFCInvMask(uint32_t insn, uint32_t &mask) {
890 uint32_t lsb = slice(insn, 11, 7);
891 uint32_t msb = slice(insn, 20, 16);
894 errs() << "Encoding error: lsb > msb\n";
898 for (uint32_t i = lsb; i <= msb; ++i)
// Return whether Opcode is one of the saturate instructions
// (SSAT/SSAT16/USAT/USAT16 variants).
// NOTE(review): the "switch (Opcode)" opener, returns, and closing braces
// appear dropped from this listing; confirm against the full source.
904 static inline bool SaturateOpcode(unsigned Opcode) {
906 case ARM::SSATlsl: case ARM::SSATasr: case ARM::SSAT16:
907 case ARM::USATlsl: case ARM::USATasr: case ARM::USAT16:
// Decode the saturate-to bit position: the signed variants (+1 cases) add
// one to the encoded field, the unsigned variants use it directly; SSAT16/
// USAT16 read the narrower Inst{19-16} field. Unknown opcodes assert.
// NOTE(review): the "switch (Opcode)" opener and the case labels pairing
// opcodes with each return appear dropped from this listing.
914 static inline unsigned decodeSaturatePos(unsigned Opcode, uint32_t insn) {
918 return slice(insn, 20, 16) + 1;
920 return slice(insn, 19, 16) + 1;
923 return slice(insn, 20, 16);
925 return slice(insn, 19, 16);
927 assert(0 && "Invalid opcode passed in");
932 // A major complication is the fact that some of the saturating add/subtract
933 // operations have Rd Rm Rn, instead of the "normal" Rd Rn Rm.
934 // They are QADD, QDADD, QDSUB, and QSUB.
// Build the MCOperand list for data-processing instructions: optional Rd
// def, then special cases (saturate SSAT/USAT, bit-field BFC/BFI/SBFX/UBFX),
// then the regular Rn / operand-2 (register or rotated so_imm) sources.
// Note the QADD/QDADD/QDSUB/QSUB quirk: their Rm/Rn operand order is swapped
// relative to the normal Rd Rn Rm layout (handled via the RmRn flag).
// NOTE(review): several returns / assignments / closing braces appear
// dropped from this listing; confirm against the full source.
935 static bool DisassembleDPFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
936 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
938 const TargetInstrDesc &TID = ARMInsts[Opcode];
939 unsigned short NumDefs = TID.getNumDefs();
940 bool isUnary = isUnaryDP(TID.TSFlags);
941 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NumOpsAdded is aliased so each MI.addOperand() below is reported back to
// the caller through this reference.
942 unsigned &OpIdx = NumOpsAdded;
946 // Disassemble register def if there is one.
947 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
948 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
953 // Now disassemble the src operands.
957 // SSAT/SSAT16/USAT/USAT16 has imm operand after Rd.
958 if (SaturateOpcode(Opcode)) {
959 MI.addOperand(MCOperand::CreateImm(decodeSaturatePos(Opcode, insn)));
961 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
964 if (Opcode == ARM::SSAT16 || Opcode == ARM::USAT16) {
969 // For SSAT operand reg (Rm) has been disassembled above.
970 // Now disassemble the shift amount.
972 // Inst{11-7} encodes the imm5 shift amount.
973 unsigned ShAmt = slice(insn, 11, 7);
975 // A8.6.183. Possible ASR shift amount of 32...
976 if (Opcode == ARM::SSATasr && ShAmt == 0)
979 MI.addOperand(MCOperand::CreateImm(ShAmt));
985 // Special-case handling of BFC/BFI/SBFX/UBFX.
986 if (Opcode == ARM::BFC || Opcode == ARM::BFI) {
987 // TIED_TO operand skipped for BFC and Inst{3-0} (Reg) for BFI.
988 MI.addOperand(MCOperand::CreateReg(Opcode == ARM::BFC ? 0
989 : getRegisterEnum(ARM::GPRRegClassID,
// Bail out on a malformed (lsb > msb) bit-field encoding.
992 if (!getBFCInvMask(insn, mask))
995 MI.addOperand(MCOperand::CreateImm(mask));
999 if (Opcode == ARM::SBFX || Opcode == ARM::UBFX) {
// SBFX/UBFX operands: source reg, lsb = Inst{11-7}, width = msb - lsb + 1.
1000 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1002 MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 7)));
1003 MI.addOperand(MCOperand::CreateImm(slice(insn, 20, 16) + 1));
// These four saturating add/sub opcodes encode Rd Rm Rn instead of Rd Rn Rm.
1008 bool RmRn = (Opcode == ARM::QADD || Opcode == ARM::QDADD ||
1009 Opcode == ARM::QDSUB || Opcode == ARM::QSUB);
1011 // BinaryDP has an Rn operand.
1013 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1014 "Reg operand expected");
1015 MI.addOperand(MCOperand::CreateReg(
1016 getRegisterEnum(ARM::GPRRegClassID,
1017 RmRn ? decodeRm(insn) : decodeRn(insn))));
1021 // If this is a two-address operand, skip it, e.g., MOVCCr operand 1.
1022 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1023 MI.addOperand(MCOperand::CreateReg(0));
1027 // Now disassemble operand 2.
1028 if (OpIdx >= NumOps)
1031 if (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) {
1032 // We have a reg/reg form.
1033 // Assert disabled because saturating operations, e.g., A8.6.127 QASX, are
1034 // routed here as well.
1035 // assert(getIBit(insn) == 0 && "I_Bit != '0' reg/reg form");
1036 MI.addOperand(MCOperand::CreateReg(
1037 getRegisterEnum(ARM::GPRRegClassID,
1038 RmRn? decodeRn(insn) : decodeRm(insn))));
1040 } else if (Opcode == ARM::MOVi16 || Opcode == ARM::MOVTi16) {
1041 // We have an imm16 = imm4:imm12 (imm4=Inst{19:16}, imm12 = Inst{11:0}).
1042 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1043 unsigned Imm16 = slice(insn, 19, 16) << 12 | slice(insn, 11, 0);
1044 MI.addOperand(MCOperand::CreateImm(Imm16));
1047 // We have a reg/imm form.
1048 // SOImm is 4-bit rotate amount in bits 11-8 with 8-bit imm in bits 7-0.
1049 // A5.2.4 Rotate amount is twice the numeric value of Inst{11-8}.
1050 // See also ARMAddressingModes.h: getSOImmValImm() and getSOImmValRot().
1051 assert(getIBit(insn) == 1 && "I_Bit != '1' reg/imm form");
1052 unsigned Rot = (insn >> ARMII::SoRotImmShift) & 0xF;
1053 unsigned Imm = insn & 0xFF;
1054 MI.addOperand(MCOperand::CreateImm(ARM_AM::rotr32(Imm, 2*Rot)));
// Build the MCOperand list for data-processing instructions whose operand 2
// is a shifted register (so_reg): optional Rd def, optional Rn, then the
// three so_reg components [Rm, Rs-or-reg0, encoded shift].
// Register-controlled vs. constant shift is selected by Inst{4} (Rs flag).
// NOTE(review): several returns / branch keywords / closing braces appear
// dropped from this listing; confirm against the full source.
1061 static bool DisassembleDPSoRegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1062 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1064 const TargetInstrDesc &TID = ARMInsts[Opcode];
1065 unsigned short NumDefs = TID.getNumDefs();
1066 bool isUnary = isUnaryDP(TID.TSFlags);
1067 const TargetOperandInfo *OpInfo = TID.OpInfo;
// NumOpsAdded is aliased so each MI.addOperand() below is reported back to
// the caller through this reference.
1068 unsigned &OpIdx = NumOpsAdded;
1072 // Disassemble register def if there is one.
1073 if (NumDefs && (OpInfo[OpIdx].RegClass == ARM::GPRRegClassID)) {
1074 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1079 // Disassemble the src operands.
1080 if (OpIdx >= NumOps)
1083 // BinaryDP has an Rn operand.
1085 assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
1086 "Reg operand expected");
1087 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1092 // If this is a two-address operand, skip it, e.g., MOVCCs operand 1.
1093 if (isUnary && (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1)) {
1094 MI.addOperand(MCOperand::CreateReg(0));
1098 // Disassemble operand 2, which consists of three components.
1099 if (OpIdx + 2 >= NumOps)
1102 assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
1103 (OpInfo[OpIdx+1].RegClass == ARM::GPRRegClassID) &&
1104 (OpInfo[OpIdx+2].RegClass == 0) &&
1105 "Expect 3 reg operands");
1107 // Register-controlled shifts have Inst{7} = 0 and Inst{4} = 1.
1108 unsigned Rs = slice(insn, 4, 4);
// First so_reg component: the shifted register Rm.
1110 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1113 // Register-controlled shifts: [Rm, Rs, shift].
1114 MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
1116 // Inst{6-5} encodes the shift opcode.
1117 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
// Shift amount is 0 here: the amount comes from Rs at run time.
1118 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, 0)));
1120 // Constant shifts: [Rm, reg0, shift_imm].
1121 MI.addOperand(MCOperand::CreateReg(0)); // NoRegister
1122 // Inst{6-5} encodes the shift opcode.
1123 ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
1124 // Inst{11-7} encodes the imm5 shift amount.
1125 unsigned ShImm = slice(insn, 11, 7);
1127 // A8.4.1. Possible rrx or shift amount of 32...
1128 getImmShiftSE(ShOp, ShImm);
1129 MI.addOperand(MCOperand::CreateImm(ARM_AM::getSORegOpc(ShOp, ShImm)));
// Common worker for DisassembleLdFrm/DisassembleStFrm: load/store with
// addressing mode #2 (base reg followed by +/- reg with shift, or +/- imm12).
// isStore selects the store operand ordering (writeback first for pre/post).
// NOTE(review): several continuation/closing lines appear elided in this copy.
static bool DisassembleLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  // Loads define a register; a store defines one only when writing back the base.
  assert(((!isStore && TID.getNumDefs() > 0) ||
          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Disassemble the base operand.
  if (OpIdx >= NumOps)

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
         && "Index mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // For reg/reg form, base reg is followed by +/- reg shop imm.
  // For immediate form, it is followed by +/- imm12.
  // See also ARMAddressingModes.h (Addressing Mode #2).
  if (OpIdx + 1 >= NumOps)

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass == 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // The U bit selects whether the offset is added or subtracted.
  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  // I bit == 0 selects the immediate-offset form.
  if (getIBit(insn) == 0) {
    MI.addOperand(MCOperand::CreateReg(0));

    // Disassemble the 12-bit immediate offset.
    unsigned Imm12 = slice(insn, 11, 0);
    unsigned Offset = ARM_AM::getAM2Opc(AddrOpcode, Imm12, ARM_AM::no_shift);
    MI.addOperand(MCOperand::CreateImm(Offset));

    // Disassemble the offset reg (Rm), shift type, and immediate shift length.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);

    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
1229 static bool DisassembleLdFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1230 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1231 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
1234 static bool DisassembleStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1235 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1236 return DisassembleLdStFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
1239 static bool HasDualReg(unsigned Opcode) {
1243 case ARM::LDRD: case ARM::LDRD_PRE: case ARM::LDRD_POST:
1244 case ARM::STRD: case ARM::STRD_PRE: case ARM::STRD_POST:
// Common worker for misc loads/stores using addressing mode #3 (base reg
// followed by +/- reg, or +/- imm8 split as Imm4H:Imm4L); handles the dual-reg
// LDRD/STRD forms via HasDualReg.
// NOTE(review): several continuation/closing lines appear elided in this copy.
static bool DisassembleLdStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool isStore) {

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  bool isPrePost = isPrePostLdSt(TID.TSFlags);
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

  // Loads define a register; a store defines one only when writing back the base.
  assert(((!isStore && TID.getNumDefs() > 0) ||
          (isStore && (TID.getNumDefs() == 0 || isPrePost)))
         && "Invalid arguments");

  // Operand 0 of a pre- and post-indexed store is the address base writeback.
  if (isPrePost && isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  bool DualReg = HasDualReg(Opcode);

  // Disassemble the dst/src operand.
  if (OpIdx >= NumOps)

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Fill in LDRD and STRD's second operand.
    // Dual-reg forms implicitly use the consecutive register Rd+1.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
                                                       decodeRd(insn) + 1)));

  // After dst of a pre- and post-indexed load is the address base writeback.
  if (isPrePost && !isStore) {
    assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Disassemble the base operand.
  if (OpIdx >= NumOps)

  assert(OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  assert((!isPrePost || (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1))
         && "Index mode or tied_to operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // For reg/reg form, base reg is followed by +/- reg.
  // For immediate form, it is followed by +/- imm8.
  // See also ARMAddressingModes.h (Addressing Mode #3).
  if (OpIdx + 1 >= NumOps)

  assert((OpInfo[OpIdx].RegClass == ARM::GPRRegClassID) &&
         (OpInfo[OpIdx+1].RegClass == 0) &&
         "Expect 1 reg operand followed by 1 imm operand");

  // The U bit selects whether the offset is added or subtracted.
  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  // AM3 I bit == 1 selects the immediate-offset form.
  if (getAM3IBit(insn) == 1) {
    MI.addOperand(MCOperand::CreateReg(0));

    // Disassemble the 8-bit immediate offset.
    unsigned Imm4H = (insn >> ARMII::ImmHiShift) & 0xF;
    unsigned Imm4L = insn & 0xF;
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, (Imm4H << 4) | Imm4L);
    MI.addOperand(MCOperand::CreateImm(Offset));

    // Disassemble the offset reg (Rm).
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    unsigned Offset = ARM_AM::getAM3Opc(AddrOpcode, 0);
    MI.addOperand(MCOperand::CreateImm(Offset));
1345 static bool DisassembleLdMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1346 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1347 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, false);
1350 static bool DisassembleStMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
1351 unsigned short NumOps, unsigned &NumOpsAdded, BO) {
1352 return DisassembleLdStMiscFrm(MI, Opcode, insn, NumOps, NumOpsAdded, true);
// The algorithm for disassembly of LdStMulFrm is different from others because
// it explicitly populates the two predicate operands after operand 0 (the base)
// and operand 1 (the AM4 mode imm). After operand 3, we need to populate the
// reglist with each affected register encoded as an MCOperand.
static bool DisassembleLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 5 && "LdStMulFrm expects NumOps >= 5");

  unsigned &OpIdx = NumOpsAdded;

  unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  if (Opcode == ARM::LDM_UPD || Opcode == ARM::STM_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));

  MI.addOperand(MCOperand::CreateReg(Base));

  // AM4 submode comes from the P and U bits.
  ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM4ModeImm(SubMode)));

  // Handling the two predicate operands before the reglist.
  // An unconditional (0b1111) cond field is mapped to AL (0b1110) here.
  int64_t CondVal = insn >> ARMII::CondShift;
  MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));

  // Fill the variadic part of reglist.
  // Inst{15-0} is a bitmask: bit i set means register i is in the list.
  unsigned RegListBits = insn & ((1 << 16) - 1);
  for (unsigned i = 0; i < 16; ++i) {
    if ((RegListBits >> i) & 1) {
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Load/Store Exclusive (and SWP/SWPB) operand layouts:
// LDREX, LDREXB, LDREXH: Rd Rn
// LDREXD: Rd Rd+1 Rn
// STREX, STREXB, STREXH: Rd Rm Rn
// STREXD: Rd Rm Rm+1 Rn
//
// SWP, SWPB: Rd Rm Rn
// NOTE(review): several continuation/closing lines appear elided in this copy.
static bool DisassembleLdStExFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  if (!OpInfo) return false;

  unsigned &OpIdx = NumOpsAdded;

     && OpInfo[0].RegClass == ARM::GPRRegClassID
     && OpInfo[1].RegClass == ARM::GPRRegClassID
     && "Expect 2 reg operands");

  // Inst{20} == 0 distinguishes the store variants.
  bool isStore = slice(insn, 20, 20) == 0;
  bool isDW = (Opcode == ARM::LDREXD || Opcode == ARM::STREXD);

  // Add the destination operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // Store register Exclusive needs a source operand.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

      // Doubleword store: the implicit second source register is Rm+1.
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
                                                         decodeRm(insn)+1)));

    // Doubleword load: the implicit second destination register is Rd+1.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
                                                       decodeRd(insn)+1)));

  // Finally add the pointer operand.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// Misc. Arithmetic Instructions.
// PKHBT, PKHTB: Rd Rn Rm , LSL/ASR #imm5
// RBIT, REV, REV16, REVSH: Rd Rm
// NOTE(review): several continuation/closing lines appear elided in this copy.
static bool DisassembleArithMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

     && OpInfo[0].RegClass == ARM::GPRRegClassID
     && OpInfo[1].RegClass == ARM::GPRRegClassID
     && "Expect 2 reg operands");

  // Three-reg form (per the header comment, PKH*) has a GPR at operand 2.
  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    assert(NumOps >= 4 && "Expect >= 4 operands");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // If there is still an operand info left which is an immediate operand, add
  // an additional imm5 LSL/ASR operand.
  if (ThreeReg && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 5-bit immediate field Inst{11-7}.
    unsigned ShiftAmt = (insn >> ARMII::ShiftShift) & 0x1F;
    MI.addOperand(MCOperand::CreateImm(ShiftAmt));
// Extend instructions.
// SXT* and UXT*: Rd [Rn] Rm [rot_imm].
// The 2nd operand register is Rn and the 3rd operand register is Rm for the
// three register operand form. Otherwise, Rn=0b1111 and only Rm is used.
// NOTE(review): several continuation/closing lines appear elided in this copy.
static bool DisassembleExtFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

     && OpInfo[0].RegClass == ARM::GPRRegClassID
     && OpInfo[1].RegClass == ARM::GPRRegClassID
     && "Expect 2 reg operands");

  // Three-reg form (SXTA*/UXTA*) has a GPR Rn at operand 2.
  bool ThreeReg = NumOps > 2 && OpInfo[2].RegClass == ARM::GPRRegClassID;

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // If there is still an operand info left which is an immediate operand, add
  // an additional rotate immediate operand.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Extract the 2-bit rotate field Inst{11-10}.
    unsigned rot = (insn >> ARMII::ExtRotImmShift) & 3;
    // Rotation by 8, 16, or 24 bits.
    MI.addOperand(MCOperand::CreateImm(rot << 3));
1549 /////////////////////////////////////
1551 // Utility Functions For VFP //
1553 /////////////////////////////////////
1555 // Extract/Decode Dd/Sd:
1557 // SP => d = UInt(Vd:D)
1558 // DP => d = UInt(D:Vd)
1559 static unsigned decodeVFPRd(uint32_t insn, bool isSPVFP) {
1560 return isSPVFP ? (decodeRd(insn) << 1 | getDBit(insn))
1561 : (decodeRd(insn) | getDBit(insn) << 4);
1564 // Extract/Decode Dn/Sn:
1566 // SP => n = UInt(Vn:N)
1567 // DP => n = UInt(N:Vn)
1568 static unsigned decodeVFPRn(uint32_t insn, bool isSPVFP) {
1569 return isSPVFP ? (decodeRn(insn) << 1 | getNBit(insn))
1570 : (decodeRn(insn) | getNBit(insn) << 4);
1573 // Extract/Decode Dm/Sm:
1575 // SP => m = UInt(Vm:M)
1576 // DP => m = UInt(M:Vm)
1577 static unsigned decodeVFPRm(uint32_t insn, bool isSPVFP) {
1578 return isSPVFP ? (decodeRm(insn) << 1 | getMBit(insn))
1579 : (decodeRm(insn) | getMBit(insn) << 4);
// Expands an 8-bit modified immediate into a 32-bit (N == 32) or 64-bit
// (N == 64) floating-point bit pattern: sign from bit 7, an exponent derived
// from bit 6, and the low six bits as the fraction MSBs.
// NOTE(review): the Result declaration and the if/else controlling the 32- vs
// 64-bit paths appear elided in this copy.
static uint64_t VFPExpandImm(unsigned char byte, unsigned N) {
  assert(N == 32 || N == 64);

  unsigned bit6 = slice(byte, 6, 6);
    // 32-bit: sign at bit 31, fraction bits at 25-19.
    Result = slice(byte, 7, 7) << 31 | slice(byte, 5, 0) << 19;
      Result |= 0x1f << 25;
      Result |= 0x1 << 30;
    // 64-bit: sign at bit 63, fraction bits at 53-48.
    Result = (uint64_t)slice(byte, 7, 7) << 63 |
             (uint64_t)slice(byte, 5, 0) << 48;
      Result |= 0xffL << 54;
      Result |= 0x1L << 62;
// VFP Unary Format Instructions:
//
// VCMP[E]ZD, VCMP[E]ZS: compares one floating-point register with zero
// VCVTDS, VCVTSD: converts between double-precision and single-precision
// The rest of the instructions have homogeneous [VFP]Rd and [VFP]Rm registers.
static bool DisassembleVFPUnaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 1 && "VFPUnaryFrm expects NumOps >= 1");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // Dst register: SPR or DPR decides single- vs double-precision decoding.
  unsigned RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  bool isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));

  // Early return for compare with zero instructions.
  if (Opcode == ARM::VCMPEZD || Opcode == ARM::VCMPEZS
      || Opcode == ARM::VCMPZD || Opcode == ARM::VCMPZS)

  // Src register; its class is re-read since VCVTDS/VCVTSD mix precisions.
  RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
// All the instructions have homogeneous [VFP]Rd, [VFP]Rn, and [VFP]Rm regs.
// Some of them have operand constraints which tie the first operand in the
// InOperandList to that of the dst. As far as asm printing is concerned, this
// tied_to operand is simply skipped.
static bool DisassembleVFPBinaryFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 3 && "VFPBinaryFrm expects NumOps >= 3");

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // One register class (SPR or DPR) applies to all three operands.
  unsigned RegClass = OpInfo[OpIdx].RegClass;
  assert((RegClass == ARM::SPRRegClassID || RegClass == ARM::DPRRegClassID) &&
         "Reg operand expected");
  bool isSP = (RegClass == ARM::SPRRegClassID);

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(RegClass, decodeVFPRd(insn, isSP))));

  // Skip tied_to operand constraint.
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    assert(NumOps >= 4 && "Expect >=4 operands");
    MI.addOperand(MCOperand::CreateReg(0));

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(RegClass, decodeVFPRn(insn, isSP))));

  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(RegClass, decodeVFPRm(insn, isSP))));
// A8.6.295 vcvt (floating-point <-> integer)
// Int to FP: VSITOD, VSITOS, VUITOD, VUITOS
// FP to Int: VTOSI[Z|R]D, VTOSI[Z|R]S, VTOUI[Z|R]D, VTOUI[Z|R]S
//
// A8.6.297 vcvt (floating-point and fixed-point)
// Dd|Sd Dd|Sd(TIED_TO) #fbits(= 16|32 - UInt(imm4:i))
// NOTE(review): the fixed_point if/else and closing lines appear elided here.
static bool DisassembleVFPConv1Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 2 && "VFPConv1Frm expects NumOps >= 2");

  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;

  bool SP = slice(insn, 8, 8) == 0; // A8.6.295 & A8.6.297
  bool fixed_point = slice(insn, 17, 17) == 1; // A8.6.297
  unsigned RegClassID = SP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

    // Fixed-point form: Dd|Sd, tied copy of it, then the #fbits immediate.
    assert(NumOps >= 3 && "Expect >= 3 operands");
    int size = slice(insn, 7, 7) == 0 ? 16 : 32;
    int fbits = size - (slice(insn,3,0) << 1 | slice(insn,5,5));
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(RegClassID,
                                    decodeVFPRd(insn, SP))));

    assert(TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
           "Tied to operand expected");
    MI.addOperand(MI.getOperand(0));

    assert(OpInfo[2].RegClass == 0 && !OpInfo[2].isPredicate() &&
           !OpInfo[2].isOptionalDef() && "Imm operand expected");
    MI.addOperand(MCOperand::CreateImm(fbits));

    // The Rd (destination) and Rm (source) bits have different interpretations
    // depending on whether each side is single precision.
    if (slice(insn, 18, 18) == 1) { // to_integer operation
      // FP to int: the integer result always lives in an SPR.
      d = decodeVFPRd(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(ARM::SPRRegClassID, d)));
      m = decodeVFPRm(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, m)));
      // Int to FP: the integer source always lives in an SPR.
      d = decodeVFPRd(insn, SP);
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, d)));
      m = decodeVFPRm(insn, true /* Is Single Precision */);
      MI.addOperand(MCOperand::CreateReg(
                      getRegisterEnum(ARM::SPRRegClassID, m)));
// VMOVRS - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv2Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 2 && "VFPConv2Frm expects NumOps >= 2");

  // Dst: ARM core register (from the Rd field); src: single-precision Sn.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
                                                     decodeVFPRn(insn, true))));
// VMOVRRD - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv3Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 3 && "VFPConv3Frm expects NumOps >= 3");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // Two ARM core register destinations Rt/Rt2 come first.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,

  // VMOVRRS sources two consecutive SPRs (Sm, Sm+1); VMOVRRD one DPR.
  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));
// VMOVSR - A8.6.330
// Rt => Rd; Sn => UInt(Vn:N)
static bool DisassembleVFPConv4Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 2 && "VFPConv4Frm expects NumOps >= 2");

  // Dst: single-precision Sn; src: ARM core register (from the Rd field).
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
                                                     decodeVFPRn(insn, true))));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// VMOVDRR - A8.6.332
// Rt => Rd; Rt2 => Rn; Dm => UInt(M:Vm)
//
// VMOVRRS - A8.6.331
// Rt => Rd; Rt2 => Rn; Sm => UInt(Vm:M); Sm1 = Sm+1
static bool DisassembleVFPConv5Frm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 3 && "VFPConv5Frm expects NumOps >= 3");

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // Destination(s) first: two consecutive SPRs (Sm, Sm+1), or one DPR.
  if (OpInfo[OpIdx].RegClass == ARM::SPRRegClassID) {
    unsigned Sm = decodeVFPRm(insn, true);
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::SPRRegClassID,
    MI.addOperand(MCOperand::CreateReg(
                    getRegisterEnum(ARM::DPRRegClassID,
                                    decodeVFPRm(insn, false))));

  // Then the two ARM core register sources Rt/Rt2.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
// VFP Load/Store Instructions.
// VLDRD, VLDRS, VSTRD, VSTRS
// Operands: Dd/Sd, base register Rn, then an AM5 opcode (add/sub + imm8).
static bool DisassembleVFPLdStFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 3 && "VFPLdStFrm expects NumOps >= 3");

  bool isSPVFP = (Opcode == ARM::VLDRS || Opcode == ARM::VSTRS) ? true : false;
  unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  // Extract Dd/Sd for operand 0.
  unsigned RegD = decodeVFPRd(insn, isSPVFP);
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID, RegD)));

  // Base register Rn.
  unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));
  MI.addOperand(MCOperand::CreateReg(Base));

  // Next comes the AM5 Opcode.
  ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
  unsigned char Imm8 = insn & 0xFF;
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(AddrOpcode, Imm8)));
// VFP Load/Store Multiple Instructions.
// This is similar to the algorithm for LDM/STM in that operand 0 (the base) and
// operand 1 (the AM5 mode imm) is followed by two predicate operands. It is
// followed by a reglist of either DPR(s) or SPR(s).
//
// VLDMD[_UPD], VLDMS[_UPD], VSTMD[_UPD], VSTMS[_UPD]
static bool DisassembleVFPLdStMulFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  assert(NumOps >= 5 && "VFPLdStMulFrm expects NumOps >= 5");

  unsigned &OpIdx = NumOpsAdded;

  unsigned Base = getRegisterEnum(ARM::GPRRegClassID, decodeRn(insn));

  // Writeback to base, if necessary.
  if (Opcode == ARM::VLDMD_UPD || Opcode == ARM::VLDMS_UPD ||
      Opcode == ARM::VSTMD_UPD || Opcode == ARM::VSTMS_UPD) {
    MI.addOperand(MCOperand::CreateReg(Base));

  MI.addOperand(MCOperand::CreateReg(Base));

  // Next comes the AM5 Opcode.
  ARM_AM::AMSubMode SubMode = getAMSubModeForBits(getPUBits(insn));
  unsigned char Imm8 = insn & 0xFF;
  MI.addOperand(MCOperand::CreateImm(ARM_AM::getAM5Opc(SubMode, Imm8)));

  // Handling the two predicate operands before the reglist.
  // An unconditional (0b1111) cond field is mapped to AL (0b1110) here.
  int64_t CondVal = insn >> ARMII::CondShift;
  MI.addOperand(MCOperand::CreateImm(CondVal == 0xF ? 0xE : CondVal));
  MI.addOperand(MCOperand::CreateReg(ARM::CPSR));

  bool isSPVFP = (Opcode == ARM::VLDMS || Opcode == ARM::VLDMS_UPD ||
                  Opcode == ARM::VSTMS || Opcode == ARM::VSTMS_UPD) ? true : false;
  unsigned RegClassID = isSPVFP ? ARM::SPRRegClassID : ARM::DPRRegClassID;

  // First register of the list; the rest are consecutive.
  unsigned RegD = decodeVFPRd(insn, isSPVFP);

  // Fill the variadic part of reglist.
  // Imm8 counts SPR-sized slots, so a DPR list holds Imm8/2 registers.
  unsigned Regs = isSPVFP ? Imm8 : Imm8/2;
  for (unsigned i = 0; i < Regs; ++i) {
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClassID,
// Misc. VFP Instructions.
// FMSTAT (vmrs with Rt=0b1111, i.e., to apsr_nzcv and no register operand)
// FCONSTD (DPR and a VFPf64Imm operand)
// FCONSTS (SPR and a VFPf32Imm operand)
// VMRS/VMSR (GPR operand)
// NOTE(review): the early return, break statements, and closing lines of the
// switch appear elided in this copy.
static bool DisassembleVFPMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {

  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  unsigned &OpIdx = NumOpsAdded;

  // FMSTAT takes no explicit register operand.
  if (Opcode == ARM::FMSTAT)

  assert(NumOps >= 2 && "VFPMiscFrm expects >=2 operands");

  // Operand 0's register class picks how the register number is decoded.
  unsigned RegEnum = 0;
  switch (OpInfo[0].RegClass) {
  case ARM::DPRRegClassID:
    RegEnum = getRegisterEnum(ARM::DPRRegClassID, decodeVFPRd(insn, false));
  case ARM::SPRRegClassID:
    RegEnum = getRegisterEnum(ARM::SPRRegClassID, decodeVFPRd(insn, true));
  case ARM::GPRRegClassID:
    RegEnum = getRegisterEnum(ARM::GPRRegClassID, decodeRd(insn));
    assert(0 && "Invalid reg class id");

  MI.addOperand(MCOperand::CreateReg(RegEnum));

  // Extract/decode the f64/f32 immediate.
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // The asm syntax specifies the before-expanded <imm>.
    // Not VFPExpandImm(slice(insn,19,16) << 4 | slice(insn, 3, 0),
    // Opcode == ARM::FCONSTD ? 64 : 32)
    MI.addOperand(MCOperand::CreateImm(slice(insn,19,16)<<4 | slice(insn,3,0)));
1987 // DisassembleThumbFrm() is defined in ThumbDisassemblerCore.h file.
1988 #include "ThumbDisassemblerCore.h"
1990 /////////////////////////////////////////////////////
1992 // Utility Functions For ARM Advanced SIMD //
1994 /////////////////////////////////////////////////////
1996 // The following NEON namings are based on A8.6.266 VABA, VABAL. Notice that
1997 // A8.6.303 VDUP (ARM core register)'s D/Vd pair is the N/Vn pair of VABA/VABAL.
1999 // A7.3 Register encoding
2001 // Extract/Decode NEON D/Vd:
2003 // Note that for quadword, Qd = UInt(D:Vd<3:1>) = Inst{22:15-13}, whereas for
2004 // doubleword, Dd = UInt(D:Vd). We compensate for this difference by
2005 // handling it in the getRegisterEnum() utility function.
2006 // D = Inst{22}, Vd = Inst{15-12}
2007 static unsigned decodeNEONRd(uint32_t insn) {
2008 return ((insn >> ARMII::NEON_D_BitShift) & 1) << 4
2009 | ((insn >> ARMII::NEON_RegRdShift) & ARMII::NEONRegMask);
2012 // Extract/Decode NEON N/Vn:
2014 // Note that for quadword, Qn = UInt(N:Vn<3:1>) = Inst{7:19-17}, whereas for
2015 // doubleword, Dn = UInt(N:Vn). We compensate for this difference by
2016 // handling it in the getRegisterEnum() utility function.
2017 // N = Inst{7}, Vn = Inst{19-16}
2018 static unsigned decodeNEONRn(uint32_t insn) {
2019 return ((insn >> ARMII::NEON_N_BitShift) & 1) << 4
2020 | ((insn >> ARMII::NEON_RegRnShift) & ARMII::NEONRegMask);
2023 // Extract/Decode NEON M/Vm:
2025 // Note that for quadword, Qm = UInt(M:Vm<3:1>) = Inst{5:3-1}, whereas for
2026 // doubleword, Dm = UInt(M:Vm). We compensate for this difference by
2027 // handling it in the getRegisterEnum() utility function.
2028 // M = Inst{5}, Vm = Inst{3-0}
2029 static unsigned decodeNEONRm(uint32_t insn) {
2030 return ((insn >> ARMII::NEON_M_BitShift) & 1) << 4
2031 | ((insn >> ARMII::NEON_RegRmShift) & ARMII::NEONRegMask);
2042 } // End of unnamed namespace
// size field -> Inst{11-10}
// index_align field -> Inst{7-4}
//
// The Lane Index interpretation depends on the Data Size:
// 8 (encoded as size = 0b00) -> Index = index_align[3:1]
// 16 (encoded as size = 0b01) -> Index = index_align[3:2]
// 32 (encoded as size = 0b10) -> Index = index_align[3]
//
// Ref: A8.6.317 VLD4 (single 4-element structure to one lane).
//
// Fix: the function was unterminated in this copy; body completed to match
// the documented extraction above.
static unsigned decodeLaneIndex(uint32_t insn) {
  unsigned size = insn >> 10 & 3;
  assert((size == 0 || size == 1 || size == 2) &&
         "Encoding error: size should be either 0, 1, or 2");

  unsigned index_align = insn >> 4 & 0xF;
  // Drop the alignment bits: index = index_align >> (size + 1).
  return (index_align >> 1) >> size;
}
// imm64 = AdvSIMDExpandImm(op, cmode, i:imm3:imm4)
// op = Inst{5}, cmode = Inst{11-8}
// i = Inst{24} (ARM architecture)
// imm3 = Inst{18-16}, imm4 = Inst{3-0}
// Ref: Table A7-15 Modified immediate values for Advanced SIMD instructions.
// NOTE(review): the switch over cmode/esize, the Imm64 declaration, and the
// imm4 term of Imm8 appear elided in this copy.
static uint64_t decodeN1VImm(uint32_t insn, ElemSize esize) {
  unsigned char cmode = (insn >> 8) & 0xF;
  // Reassemble Imm8 = i:imm3:imm4 from its scattered fields.
  unsigned char Imm8 = ((insn >> 24) & 1) << 7 |
                       ((insn >> 16) & 7) << 4 |
      // Byte-shifted forms: place Imm8 in the byte selected by cmode.
      Imm64 = Imm8 << 8*(cmode >> 1 & 1);
      Imm64 = (Imm8 << 8) | 0xFF;
    else if (cmode == 13)
      Imm64 = (Imm8 << 16) | 0xFFFF;
      // Imm8 to be shifted left by how many bytes...
      Imm64 = Imm8 << 8*(cmode >> 1 & 3);
    // Bit-replication form: each Imm8 bit expands to a full byte.
    for (unsigned i = 0; i < 8; ++i)
      if ((Imm8 >> i) & 1)
        Imm64 |= 0xFF << 8*i;
  assert(0 && "Unreachable code!");
// A8.6.339 VMUL, VMULL (by scalar)
// ESize16 => m = Inst{2-0} (Vm<2:0>) D0-D7
// ESize32 => m = Inst{3-0} (Vm<3:0>) D0-D15
static unsigned decodeRestrictedDm(uint32_t insn, ElemSize esize) {
  // NOTE(review): the switch over esize returning the restricted Dm number
  // appears elided in this copy; only the fallthrough assert is visible.
  assert(0 && "Unreachable code!");
// A8.6.339 VMUL, VMULL (by scalar)
// ESize16 => index = Inst{5:3} (M:Vm<3>) D0-D7
// ESize32 => index = Inst{5} (M) D0-D15
// NOTE(review): the switch over esize appears elided in this copy.
static unsigned decodeRestrictedDmIndex(uint32_t insn, ElemSize esize) {
    // ESize16: index = M:Vm<3>.
    return (((insn >> 5) & 1) << 1) | ((insn >> 3) & 1);
    // ESize32: index = M.
    return (insn >> 5) & 1;
  assert(0 && "Unreachable code!");
// A8.6.296 VCVT (between floating-point and fixed-point, Advanced SIMD)
// (64 - <fbits>) is encoded as imm6, i.e., Inst{21-16}.
//
// Returns the number of fraction bits, 64 - UInt(imm6).
// Fix: the function was unterminated in this copy; closing brace restored.
static unsigned decodeVCVTFractionBits(uint32_t insn) {
  return 64 - ((insn >> 16) & 0x3F);
}
// A8.6.302 VDUP (scalar)
// ESize8 => index = Inst{19-17}
// ESize16 => index = Inst{19-18}
// ESize32 => index = Inst{19}
// NOTE(review): the switch over esize appears elided in this copy.
static unsigned decodeNVLaneDupIndex(uint32_t insn, ElemSize esize) {
    return (insn >> 17) & 7;
    return (insn >> 18) & 3;
    return (insn >> 19) & 1;
  assert(0 && "Unspecified element size!");
// A8.6.328 VMOV (ARM core register to scalar)
// A8.6.329 VMOV (scalar to ARM core register)
// ESize8 => index = Inst{21:6-5}
// ESize16 => index = Inst{21:6}
// ESize32 => index = Inst{21}
// NOTE(review): the switch over esize appears elided in this copy.
static unsigned decodeNVLaneOpIndex(uint32_t insn, ElemSize esize) {
    return ((insn >> 21) & 1) << 2 | ((insn >> 5) & 3);
    return ((insn >> 21) & 1) << 1 | ((insn >> 6) & 1);
    return ((insn >> 21) & 1);
  assert(0 && "Unspecified element size!");
// Imm6 = Inst{21-16}, L = Inst{7}
//
// LeftShift == true (A8.6.367 VQSHL, A8.6.387 VSLI):
// case L:imm6 of
// '0001xxx' => esize = 8; shift_amount = imm6 - 8
// '001xxxx' => esize = 16; shift_amount = imm6 - 16
// '01xxxxx' => esize = 32; shift_amount = imm6 - 32
// '1xxxxxx' => esize = 64; shift_amount = imm6
//
// LeftShift == false (A8.6.376 VRSHR, A8.6.368 VQSHRN):
// case L:imm6 of
// '0001xxx' => esize = 8; shift_amount = 16 - imm6
// '001xxxx' => esize = 16; shift_amount = 32 - imm6
// '01xxxxx' => esize = 32; shift_amount = 64 - imm6
// '1xxxxxx' => esize = 64; shift_amount = 64 - imm6
//
// NOTE(review): parts of the esize classification chain and the LeftShift
// branch appear elided in this copy.
static unsigned decodeNVSAmt(uint32_t insn, bool LeftShift) {
  ElemSize esize = ESizeNA;
  unsigned L = (insn >> 7) & 1;
  unsigned imm6 = (insn >> 16) & 0x3F;
  else if (imm6 >> 4 == 1)
  else if (imm6 >> 5 == 1)
    assert(0 && "Wrong encoding of Inst{7:21-16}!");

  // Left shifts encode the amount directly (biased by esize, per the table);
  // right shifts encode esize*2 - amount (or 64 - amount for esize 64).
  return esize == ESize64 ? imm6 : (imm6 - esize);
  return esize == ESize64 ? (esize - imm6) : (2*esize - imm6);
// Imm4 = Inst{11-8}
// Extracts the 4-bit immediate of an N3V-format instruction.
static unsigned decodeN3VImm(uint32_t insn) {
  const unsigned imm4 = (insn >> 8) & 0xF;
  return imm4;
}
// DisassembleNLdSt0 - Build the MCOperand list for a NEON VLD*/VST*
// instruction (including the single-lane forms).  The operand order depends
// on direction:
//
// Stores:
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm]
//   D[d] D[d2] ... Rn [TIED_TO Rn] align [Rm] TIED_TO ... imm(idx)
// Loads:
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ...
//   Rn [TIED_TO Rn] align [Rm] D[d] D[d2] ... [imm(idx)]
//
// Correctly set VLD*/VST*'s TIED_TO GPR, as the asm printer needs it.
//
// DblSpaced selects a register increment of 2 (double-spaced register lists)
// instead of 1.  Returns true on success; OpIdx aliases NumOpsAdded so every
// addOperand below is accounted back to the caller.
static bool DisassembleNLdSt0(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool Store, bool DblSpaced) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  // At least one DPR register plus addressing mode #6.
  assert(NumOps >= 3 && "Expect >= 3 operands");
  unsigned &OpIdx = NumOpsAdded;
  // We have homogeneous NEON registers for Load/Store.
  unsigned RegClass = 0;
  // Double-spaced registers have increments of 2.
  unsigned Inc = DblSpaced ? 2 : 1;
  unsigned Rn = decodeRn(insn);
  unsigned Rm = decodeRm(insn);
  unsigned Rd = decodeNEONRd(insn);
  // A7.7.1 Advanced SIMD addressing mode.
  // LLVM Addressing Mode #6.
  unsigned RmEnum = 0;
  RmEnum = getRegisterEnum(ARM::GPRRegClassID, Rm);
  // Store path: consume possible WB, AddrMode6, possible increment reg, the
  // DPR/QPR's, then possible lane index.
  assert(OpIdx < NumOps && OpInfo[0].RegClass == ARM::GPRRegClassID &&
         "Reg operand expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  // Addressing mode #6 is a (base register, alignment) pair.
  assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  // NOTE(review): alignment is always emitted as 0 here -- confirm whether
  // the encoded align field should be decoded instead.
  MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
  MI.addOperand(MCOperand::CreateReg(RmEnum));
  // The register list being stored: homogeneous DPR or QPR operands.
  assert(OpIdx < NumOps &&
         (OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");
  RegClass = OpInfo[OpIdx].RegClass;
  while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
    // VST1q* opcodes use the alternate (paired) register encoding.
    if (Opcode >= ARM::VST1q16 && Opcode <= ARM::VST1q8)
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
  // Handle possible lane index (single-lane store forms).
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
  // Load path: consume the DPR/QPR's, possible WB, AddrMode6, possible
  // increment reg, possible TIED_TO DPR/QPR's (ignored), then possible lane
  // index.
  RegClass = OpInfo[0].RegClass;
  while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
    // VLD1q* opcodes use the alternate (paired) register encoding.
    if (Opcode >= ARM::VLD1q16 && Opcode <= ARM::VLD1q8)
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd,true)));
      MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,Rd)));
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  assert((OpIdx+1) < NumOps && OpInfo[OpIdx].RegClass == ARM::GPRRegClassID &&
         OpInfo[OpIdx + 1].RegClass == 0 && "Addrmode #6 Operands expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  MI.addOperand(MCOperand::CreateImm(0)); // Alignment ignored?
  MI.addOperand(MCOperand::CreateReg(RmEnum));
  // TIED_TO source-register operands of the load: placeholder reg 0.
  while (OpIdx < NumOps && OpInfo[OpIdx].RegClass == RegClass) {
    assert(TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1 &&
           "Tied to operand expected");
    MI.addOperand(MCOperand::CreateReg(0));
  // Handle possible lane index (single-lane load forms).
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    MI.addOperand(MCOperand::CreateImm(decodeLaneIndex(insn)));
// DisassembleNLdSt - Entry point for NEON load/store formats.
// If L (Inst{21}) == 0, store instructions.
// Find out about double-spaced-ness of the Opcode (from its name) and pass it
// on to DisassembleNLdSt0(), which does the actual operand building.
static bool DisassembleNLdSt(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const StringRef Name = ARMInsts[Opcode].Name;
  bool DblSpaced = false;
  if (Name.find("LN") != std::string::npos) {
    // To one lane instructions.
    // See, for example, 8.6.317 VLD4 (single 4-element structure to one lane).
    // <size> == 16 && Inst{5} == 1 --> DblSpaced = true
    if (Name.endswith("16") || Name.endswith("16_UPD"))
      DblSpaced = slice(insn, 5, 5) == 1;
    // <size> == 32 && Inst{6} == 1 --> DblSpaced = true
    if (Name.endswith("32") || Name.endswith("32_UPD"))
      DblSpaced = slice(insn, 6, 6) == 1;
    // Multiple n-element structures with type encoded as Inst{11-8}.
    // See, for example, A8.6.316 VLD4 (multiple 4-element structures).
    // n == 2 && type == 0b1001 -> DblSpaced = true
    if (Name.startswith("VST2") || Name.startswith("VLD2"))
      DblSpaced = slice(insn, 11, 8) == 9;
    // n == 3 && type == 0b0101 -> DblSpaced = true
    if (Name.startswith("VST3") || Name.startswith("VLD3"))
      DblSpaced = slice(insn, 11, 8) == 5;
    // n == 4 && type == 0b0001 -> DblSpaced = true
    if (Name.startswith("VST4") || Name.startswith("VLD4"))
      DblSpaced = slice(insn, 11, 8) == 1;
  // L bit (Inst{21}) == 0 means a store.
  return DisassembleNLdSt0(MI, Opcode, insn, NumOps, NumOpsAdded,
                           slice(insn, 21, 21) == 0, DblSpaced);
// DisassembleN1RegModImmFrm - One NEON register plus a modified immediate
// value (A7.4.6).  LLVM only defines the VMOVv* opcodes for this format; the
// element size inferred from the opcode controls how the immediate is built.
static bool DisassembleN1RegModImmFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == 0) &&
         "Expect 1 reg operand followed by 1 imm operand");
  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[0].RegClass,
                                                     decodeNEONRd(insn))));
  // Element size is dictated by the VMOV variant.
  ElemSize esize = ESizeNA;
  case ARM::VMOVv16i8:
  case ARM::VMOVv4i16:
  case ARM::VMOVv8i16:
  case ARM::VMOVv2i32:
  case ARM::VMOVv4i32:
  case ARM::VMOVv1i64:
  case ARM::VMOVv2i64:
    assert(0 && "Unreachable code!");
  // One register and a modified immediate value.
  // Add the imm operand.
  MI.addOperand(MCOperand::CreateImm(decodeN1VImm(insn, esize)));
2455 N2V_VectorConvert_Between_Float_Fixed
2457 } // End of unnamed namespace
// DisassembleNVdVmOptImm - Common worker for 2-register NEON formats with an
// optional trailing immediate, selected by Flag:
//
// Vector Convert [between floating-point and fixed-point]
// Qd/Dd Qm/Dm [fbits]
//
// Vector Duplicate Lane (from scalar to all elements) Instructions.
// VDUPLN16d, VDUPLN16q, VDUPLN32d, VDUPLN32q, VDUPLN8d, VDUPLN8q:
//
// Vector Move Long:
//
// Vector Move Narrow:
static bool DisassembleNVdVmOptImm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N2VFlag Flag = N2V_None) {
  const TargetInstrDesc &TID = ARMInsts[Opc];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 2 operands and first 2 as reg operands");
  unsigned &OpIdx = NumOpsAdded;
  ElemSize esize = ESizeNA;
  if (Flag == N2V_VectorDupLane) {
    // VDUPLN has its index embedded. Its size can be inferred from the Opcode.
    assert(Opc >= ARM::VDUPLN16d && Opc <= ARM::VDUPLN8q &&
           "Unexpected Opcode");
    esize = (Opc == ARM::VDUPLN8d || Opc == ARM::VDUPLN8q) ? ESize8
       : ((Opc == ARM::VDUPLN16d || Opc == ARM::VDUPLN16q) ? ESize16
  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  // TIED_TO destination, if present, gets a placeholder reg 0.
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  // VZIP and others have two TIED_TO reg operands.
  while (OpIdx < NumOps &&
         (Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    // Add TIED_TO operand.
    MI.addOperand(MI.getOperand(Idx));
  // Add the imm operand, if required (lane index or fraction bits).
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Sentinel so a missing Flag case is caught by the assert below.
    unsigned imm = 0xFFFFFFFF;
    if (Flag == N2V_VectorDupLane)
      imm = decodeNVLaneDupIndex(insn, esize);
    if (Flag == N2V_VectorConvert_Between_Float_Fixed)
      imm = decodeVCVTFractionBits(insn);
    assert(imm != 0xFFFFFFFF && "Internal error");
    MI.addOperand(MCOperand::CreateImm(imm));
// DisassembleN2RegFrm - Plain 2-register NEON instructions (no immediate);
// thin wrapper that delegates to DisassembleNVdVmOptImm with the default
// N2V_None flag.
static bool DisassembleN2RegFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded);
// DisassembleNVCVTFrm - VCVT between floating-point and fixed-point; the
// trailing immediate is the fraction-bit count.  Delegates to
// DisassembleNVdVmOptImm.
static bool DisassembleNVCVTFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
                                N2V_VectorConvert_Between_Float_Fixed);
// DisassembleNVecDupLnFrm - VDUP (scalar to all elements); the trailing
// immediate is the lane index.  Delegates to DisassembleNVdVmOptImm.
static bool DisassembleNVecDupLnFrm(MCInst &MI, unsigned Opc, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVmOptImm(MI, Opc, insn, NumOps, NumOpsAdded,
// DisassembleNVectorShift - Common worker for NEON immediate-shift formats.
//
// Vector Shift [Accumulate] Instructions.
// Qd/Dd [Qd/Dd (TIED_TO)] Qm/Dm ShiftAmt
//
// Vector Shift Left Long (with maximum shift count) Instructions.
// VSHLLi16, VSHLLi32, VSHLLi8: Qd Dm imm (== size)
//
// LeftShift selects how the shift amount is decoded from imm6 (see
// decodeNVSAmt).
static bool DisassembleNVectorShift(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, bool LeftShift) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");
  unsigned &OpIdx = NumOpsAdded;
  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  // TIED_TO destination (shift-accumulate forms) gets placeholder reg 0.
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));
  assert((OpInfo[OpIdx].RegClass == ARM::DPRRegClassID ||
          OpInfo[OpIdx].RegClass == ARM::QPRRegClassID) &&
         "Reg operand expected");
  // Qm/Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
                                                     decodeNEONRm(insn))));
  assert(OpInfo[OpIdx].RegClass == 0 && "Imm operand expected");
  // Add the imm operand.
  // VSHLL has maximum shift count as the imm, inferred from its size.
  Imm = decodeNVSAmt(insn, LeftShift);
  MI.addOperand(MCOperand::CreateImm(Imm));
// Left shift instructions.
// Delegates to DisassembleNVectorShift with LeftShift == true.
static bool DisassembleN2RegVecShLFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, true);
// Right shift instructions have different shift amount interpretation.
// Delegates to DisassembleNVectorShift with LeftShift == false.
static bool DisassembleN2RegVecShRFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVectorShift(MI, Opcode, insn, NumOps, NumOpsAdded, false);
2650 N3V_Multiply_By_Scalar
2652 } // End of unnamed namespace
// NEON Three Register Instructions with Optional Immediate Operand
//
// Vector Extract Instructions.
// Qd/Dd Qn/Dn Qm/Dm imm4
//
// Vector Shift (Register) Instructions.
// Qd/Dd Qm/Dm Qn/Dn (notice the order of m, n)
//
// Vector Multiply [Accumulate/Subtract] [Long] By Scalar Instructions.
// Qd/Dd Qn/Dn RestrictedDm index
//
// Flag selects the operand order (VdVnVm vs. VdVmVn), whether a trailing
// imm4 is present, and whether Dm is a restricted scalar register.
static bool DisassembleNVdVnVmOptImm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, N3VFlag Flag = N3V_None) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  // No checking for OpInfo[2] because of MOVDneon/MOVQ with only two regs.
  assert(NumOps >= 3 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         (OpInfo[1].RegClass == ARM::DPRRegClassID ||
          OpInfo[1].RegClass == ARM::QPRRegClassID) &&
         "Expect >= 3 operands and first 2 as reg operands");
  unsigned &OpIdx = NumOpsAdded;
  // Interpret the Flag into the three decoding knobs.
  bool VdVnVm = Flag == N3V_VectorShift ? false : true;
  bool IsImm4 = Flag == N3V_VectorExtract ? true : false;
  bool IsDmRestricted = Flag == N3V_Multiply_By_Scalar ? true : false;
  ElemSize esize = ESizeNA;
  if (Flag == N3V_Multiply_By_Scalar) {
    // size = Inst{21-20}: 1 => 16-bit elements, 2 => 32-bit elements.
    unsigned size = (insn >> 20) & 3;
    if (size == 1) esize = ESize16;
    if (size == 2) esize = ESize32;
    assert (esize == ESize16 || esize == ESize32);
  // Qd/Dd = Inst{22:15-12} => NEON Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(OpInfo[OpIdx].RegClass,
                                                     decodeNEONRd(insn))));
  // VABA, VABAL, VBSLd, VBSLq, ...
  if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
    MI.addOperand(MCOperand::CreateReg(0));
  // Dn = Inst{7:19-16} => NEON Rn
  // or (for the vector-shift order)
  // Dm = Inst{5:3-0} => NEON Rm
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(OpInfo[OpIdx].RegClass,
                                  VdVnVm ? decodeNEONRn(insn)
                                         : decodeNEONRm(insn))));
  // Special case handling for VMOVDneon and VMOVQ because they are marked as
  if (Opcode == ARM::VMOVDneon || Opcode == ARM::VMOVQ)
  // Dm = Inst{5:3-0} => NEON Rm
  // Dm is restricted to D0-D7 if size is 16, D0-D15 otherwise
  // or (for the vector-shift order)
  // Dn = Inst{7:19-16} => NEON Rn
  unsigned m = VdVnVm ? (IsDmRestricted ? decodeRestrictedDm(insn, esize)
                                        : decodeNEONRm(insn))
                      : decodeNEONRn(insn);
  MI.addOperand(MCOperand::CreateReg(
                  getRegisterEnum(OpInfo[OpIdx].RegClass, m)));
  if (OpIdx < NumOps && OpInfo[OpIdx].RegClass == 0
      && !OpInfo[OpIdx].isPredicate() && !OpInfo[OpIdx].isOptionalDef()) {
    // Add the imm operand (extract imm4 or restricted scalar index).
    Imm = decodeN3VImm(insn);
    else if (IsDmRestricted)
      Imm = decodeRestrictedDmIndex(insn, esize);
      assert(0 && "Internal error: unreachable code!");
    MI.addOperand(MCOperand::CreateImm(Imm));
// DisassembleN3RegFrm - Plain 3-register NEON data-processing instructions;
// delegates to DisassembleNVdVnVmOptImm with the default N3V_None flag.
static bool DisassembleN3RegFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded);
// DisassembleN3RegVecShFrm - Vector shift (register) instructions; operand
// order is Vd Vm Vn.  Delegates to DisassembleNVdVnVmOptImm.
static bool DisassembleN3RegVecShFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// DisassembleNVecExtractFrm - VEXT; trailing imm4 operand.  Delegates to
// DisassembleNVdVnVmOptImm.
static bool DisassembleNVecExtractFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
// DisassembleNVecMulScalarFrm - Multiply [accumulate/subtract] [long] by
// scalar; Dm is restricted and a lane index follows.  Delegates to
// DisassembleNVdVnVmOptImm.
static bool DisassembleNVecMulScalarFrm(MCInst &MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  return DisassembleNVdVnVmOptImm(MI, Opcode, insn, NumOps, NumOpsAdded,
                                  N3V_Multiply_By_Scalar);
// Vector Table Lookup
//
// VTBL1, VTBX1: Dd [Dd(TIED_TO)] Dn Dm
// VTBL2, VTBX2: Dd [Dd(TIED_TO)] Dn Dn+1 Dm
// VTBL3, VTBX3: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dm
// VTBL4, VTBX4: Dd [Dd(TIED_TO)] Dn Dn+1 Dn+2 Dn+3 Dm
static bool DisassembleNVTBLFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;
  assert(NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == ARM::DPRRegClassID &&
         "Expect >= 3 operands and first 3 as reg operands");
  unsigned &OpIdx = NumOpsAdded;
  unsigned Rn = decodeNEONRn(insn);
  // Table list length, from len = Inst{9-8}:
  // {Dn} encoded as len = 0b00
  // {Dn Dn+1} encoded as len = 0b01
  // {Dn Dn+1 Dn+2 } encoded as len = 0b10
  // {Dn Dn+1 Dn+2 Dn+3} encoded as len = 0b11
  unsigned Len = slice(insn, 9, 8) + 1;
  // Dd (the destination vector)
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
                                                     decodeNEONRd(insn))));
  // Process tied_to operand constraint (VTBX forms read Dd).
  if ((Idx = TID.getOperandConstraint(OpIdx, TOI::TIED_TO)) != -1) {
    MI.addOperand(MI.getOperand(Idx));
  // Do the <list> now: Dn, Dn+1, ... per Len.
  for (unsigned i = 0; i < Len; ++i) {
    assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
           "Reg operand expected");
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
  // Dm (the index vector)
  assert(OpIdx < NumOps && OpInfo[OpIdx].RegClass == ARM::DPRRegClassID &&
         "Reg operand (index vector) expected");
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
                                                     decodeNEONRm(insn))));
// DisassembleNEONFrm - Placeholder for the generic NEON format entry; every
// NEON instruction is handled by a more specific format function, so reaching
// this is a table/format error.
static bool DisassembleNEONFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  assert(0 && "Unreachable code!");
// Vector Get Lane (move scalar to ARM core register) Instructions.
// VGETLNi32, VGETLNs16, VGETLNs8, VGETLNu16, VGETLNu8: Rt Dn index
static bool DisassembleNEONGetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;
  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::GPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         OpInfo[2].RegClass == 0 &&
         "Expect >= 3 operands with one dst operand");
  // Element size is implied by the opcode variant (i32/s16/u16/s8/u8).
      Opcode == ARM::VGETLNi32 ? ESize32
        : ((Opcode == ARM::VGETLNs16 || Opcode == ARM::VGETLNu16) ? ESize16
  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  // Dn = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));
  // Lane index, decoded per element size from Inst{21:6-5}.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// Vector Set Lane (move ARM core register to scalar) Instructions.
// VSETLNi16, VSETLNi32, VSETLNi8: Dd Dd (TIED_TO) Rt index
static bool DisassembleNEONSetLnFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const TargetInstrDesc &TID = ARMInsts[Opcode];
  const TargetOperandInfo *OpInfo = TID.OpInfo;
  if (!OpInfo) return false;
  assert(TID.getNumDefs() == 1 && NumOps >= 3 &&
         OpInfo[0].RegClass == ARM::DPRRegClassID &&
         OpInfo[1].RegClass == ARM::DPRRegClassID &&
         TID.getOperandConstraint(1, TOI::TIED_TO) != -1 &&
         OpInfo[2].RegClass == ARM::GPRRegClassID &&
         OpInfo[3].RegClass == 0 &&
         "Expect >= 3 operands with one dst operand");
  // Element size is implied by the opcode variant (i8/i16/i32).
      Opcode == ARM::VSETLNi8 ? ESize8
        : (Opcode == ARM::VSETLNi16 ? ESize16
  // Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::DPRRegClassID,
                                                     decodeNEONRn(insn))));
  // TIED_TO Dd source: placeholder reg 0.
  MI.addOperand(MCOperand::CreateReg(0));
  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  // Lane index, decoded per element size from Inst{21:6-5}.
  MI.addOperand(MCOperand::CreateImm(decodeNVLaneOpIndex(insn, esize)));
// Vector Duplicate Instructions (from ARM core register to all elements).
// VDUP8d, VDUP16d, VDUP32d, VDUP8q, VDUP16q, VDUP32q: Qd/Dd Rt
static bool DisassembleNEONDupFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  assert(NumOps >= 2 &&
         (OpInfo[0].RegClass == ARM::DPRRegClassID ||
          OpInfo[0].RegClass == ARM::QPRRegClassID) &&
         OpInfo[1].RegClass == ARM::GPRRegClassID &&
         "Expect >= 2 operands and first 2 as reg operand");
  unsigned RegClass = OpInfo[0].RegClass;
  // Qd/Dd = Inst{7:19-16} => NEON Rn
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(RegClass,
                                                     decodeNEONRn(insn))));
  // Rt = Inst{15-12} => ARM Rd
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
2949 static inline bool MemBarrierInstr(uint32_t insn) {
2950 unsigned op7_4 = slice(insn, 7, 4);
2951 if (slice(insn, 31, 20) == 0xf57 && (op7_4 >= 4 && op7_4 <= 6))
2957 static inline bool PreLoadOpcode(unsigned Opcode) {
2959 case ARM::PLDi: case ARM::PLDr:
2960 case ARM::PLDWi: case ARM::PLDWr:
2961 case ARM::PLIi: case ARM::PLIr:
// DisassemblePreLoadFrm - Build operands for PLD/PLDW/PLI.
// Preload Data/Instruction requires either 2 or 4 operands:
// PLDi, PLDWi, PLIi:              Rn [+/-]imm12    add = (U == '1')
// PLDr[a|m], PLDWr[a|m], PLIr[a|m]: Rn Rm addrmode2_opc
static bool DisassemblePreLoadFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded) {
  // Base register Rn is common to both forms.
  MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
  if (Opcode == ARM::PLDi || Opcode == ARM::PLDWi || Opcode == ARM::PLIi) {
    unsigned Imm12 = slice(insn, 11, 0);
    bool Negative = getUBit(insn) == 0;
    // NOTE(review): "-1 - Imm12" looks off by one relative to a plain
    // subtracted offset (-Imm12) -- confirm against ARM ARM A8.6.117/120
    // (PLD/PLI immediate) before relying on the printed value.
    int Offset = Negative ? -1 - Imm12 : 1 * Imm12;
    MI.addOperand(MCOperand::CreateImm(Offset));
    // Register form: Rm plus an addrmode2 opcode immediate.
    MI.addOperand(MCOperand::CreateReg(getRegisterEnum(ARM::GPRRegClassID,
    ARM_AM::AddrOpc AddrOpcode = getUBit(insn) ? ARM_AM::add : ARM_AM::sub;
    // Inst{6-5} encodes the shift opcode.
    ARM_AM::ShiftOpc ShOp = getShiftOpcForBits(slice(insn, 6, 5));
    // Inst{11-7} encodes the imm5 shift amount.
    unsigned ShImm = slice(insn, 11, 7);
    // A8.4.1. Possible rrx or shift amount of 32...
    getImmShiftSE(ShOp, ShImm);
    MI.addOperand(MCOperand::CreateImm(
                    ARM_AM::getAM2Opc(AddrOpcode, ShImm, ShOp)));
// DisassembleMiscFrm - Catch-all for miscellaneous instructions: memory
// barriers, CPS, DBG, BKPT, and the preload (PLD/PLI) family.
static bool DisassembleMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  // Barriers (DMB/DSB/ISB) take no operands.
  if (MemBarrierInstr(insn))
  // CPS has a singleton $opt operand that contains the following information:
  // opt{4-0} = mode from Inst{4-0}
  // opt{5} = changemode from Inst{17}
  // opt{8-6} = AIF from Inst{8-6}
  // opt{10-9} = imod from Inst{19-18} with 0b10 as enable and 0b11 as disable
  if (Opcode == ARM::CPS) {
    unsigned Option = slice(insn, 4, 0) | slice(insn, 17, 17) << 5 |
      slice(insn, 8, 6) << 6 | slice(insn, 19, 18) << 9;
    MI.addOperand(MCOperand::CreateImm(Option));
  // DBG has its option specified in Inst{3-0}.
  if (Opcode == ARM::DBG) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 3, 0)));
  // BKPT takes an imm32 val equal to ZeroExtend(Inst{19-8:3-0}).
  if (Opcode == ARM::BKPT) {
    MI.addOperand(MCOperand::CreateImm(slice(insn, 19, 8) << 4 |
                                       slice(insn, 3, 0)));
  // Preload instructions are delegated to their own helper.
  if (PreLoadOpcode(Opcode))
    return DisassemblePreLoadFrm(MI, Opcode, insn, NumOps, NumOpsAdded);
  assert(0 && "Unexpected misc instruction!");
// DisassembleThumbMiscFrm - Thumb misc instructions are expected to be routed
// through the Thumb disassembly path, so reaching this is a table error.
static bool DisassembleThumbMiscFrm(MCInst &MI, unsigned Opcode, uint32_t insn,
    unsigned short NumOps, unsigned &NumOpsAdded, BO) {
  assert(0 && "Unexpected thumb misc. instruction!");
/// FuncPtrs - FuncPtrs maps ARMFormat to its corresponding DisassembleFP.
/// We divide the disassembly task into different categories, with each one
/// corresponding to a specific instruction encoding format. There could be
/// exceptions when handling a specific format, and that is why the Opcode is
/// also present in the function prototype.
///
/// NOTE: the entry order must match the ARMFormat enum; the builder indexes
/// this table directly with the format value.
static const DisassembleFP FuncPtrs[] = {
  &DisassembleBrMiscFrm,
  &DisassembleDPSoRegFrm,
  &DisassembleLdMiscFrm,
  &DisassembleStMiscFrm,
  &DisassembleLdStMulFrm,
  &DisassembleLdStExFrm,
  &DisassembleArithMiscFrm,
  &DisassembleVFPUnaryFrm,
  &DisassembleVFPBinaryFrm,
  &DisassembleVFPConv1Frm,
  &DisassembleVFPConv2Frm,
  &DisassembleVFPConv3Frm,
  &DisassembleVFPConv4Frm,
  &DisassembleVFPConv5Frm,
  &DisassembleVFPLdStFrm,
  &DisassembleVFPLdStMulFrm,
  &DisassembleVFPMiscFrm,
  &DisassembleThumbFrm,
  &DisassembleNEONFrm,
  &DisassembleNEONGetLnFrm,
  &DisassembleNEONSetLnFrm,
  &DisassembleNEONDupFrm,
  &DisassembleMiscFrm,
  &DisassembleThumbMiscFrm,

  // VLD and VST (including one lane) Instructions.

  // A7.4.6 One register and a modified immediate value
  // 1-Register Instructions with imm.
  // LLVM only defines VMOVv instructions.
  &DisassembleN1RegModImmFrm,

  // 2-Register Instructions with no imm.
  &DisassembleN2RegFrm,

  // 2-Register Instructions with imm (vector convert float/fixed point).
  &DisassembleNVCVTFrm,

  // 2-Register Instructions with imm (vector dup lane).
  &DisassembleNVecDupLnFrm,

  // Vector Shift Left Instructions.
  &DisassembleN2RegVecShLFrm,

  // Vector Shift Right Instructions, which has different interpretation of the
  // shift amount from the imm6 field.
  &DisassembleN2RegVecShRFrm,

  // 3-Register Data-Processing Instructions.
  &DisassembleN3RegFrm,

  // Vector Shift (Register) Instructions.
  // D:Vd M:Vm N:Vn (notice that M:Vm is the first operand)
  &DisassembleN3RegVecShFrm,

  // Vector Extract Instructions.
  &DisassembleNVecExtractFrm,

  // Vector [Saturating Rounding Doubling] Multiply [Accumulate/Subtract] [Long]
  // By Scalar Instructions.
  &DisassembleNVecMulScalarFrm,

  // Vector Table Lookup uses byte indexes in a control vector to look up byte
  // values in a table and generate a new vector.
  &DisassembleNVTBLFrm,
/// BuildIt - BuildIt performs the build step for this ARM Basic MC Builder.
/// The general idea is to set the Opcode for the MCInst, followed by adding
/// the appropriate MCOperands to the MCInst. ARM Basic MC Builder delegates
/// to the Format-specific disassemble function for disassembly, followed by
/// TryPredicateAndSBitModifier() to do PredicateOperand and OptionalDefOperand
/// which follow the Dst/Src Operands.
bool ARMBasicMCBuilder::BuildIt(MCInst &MI, uint32_t insn) {
  // Stage 1 sets the Opcode.
  MI.setOpcode(Opcode);
  // If the number of operands is zero, we're done!
  // Stage 2 calls the format-specific disassemble function to build the
  // operand list.
  unsigned NumOpsAdded = 0;
  bool OK = (*Disasm)(MI, Opcode, insn, NumOps, NumOpsAdded, this);
  if (!OK) return false;
  if (NumOpsAdded >= NumOps)
  // Stage 3 deals with operands unaccounted for after stage 2 is finished.
  // FIXME: Should this be done selectively?
  return TryPredicateAndSBitModifier(MI, Opcode, insn, NumOps - NumOpsAdded);
// TryPredicateAndSBitModifier - Append the trailing predicate operand pair
// (cond imm + CCR reg) and, if present, the optional-def CPSR operand implied
// by the 'S' bit, for operands left over after the format-specific builder.
bool ARMBasicMCBuilder::TryPredicateAndSBitModifier(MCInst& MI, unsigned Opcode,
    uint32_t insn, unsigned short NumOpsRemaining) {
  assert(NumOpsRemaining > 0 && "Invalid argument");
  const TargetOperandInfo *OpInfo = ARMInsts[Opcode].OpInfo;
  const std::string &Name = ARMInsts[Opcode].Name;
  unsigned Idx = MI.getNumOperands();
  // First, we check whether this instr specifies the PredicateOperand through
  // a pair of TargetOperandInfos with isPredicate() property.
  if (NumOpsRemaining >= 2 &&
      OpInfo[Idx].isPredicate() && OpInfo[Idx+1].isPredicate() &&
      OpInfo[Idx].RegClass == 0 && OpInfo[Idx+1].RegClass == ARM::CCRRegClassID)
    // If we are inside an IT block, get the IT condition bits maintained via
    // ARMBasicMCBuilder::ITState[7:0], through ARMBasicMCBuilder::GetITCond().
      MI.addOperand(MCOperand::CreateImm(GetITCond()));
    if (Name.length() > 1 && Name[0] == 't') {
      // Thumb conditional branch instructions have their cond field embedded
      // in the instruction word itself.
      if (Name == "t2Bcc")
        MI.addOperand(MCOperand::CreateImm(slice(insn, 25, 22)));
      else if (Name == "tBcc")
        MI.addOperand(MCOperand::CreateImm(slice(insn, 11, 8)));
        MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
      // ARM Instructions. Check condition field.
      int64_t CondVal = getCondField(insn);
        MI.addOperand(MCOperand::CreateImm(ARMCC::AL));
        MI.addOperand(MCOperand::CreateImm(CondVal));
    // The CCR register half of the predicate operand pair.
    MI.addOperand(MCOperand::CreateReg(ARM::CPSR));
    NumOpsRemaining -= 2;
    if (NumOpsRemaining == 0)
  // Next, if OptionalDefOperand exists, we check whether the 'S' bit is set.
  if (OpInfo[Idx].isOptionalDef() && OpInfo[Idx].RegClass==ARM::CCRRegClassID) {
    MI.addOperand(MCOperand::CreateReg(getSBit(insn) == 1 ? ARM::CPSR : 0));
  if (NumOpsRemaining == 0)
/// RunBuildAfterHook - RunBuildAfterHook performs operations deemed necessary
/// after BuildIt is finished.  Here it maintains the IT block state: a t2IT
/// instruction initializes the state, and each instruction inside an IT block
/// advances it.
bool ARMBasicMCBuilder::RunBuildAfterHook(bool Status, MCInst &MI,
  // Without a session (state) pointer there is nothing to update.
  if (!SP) return Status;
  if (Opcode == ARM::t2IT)
    SP->InitIT(slice(insn, 7, 0));
  else if (InITBlock())
/// Opcode, Format, and NumOperands make up an ARM Basic MCBuilder.
/// The constructor selects the format-specific disassemble function out of
/// the FuncPtrs table; the last table slot is the "unknown format" sentinel,
/// hence the `- 1` in the bound check.
ARMBasicMCBuilder::ARMBasicMCBuilder(unsigned opc, ARMFormat format,
  : Opcode(opc), Format(format), NumOps(num), SP(0) {
  unsigned Idx = (unsigned)format;
  assert(Idx < (array_lengthof(FuncPtrs) - 1) && "Unknown format");
  Disasm = FuncPtrs[Idx];
/// CreateMCBuilder - Return an ARMBasicMCBuilder that can build up the MC
/// infrastructure of an MCInst given the Opcode and Format of the instr.
/// Return NULL if it fails to create/return a proper builder. API clients
/// are responsible for freeing up of the allocated memory. Cacheing can be
/// performed by the API clients to improve performance.
ARMBasicMCBuilder *llvm::CreateMCBuilder(unsigned Opcode, ARMFormat Format) {
  // For "Unknown format", fail by returning a NULL pointer.
  if ((unsigned)Format >= (array_lengthof(FuncPtrs) - 1))
  // Operand count comes from the static ARMInsts table.
  return new ARMBasicMCBuilder(Opcode, Format,
                               ARMInsts[Opcode].getNumOperands());