1 //===-- X86CodeEmitter.cpp - Convert X86 code to machine code -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the pass that transforms the X86 machine instructions into
11 // relocatable machine code.
13 //===----------------------------------------------------------------------===//
16 #include "X86InstrInfo.h"
17 #include "X86JITInfo.h"
18 #include "X86Relocations.h"
19 #include "X86Subtarget.h"
20 #include "X86TargetMachine.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/CodeGen/JITCodeEmitter.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/Passes.h"
27 #include "llvm/IR/LLVMContext.h"
28 #include "llvm/MC/MCCodeEmitter.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/PassManager.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetOptions.h"
38 #define DEBUG_TYPE "x86-emitter"
40 STATISTIC(NumEmitted, "Number of machine instructions emitted");
// NOTE(review): this region (like the rest of the file) appears to be a lossy
// extraction: each line is prefixed with its original line number and many
// lines (access specifiers, member declarations, closing braces) are elided.
// Code is kept byte-for-byte below; only comments have been added.
//
// Emitter - A MachineFunctionPass that JIT-encodes X86 machine instructions
// into raw bytes via the templated CodeEmitter (e.g. JITCodeEmitter). The
// "} // end anonymous namespace." comment below indicates it is file-local.
43 template<class CodeEmitter>
44 class Emitter : public MachineFunctionPass {
45 const X86InstrInfo *II;
49 MachineModuleInfo *MMI;
50 intptr_t PICBaseOffset;
// Ctor: II/TD are filled in lazily by runOnMachineFunction; PIC-ness is
// derived from the target's relocation model.
55 explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce)
56 : MachineFunctionPass(ID), II(nullptr), TD(nullptr), TM(tm),
57 MCE(mce), PICBaseOffset(0), Is64BitMode(false),
58 IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
60 bool runOnMachineFunction(MachineFunction &MF) override;
62 const char *getPassName() const override {
63 return "X86 Machine Code Emitter";
// Prefix-emission helpers (legacy prefixes, VEX/XOP, segment overrides).
66 void emitOpcodePrefix(uint64_t TSFlags, int MemOperand,
67 const MachineInstr &MI,
68 const MCInstrDesc *Desc) const;
70 void emitVEXOpcodePrefix(uint64_t TSFlags, int MemOperand,
71 const MachineInstr &MI,
72 const MCInstrDesc *Desc) const;
74 void emitSegmentOverridePrefix(uint64_t TSFlags,
76 const MachineInstr &MI) const;
78 void emitInstruction(MachineInstr &MI, const MCInstrDesc *Desc);
// This pass needs MachineModuleInfo (for relocation bookkeeping).
80 void getAnalysisUsage(AnalysisUsage &AU) const override {
82 AU.addRequired<MachineModuleInfo>();
83 MachineFunctionPass::getAnalysisUsage(AU);
// Address/relocation emission helpers.
87 void emitPCRelativeBlockAddress(MachineBasicBlock *MBB);
88 void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
89 intptr_t Disp = 0, intptr_t PCAdj = 0,
90 bool Indirect = false);
91 void emitExternalSymbolAddress(const char *ES, unsigned Reloc);
92 void emitConstPoolAddress(unsigned CPI, unsigned Reloc, intptr_t Disp = 0,
94 void emitJumpTableAddress(unsigned JTI, unsigned Reloc,
97 void emitDisplacementField(const MachineOperand *RelocOp, int DispVal,
98 intptr_t Adj = 0, bool IsPCRel = true);
// ModRM/SIB/immediate byte-level emission helpers.
100 void emitRegModRMByte(unsigned ModRMReg, unsigned RegOpcodeField);
101 void emitRegModRMByte(unsigned RegOpcodeField);
102 void emitSIBByte(unsigned SS, unsigned Index, unsigned Base);
103 void emitConstant(uint64_t Val, unsigned Size);
105 void emitMemModRMByte(const MachineInstr &MI,
106 unsigned Op, unsigned RegOpcodeField,
// Low 3 bits of the register's hardware encoding (the ModRM/SIB field);
// the high bit, when present, goes into a REX/VEX prefix instead.
109 unsigned getX86RegNum(unsigned RegNo) const {
110 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
111 return TRI->getEncodingValue(RegNo) & 0x7;
114 unsigned char getVEXRegisterEncoding(const MachineInstr &MI,
115 unsigned OpNum) const;
118 template<class CodeEmitter>
119 char Emitter<CodeEmitter>::ID = 0;
120 } // end anonymous namespace.
122 /// createX86CodeEmitterPass - Return a pass that emits the collected X86 code
123 /// to the specified JITCodeEmitter object.
124 FunctionPass *llvm::createX86JITCodeEmitterPass(X86TargetMachine &TM,
125 JITCodeEmitter &JCE) {
126 return new Emitter<JITCodeEmitter>(TM, JCE);
// NOTE(review): truncated extraction — the loop headers below are cut
// mid-statement (increments elided) and the opening `do {` matching the
// trailing `} while (MCE.finishFunction(MF));` is missing from this view.
// Code kept byte-for-byte; only comments added.
//
// runOnMachineFunction - Encode every instruction of MF through MCE.
// Emission is wrapped in a do/while: it repeats while finishFunction()
// reports another pass is needed (presumably a buffer retry — confirm
// against the JITCodeEmitter contract).
129 template<class CodeEmitter>
130 bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
131 MMI = &getAnalysis<MachineModuleInfo>();
132 MCE.setModuleInfo(MMI);
// Refresh per-function target state (subtarget may differ per function).
134 II = TM.getSubtargetImpl()->getInstrInfo();
135 TD = TM.getSubtargetImpl()->getDataLayout();
136 Is64BitMode = TM.getSubtarget<X86Subtarget>().is64Bit();
137 IsPIC = TM.getRelocationModel() == Reloc::PIC_;
140 DEBUG(dbgs() << "JITTing function '" << MF.getName() << "'\n");
141 MCE.startFunction(MF);
142 for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
144 MCE.StartMachineBasicBlock(MBB);
145 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
147 const MCInstrDesc &Desc = I->getDesc();
148 emitInstruction(*I, &Desc);
149 // MOVPC32r is basically a call plus a pop instruction.
150 if (Desc.getOpcode() == X86::MOVPC32r)
151 emitInstruction(*I, &II->get(X86::POP32r));
152 ++NumEmitted; // Keep track of the # of mi's emitted
155 } while (MCE.finishFunction(MF));
// NOTE(review): truncated extraction — the `return` statements that set the
// individual REX bits (and several case labels/braces) are elided here.
// Code kept byte-for-byte; only comments added.
160 /// determineREX - Determine if the MachineInstr has to be encoded with a X86-64
161 /// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
162 /// size, and 3) use of X86-64 extended registers.
163 static unsigned determineREX(const MachineInstr &MI) {
165 const MCInstrDesc &Desc = MI.getDesc();
167 // Pseudo instructions do not need REX prefix byte.
168 if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo)
170 if (Desc.TSFlags & X86II::REX_W)
173 unsigned NumOps = Desc.getNumOperands();
// Two-address instructions tie operand 1 to operand 0; the scans below skip
// the tied operand accordingly.
175 bool isTwoAddr = NumOps > 1 &&
176 Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
178 // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
179 unsigned i = isTwoAddr ? 1 : 0;
180 for (unsigned e = NumOps; i != e; ++i) {
181 const MachineOperand& MO = MI.getOperand(i);
183 unsigned Reg = MO.getReg();
184 if (X86II::isX86_64NonExtLowByteReg(Reg))
// Per-form scan: which operands land in ModRM.reg vs ModRM.rm/SIB decides
// whether an extended register sets REX.R, REX.X or REX.B.
189 switch (Desc.TSFlags & X86II::FormMask) {
190 case X86II::MRMSrcReg: {
191 if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
193 i = isTwoAddr ? 2 : 1;
194 for (unsigned e = NumOps; i != e; ++i) {
195 const MachineOperand& MO = MI.getOperand(i);
196 if (X86InstrInfo::isX86_64ExtendedReg(MO))
201 case X86II::MRMSrcMem: {
202 if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
205 i = isTwoAddr ? 2 : 1;
206 for (; i != NumOps; ++i) {
207 const MachineOperand& MO = MI.getOperand(i);
209 if (X86InstrInfo::isX86_64ExtendedReg(MO))
217 case X86II::MRM0m: case X86II::MRM1m:
218 case X86II::MRM2m: case X86II::MRM3m:
219 case X86II::MRM4m: case X86II::MRM5m:
220 case X86II::MRM6m: case X86II::MRM7m:
221 case X86II::MRMDestMem: {
// Memory-destination forms: the memory operand occupies the first
// X86::AddrNumOperands slots, followed by the register source (if any).
222 unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
223 i = isTwoAddr ? 1 : 0;
224 if (NumOps > e && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e)))
227 for (; i != e; ++i) {
228 const MachineOperand& MO = MI.getOperand(i);
230 if (X86InstrInfo::isX86_64ExtendedReg(MO))
238 if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0)))
240 i = isTwoAddr ? 2 : 1;
241 for (unsigned e = NumOps; i != e; ++i) {
242 const MachineOperand& MO = MI.getOperand(i);
243 if (X86InstrInfo::isX86_64ExtendedReg(MO))
// NOTE(review): truncated extraction — the doc comment and the trailing
// placeholder emission/closing brace are elided. Code kept byte-for-byte.
254 /// emitPCRelativeBlockAddress - This method keeps track of the information
255 /// necessary to resolve the address of this block later and emits a dummy
258 template<class CodeEmitter>
259 void Emitter<CodeEmitter>::emitPCRelativeBlockAddress(MachineBasicBlock *MBB) {
260 // Remember where this reference was and where it is to so we can
261 // deal with it later.
262 MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
263 X86::reloc_pcrel_word, MBB));
// NOTE(review): truncated extraction — the parameter list, the pcrel-word
// RelocCST adjustment, and an `else` around the final word emission are
// elided. Code kept byte-for-byte; only comments added.
267 /// emitGlobalAddress - Emit the specified address to the code stream assuming
268 /// this is part of a "take the address of a global" instruction.
270 template<class CodeEmitter>
271 void Emitter<CodeEmitter>::emitGlobalAddress(const GlobalValue *GV,
273 intptr_t Disp /* = 0 */,
274 intptr_t PCAdj /* = 0 */,
275 bool Indirect /* = false */) {
// The relocation constant folded into the MachineRelocation depends on the
// relocation kind: PIC uses the PIC base offset instead of the displacement.
276 intptr_t RelocCST = Disp;
277 if (Reloc == X86::reloc_picrel_word)
278 RelocCST = PICBaseOffset;
279 else if (Reloc == X86::reloc_pcrel_word)
// Indirect means "address of the GV's stub/non-lazy pointer", not the GV.
281 MachineRelocation MR = Indirect
282 ? MachineRelocation::getIndirectSymbol(MCE.getCurrentPCOffset(), Reloc,
283 const_cast<GlobalValue *>(GV),
285 : MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
286 const_cast<GlobalValue *>(GV), RelocCST, false);
287 MCE.addRelocation(MR);
288 // The relocated value will be added to the displacement
289 if (Reloc == X86::reloc_absolute_dword)
290 MCE.emitDWordLE(Disp);
292 MCE.emitWordLE((int32_t)Disp);
// NOTE(review): truncated extraction — the Reloc parameter line, the rest of
// the getExtSym() call, and the final word emission are elided. Code kept
// byte-for-byte; only comments added.
295 /// emitExternalSymbolAddress - Arrange for the address of an external symbol to
296 /// be emitted to the current location in the function, and allow it to be PC
298 template<class CodeEmitter>
299 void Emitter<CodeEmitter>::emitExternalSymbolAddress(const char *ES,
301 intptr_t RelocCST = (Reloc == X86::reloc_picrel_word) ? PICBaseOffset : 0;
303 // X86 never needs stubs because instruction selection will always pick
304 // an instruction sequence that is large enough to hold any address
306 // (see X86ISelLowering.cpp, near 2039: X86TargetLowering::LowerCall)
307 bool NeedStub = false;
308 MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
311 if (Reloc == X86::reloc_absolute_dword)
// NOTE(review): truncated extraction — the pcrel RelocCST adjustment line and
// an `else` before the final word emission are elided. Code kept
// byte-for-byte; only comments added.
317 /// emitConstPoolAddress - Arrange for the address of an constant pool
318 /// to be emitted to the current location in the function, and allow it to be PC
320 template<class CodeEmitter>
321 void Emitter<CodeEmitter>::emitConstPoolAddress(unsigned CPI, unsigned Reloc,
322 intptr_t Disp /* = 0 */,
323 intptr_t PCAdj /* = 0 */) {
324 intptr_t RelocCST = 0;
325 if (Reloc == X86::reloc_picrel_word)
326 RelocCST = PICBaseOffset;
327 else if (Reloc == X86::reloc_pcrel_word)
329 MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
330 Reloc, CPI, RelocCST));
331 // The relocated value will be added to the displacement
332 if (Reloc == X86::reloc_absolute_dword)
333 MCE.emitDWordLE(Disp);
335 MCE.emitWordLE((int32_t)Disp);
// NOTE(review): truncated extraction — mirrors emitConstPoolAddress above;
// the pcrel adjustment and the trailing emitDWordLE/emitWordLE lines are
// elided. Code kept byte-for-byte; only comments added.
338 /// emitJumpTableAddress - Arrange for the address of a jump table to
339 /// be emitted to the current location in the function, and allow it to be PC
341 template<class CodeEmitter>
342 void Emitter<CodeEmitter>::emitJumpTableAddress(unsigned JTI, unsigned Reloc,
343 intptr_t PCAdj /* = 0 */) {
344 intptr_t RelocCST = 0;
345 if (Reloc == X86::reloc_picrel_word)
346 RelocCST = PICBaseOffset;
347 else if (Reloc == X86::reloc_pcrel_word)
349 MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
350 Reloc, JTI, RelocCST));
351 // The relocated value will be added to the displacement
352 if (Reloc == X86::reloc_absolute_dword)
/// ModRMByte - Pack the three ModRM fields into one byte:
/// mod (bits 7-6), reg/opcode (bits 5-3), r/m (bits 2-0).
/// The same layout is used for SIB bytes (scale/index/base).
inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
                                      unsigned RM) {
  assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
  return RM | (RegOpcode << 3) | (Mod << 6);
}
364 template<class CodeEmitter>
365 void Emitter<CodeEmitter>::emitRegModRMByte(unsigned ModRMReg,
366 unsigned RegOpcodeFld){
367 MCE.emitByte(ModRMByte(3, RegOpcodeFld, getX86RegNum(ModRMReg)));
370 template<class CodeEmitter>
371 void Emitter<CodeEmitter>::emitRegModRMByte(unsigned RegOpcodeFld) {
372 MCE.emitByte(ModRMByte(3, RegOpcodeFld, 0));
375 template<class CodeEmitter>
376 void Emitter<CodeEmitter>::emitSIBByte(unsigned SS,
379 // SIB byte is in the same format as the ModRMByte...
380 MCE.emitByte(ModRMByte(SS, Index, Base));
383 template<class CodeEmitter>
384 void Emitter<CodeEmitter>::emitConstant(uint64_t Val, unsigned Size) {
385 // Output the constant in little endian byte order...
386 for (unsigned i = 0; i != Size; ++i) {
387 MCE.emitByte(Val & 255);
/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
  // Equivalent to `Value == (signed char)Value`: the value round-trips
  // through an 8-bit signed integer exactly when it lies in [-128, 127].
  return Value >= -128 && Value <= 127;
}
398 static bool gvNeedsNonLazyPtr(const MachineOperand &GVOp,
399 const TargetMachine &TM) {
400 // For Darwin-64, simulate the linktime GOT by using the same non-lazy-pointer
401 // mechanism as 32-bit mode.
402 if (TM.getSubtarget<X86Subtarget>().is64Bit() &&
403 !TM.getSubtarget<X86Subtarget>().isTargetDarwin())
406 // Return true if this is a reference to a stub containing the address of the
407 // global, not the global itself.
408 return isGlobalStubReference(GVOp.getTargetFlags());
// NOTE(review): truncated extraction — the DispVal parameter line, the
// "no relocation needed" guard around the emitConstant fast path, and the
// Indirect argument of the emitGlobalAddress call are elided. Code kept
// byte-for-byte; only comments added.
//
// emitDisplacementField - Emit a 32-bit displacement, either as a plain
// constant or as a relocation against the operand (global/symbol/constant
// pool/jump table) that will be resolved later.
411 template<class CodeEmitter>
412 void Emitter<CodeEmitter>::emitDisplacementField(const MachineOperand *RelocOp,
414 intptr_t Adj /* = 0 */,
415 bool IsPCRel /* = true */) {
416 // If this is a simple integer displacement that doesn't require a relocation,
419 emitConstant(DispVal, 4);
423 // Otherwise, this is something that requires a relocation. Emit it as such
// Pick the relocation flavor from the current mode: 64-bit uses pcrel or
// sign-extended absolute; 32-bit uses picrel under PIC, else plain absolute.
425 unsigned RelocType = Is64BitMode ?
426 (IsPCRel ? X86::reloc_pcrel_word : X86::reloc_absolute_word_sext)
427 : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
428 if (RelocOp->isGlobal()) {
429 // In 64-bit static small code model, we could potentially emit absolute.
430 // But it's probably not beneficial. If the MCE supports using RIP directly
431 // do it, otherwise fallback to absolute (this is determined by IsPCRel).
432 // 89 05 00 00 00 00 mov %eax,0(%rip) # PC-relative
433 // 89 04 25 00 00 00 00 mov %eax,0x0 # Absolute
434 bool Indirect = gvNeedsNonLazyPtr(*RelocOp, TM);
435 emitGlobalAddress(RelocOp->getGlobal(), RelocType, RelocOp->getOffset(),
437 } else if (RelocOp->isSymbol()) {
438 emitExternalSymbolAddress(RelocOp->getSymbolName(), RelocType);
439 } else if (RelocOp->isCPI()) {
440 emitConstPoolAddress(RelocOp->getIndex(), RelocType,
441 RelocOp->getOffset(), Adj);
443 assert(RelocOp->isJTI() && "Unexpected machine operand!");
444 emitJumpTableAddress(RelocOp->getIndex(), RelocType, Adj);
// NOTE(review): truncated extraction — the PCAdj parameter, several early
// `return` statements, assignments to DispForReloc/DispVal in the
// global/symbol branches, and various braces are elided. Code kept
// byte-for-byte; only comments added.
//
// emitMemModRMByte - Emit the ModRM byte, optional SIB byte and displacement
// for the memory operand starting at operand index Op (base, scale, index,
// disp, segment). RegOpcodeField fills the ModRM reg/opcode field.
448 template<class CodeEmitter>
449 void Emitter<CodeEmitter>::emitMemModRMByte(const MachineInstr &MI,
450 unsigned Op,unsigned RegOpcodeField,
452 const MachineOperand &Op3 = MI.getOperand(Op+3);
454 const MachineOperand *DispForReloc = nullptr;
456 // Figure out what sort of displacement we have to handle here.
457 if (Op3.isGlobal()) {
459 } else if (Op3.isSymbol()) {
461 } else if (Op3.isCPI()) {
// Constant-pool/jump-table entries can be resolved to a concrete address
// immediately only when the MCE supports it and we are in 32-bit non-PIC
// code; otherwise a relocation is required.
462 if (!MCE.earlyResolveAddresses() || Is64BitMode || IsPIC) {
465 DispVal += MCE.getConstantPoolEntryAddress(Op3.getIndex());
466 DispVal += Op3.getOffset();
468 } else if (Op3.isJTI()) {
469 if (!MCE.earlyResolveAddresses() || Is64BitMode || IsPIC) {
472 DispVal += MCE.getJumpTableEntryAddress(Op3.getIndex());
475 DispVal = Op3.getImm();
478 const MachineOperand &Base = MI.getOperand(Op);
479 const MachineOperand &Scale = MI.getOperand(Op+1);
480 const MachineOperand &IndexReg = MI.getOperand(Op+2);
482 unsigned BaseReg = Base.getReg();
484 // Handle %rip relative addressing.
485 if (BaseReg == X86::RIP ||
486 (Is64BitMode && DispForReloc)) { // [disp32+RIP] in X86-64 mode
487 assert(IndexReg.getReg() == 0 && Is64BitMode &&
488 "Invalid rip-relative address");
489 MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
490 emitDisplacementField(DispForReloc, DispVal, PCAdj, true);
494 // Indicate that the displacement will use an pcrel or absolute reference
495 // by default. MCEs able to resolve addresses on-the-fly use pcrel by default
496 // while others, unless explicit asked to use RIP, use absolute references.
497 bool IsPCRel = MCE.earlyResolveAddresses() ? true : false;
499 // Is a SIB byte needed?
500 // If no BaseReg, issue a RIP relative instruction only if the MCE can
501 // resolve addresses on-the-fly, otherwise use SIB (Intel Manual 2A, table
502 // 2-7) and absolute references.
503 unsigned BaseRegNo = -1U;
504 if (BaseReg != 0 && BaseReg != X86::RIP)
505 BaseRegNo = getX86RegNum(BaseReg);
507 if (// The SIB byte must be used if there is an index register.
508 IndexReg.getReg() == 0 &&
509 // The SIB byte must be used if the base is ESP/RSP/R12, all of which
510 // encode to an R/M value of 4, which indicates that a SIB byte is
512 BaseRegNo != N86::ESP &&
513 // If there is no base register and we're in 64-bit mode, we need a SIB
514 // byte to emit an addr that is just 'disp32' (the non-RIP relative form).
515 (!Is64BitMode || BaseReg != 0)) {
516 if (BaseReg == 0 || // [disp32] in X86-32 mode
517 BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
518 MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
519 emitDisplacementField(DispForReloc, DispVal, PCAdj, true);
523 // If the base is not EBP/ESP and there is no displacement, use simple
524 // indirect register encoding, this handles addresses like [EAX]. The
525 // encoding for [EBP] with no displacement means [disp32] so we handle it
526 // by emitting a displacement of 0 below.
527 if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) {
528 MCE.emitByte(ModRMByte(0, RegOpcodeField, BaseRegNo));
532 // Otherwise, if the displacement fits in a byte, encode as [REG+disp8].
533 if (!DispForReloc && isDisp8(DispVal)) {
534 MCE.emitByte(ModRMByte(1, RegOpcodeField, BaseRegNo));
535 emitConstant(DispVal, 1);
539 // Otherwise, emit the most general non-SIB encoding: [REG+disp32]
540 MCE.emitByte(ModRMByte(2, RegOpcodeField, BaseRegNo));
541 emitDisplacementField(DispForReloc, DispVal, PCAdj, IsPCRel);
545 // Otherwise we need a SIB byte, so start by outputting the ModR/M byte first.
546 assert(IndexReg.getReg() != X86::ESP &&
547 IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!");
549 bool ForceDisp32 = false;
550 bool ForceDisp8 = false;
// The branch selecting which ModR/M byte precedes the SIB byte (the no-base
// condition opening this if/else chain is elided by the extraction).
552 // If there is no base register, we emit the special case SIB byte with
553 // MOD=0, BASE=4, to JUST get the index, scale, and displacement.
554 MCE.emitByte(ModRMByte(0, RegOpcodeField, 4));
556 } else if (DispForReloc) {
557 // Emit the normal disp32 encoding.
558 MCE.emitByte(ModRMByte(2, RegOpcodeField, 4));
560 } else if (DispVal == 0 && BaseRegNo != N86::EBP) {
561 // Emit no displacement ModR/M byte
562 MCE.emitByte(ModRMByte(0, RegOpcodeField, 4));
563 } else if (isDisp8(DispVal)) {
564 // Emit the disp8 encoding...
565 MCE.emitByte(ModRMByte(1, RegOpcodeField, 4));
566 ForceDisp8 = true; // Make sure to force 8 bit disp if Base=EBP
568 // Emit the normal disp32 encoding...
569 MCE.emitByte(ModRMByte(2, RegOpcodeField, 4));
572 // Calculate what the SS field value should be...
// SSTable maps a scale immediate of 1/2/4/8 to SS=0/1/2/3; other indices are
// invalid (~0U sentinels).
573 static const unsigned SSTable[] = { ~0U, 0, 1, ~0U, 2, ~0U, ~0U, ~0U, 3 };
574 unsigned SS = SSTable[Scale.getImm()];
577 // Handle the SIB byte for the case where there is no base, see Intel
578 // Manual 2A, table 2-7. The displacement has already been output.
580 if (IndexReg.getReg())
581 IndexRegNo = getX86RegNum(IndexReg.getReg());
582 else // Examples: [ESP+1*<noreg>+4] or [scaled idx]+disp32 (MOD=0,BASE=5)
584 emitSIBByte(SS, IndexRegNo, 5);
586 unsigned BaseRegNo = getX86RegNum(BaseReg);
588 if (IndexReg.getReg())
589 IndexRegNo = getX86RegNum(IndexReg.getReg());
591 IndexRegNo = 4; // For example [ESP+1*<noreg>+4]
592 emitSIBByte(SS, IndexRegNo, BaseRegNo);
595 // Do we need to output a displacement?
597 emitConstant(DispVal, 1);
598 } else if (DispVal != 0 || ForceDisp32) {
599 emitDisplacementField(DispForReloc, DispVal, PCAdj, IsPCRel);
603 static const MCInstrDesc *UpdateOp(MachineInstr &MI, const X86InstrInfo *II,
605 const MCInstrDesc *Desc = &II->get(Opcode);
610 /// Is16BitMemOperand - Return true if the specified instruction has
611 /// a 16-bit memory operand. Op specifies the operand # of the memoperand.
612 static bool Is16BitMemOperand(const MachineInstr &MI, unsigned Op) {
613 const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
614 const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
616 if ((BaseReg.getReg() != 0 &&
617 X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg.getReg())) ||
618 (IndexReg.getReg() != 0 &&
619 X86MCRegisterClasses[X86::GR16RegClassID].contains(IndexReg.getReg())))
624 /// Is32BitMemOperand - Return true if the specified instruction has
625 /// a 32-bit memory operand. Op specifies the operand # of the memoperand.
626 static bool Is32BitMemOperand(const MachineInstr &MI, unsigned Op) {
627 const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
628 const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
630 if ((BaseReg.getReg() != 0 &&
631 X86MCRegisterClasses[X86::GR32RegClassID].contains(BaseReg.getReg())) ||
632 (IndexReg.getReg() != 0 &&
633 X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
638 /// Is64BitMemOperand - Return true if the specified instruction has
639 /// a 64-bit memory operand. Op specifies the operand # of the memoperand.
641 static bool Is64BitMemOperand(const MachineInstr &MI, unsigned Op) {
642 const MachineOperand &BaseReg = MI.getOperand(Op+X86::AddrBaseReg);
643 const MachineOperand &IndexReg = MI.getOperand(Op+X86::AddrIndexReg);
645 if ((BaseReg.getReg() != 0 &&
646 X86MCRegisterClasses[X86::GR64RegClassID].contains(BaseReg.getReg())) ||
647 (IndexReg.getReg() != 0 &&
648 X86MCRegisterClasses[X86::GR64RegClassID].contains(IndexReg.getReg())))
// NOTE(review): truncated extraction — the emitByte() calls for the 0x66
// operand-size prefix, the mandatory PD/XS/XD prefixes, the 0x0F escape and
// the 0x38/0x3A map bytes (plus break statements) are elided; only the
// control-flow skeleton is visible. Code kept byte-for-byte.
//
// emitOpcodePrefix - Emit the legacy (non-VEX) prefix bytes for MI:
// operand-size override, mandatory SIMD prefix, REX, and opcode-map escapes.
654 template<class CodeEmitter>
655 void Emitter<CodeEmitter>::emitOpcodePrefix(uint64_t TSFlags,
657 const MachineInstr &MI,
658 const MCInstrDesc *Desc) const {
659 // Emit the operand size opcode prefix as needed.
660 if (((TSFlags & X86II::OpSizeMask) >> X86II::OpSizeShift) == X86II::OpSize16)
// Mandatory prefix implied by the instruction's encoding (66/F3/F2).
663 switch (Desc->TSFlags & X86II::OpPrefixMask) {
664 case X86II::PD: // 66
667 case X86II::XS: // F3
670 case X86II::XD: // F2
675 // Handle REX prefix.
// Only emitted when some REX bit is needed; visible here is the 64-bit path.
677 if (unsigned REX = determineREX(MI))
678 MCE.emitByte(0x40 | REX);
681 // 0x0F escape code must be emitted just before the opcode.
682 switch (Desc->TSFlags & X86II::OpMapMask) {
683 case X86II::TB: // Two-byte opcode map
684 case X86II::T8: // 0F 38
685 case X86II::TA: // 0F 3A
// Second escape byte for the three-byte maps (0F 38 / 0F 3A).
690 switch (Desc->TSFlags & X86II::OpMapMask) {
691 case X86II::T8: // 0F 38
694 case X86II::TA: // 0F 3A
// NOTE(review): truncated extraction — the return-type line of the
// definition, the `SrcRegNum += 8;` adjustment for extended registers and
// the closing brace are elided. Code kept byte-for-byte.
700 // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
701 // 0-7 and the difference between the 2 groups is given by the REX prefix.
702 // In the VEX prefix, registers are seen sequencially from 0-15 and encoded
703 // in 1's complement form, example:
705 // ModRM field => XMM9 => 1
706 // VEX.VVVV => XMM9 => ~9
708 // See table 4-35 of Intel AVX Programming Reference for details.
709 template<class CodeEmitter>
711 Emitter<CodeEmitter>::getVEXRegisterEncoding(const MachineInstr &MI,
712 unsigned OpNum) const {
713 unsigned SrcReg = MI.getOperand(OpNum).getReg();
// getX86RegNum only yields the low 3 bits; extended regs (8-15) need the
// high bit added back before complementing (adjustment line elided here).
714 unsigned SrcRegNum = getX86RegNum(MI.getOperand(OpNum).getReg());
715 if (X86II::isX86_64ExtendedReg(SrcReg))
718 // The registers represented through VEX_VVVV should
719 // be encoded in 1's complement form.
720 return (~SrcRegNum) & 0xf;
// NOTE(review): truncated extraction — the MemOperand parameter line, the
// `MemOperand < 0` guard before the early return, and the no-override case
// label are elided. Code kept byte-for-byte; only comments added.
723 /// EmitSegmentOverridePrefix - Emit segment override opcode prefix as needed
724 template<class CodeEmitter>
725 void Emitter<CodeEmitter>::emitSegmentOverridePrefix(uint64_t TSFlags,
727 const MachineInstr &MI) const {
729 return; // No memory operand
731 // Check for explicit segment override on memory operand.
// Segment register operand lives at MemOperand+X86::AddrSegmentReg; each
// segment maps to its one-byte legacy override prefix.
732 switch (MI.getOperand(MemOperand+X86::AddrSegmentReg).getReg()) {
733 default: llvm_unreachable("Unknown segment register!");
735 case X86::CS: MCE.emitByte(0x2E); break;
736 case X86::SS: MCE.emitByte(0x36); break;
737 case X86::DS: MCE.emitByte(0x3E); break;
738 case X86::ES: MCE.emitByte(0x26); break;
739 case X86::FS: MCE.emitByte(0x64); break;
740 case X86::GS: MCE.emitByte(0x65); break;
// NOTE(review): truncated extraction — the statements that clear VEX_R/X/B
// and bump CurOp inside the per-form classification (plus assorted braces,
// case labels, and the 2-byte-prefix C5h emission) are elided; only the
// skeleton is visible. Code kept byte-for-byte; only comments added.
//
// emitVEXOpcodePrefix - Compute and emit the 2- or 3-byte VEX (or XOP)
// prefix for MI from its TSFlags and operand registers.
744 template<class CodeEmitter>
745 void Emitter<CodeEmitter>::emitVEXOpcodePrefix(uint64_t TSFlags,
747 const MachineInstr &MI,
748 const MCInstrDesc *Desc) const {
749 unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
750 X86II::EncodingShift;
751 bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
752 bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
753 bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
755 // VEX_R: opcode externsion equivalent to REX.R in
756 // 1's complement (inverted) form
758 // 1: Same as REX_R=0 (must be 1 in 32-bit mode)
759 // 0: Same as REX_R=1 (64 bit mode only)
761 unsigned char VEX_R = 0x1;
763 // VEX_X: equivalent to REX.X, only used when a
764 // register is used for index in SIB Byte.
766 // 1: Same as REX.X=0 (must be 1 in 32-bit mode)
767 // 0: Same as REX.X=1 (64-bit mode only)
768 unsigned char VEX_X = 0x1;
772 // 1: Same as REX_B=0 (ignored in 32-bit mode)
773 // 0: Same as REX_B=1 (64 bit mode only)
775 unsigned char VEX_B = 0x1;
777 // VEX_W: opcode specific (use like REX.W, or used for
778 // opcode extension, or ignored, depending on the opcode byte)
779 unsigned char VEX_W = 0;
781 // VEX_5M (VEX m-mmmmm field):
783 // 0b00000: Reserved for future use
784 // 0b00001: implied 0F leading opcode
785 // 0b00010: implied 0F 38 leading opcode bytes
786 // 0b00011: implied 0F 3A leading opcode bytes
787 // 0b00100-0b11111: Reserved for future use
788 // 0b01000: XOP map select - 08h instructions with imm byte
789 // 0b01001: XOP map select - 09h instructions with no imm byte
790 // 0b01010: XOP map select - 0Ah instructions with imm dword
791 unsigned char VEX_5M = 0;
793 // VEX_4V (VEX vvvv field): a register specifier
794 // (in 1's complement form) or 1111 if unused.
795 unsigned char VEX_4V = 0xf;
797 // VEX_L (Vector Length):
799 // 0: scalar or 128-bit vector
802 unsigned char VEX_L = 0;
804 // VEX_PP: opcode extension providing equivalent
805 // functionality of a SIMD prefix
812 unsigned char VEX_PP = 0;
// Transfer W/L/pp/m-mmmmm settings from the instruction's TSFlags.
814 if ((TSFlags >> X86II::VEXShift) & X86II::VEX_W)
817 if ((TSFlags >> X86II::VEXShift) & X86II::VEX_L)
820 switch (TSFlags & X86II::OpPrefixMask) {
821 default: break; // VEX_PP already correct
822 case X86II::PD: VEX_PP = 0x1; break; // 66
823 case X86II::XS: VEX_PP = 0x2; break; // F3
824 case X86II::XD: VEX_PP = 0x3; break; // F2
827 switch (TSFlags & X86II::OpMapMask) {
828 default: llvm_unreachable("Invalid prefix!");
829 case X86II::TB: VEX_5M = 0x1; break; // 0F
830 case X86II::T8: VEX_5M = 0x2; break; // 0F 38
831 case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
832 case X86II::XOP8: VEX_5M = 0x8; break;
833 case X86II::XOP9: VEX_5M = 0x9; break;
834 case X86II::XOPA: VEX_5M = 0xA; break;
837 // Classify VEX_B, VEX_4V, VEX_R, VEX_X
838 unsigned NumOps = Desc->getNumOperands();
// Skip tied operands so CurOp points at the first real source operand.
840 if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) == 0)
842 else if (NumOps > 3 && Desc->getOperandConstraint(2, MCOI::TIED_TO) == 0) {
843 assert(Desc->getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
844 // Special case for GATHER with 2 TIED_TO operands
845 // Skip the first 2 operands: dst, mask_wb
// Per-form classification: which operand feeds ModRM.reg (VEX_R), SIB
// index (VEX_X), ModRM.rm/base (VEX_B) and the vvvv field (VEX_4V).
849 switch (TSFlags & X86II::FormMask) {
850 default: llvm_unreachable("Unexpected form in emitVEXOpcodePrefix!");
853 case X86II::MRMDestMem: {
854 // MRMDestMem instructions forms:
855 // MemAddr, src1(ModR/M)
856 // MemAddr, src1(VEX_4V), src2(ModR/M)
857 // MemAddr, src1(ModR/M), imm8
859 if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrBaseReg).getReg()))
861 if (X86II::isX86_64ExtendedReg(MI.getOperand(X86::AddrIndexReg).getReg()))
864 CurOp = X86::AddrNumOperands;
866 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
868 const MachineOperand &MO = MI.getOperand(CurOp);
869 if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
873 case X86II::MRMSrcMem:
874 // MRMSrcMem instructions forms:
875 // src1(ModR/M), MemAddr
876 // src1(ModR/M), src2(VEX_4V), MemAddr
877 // src1(ModR/M), MemAddr, imm8
878 // src1(ModR/M), MemAddr, src2(VEX_I8IMM)
881 // dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
882 // dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
883 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
888 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
892 if (X86II::isX86_64ExtendedReg(
893 MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
895 if (X86II::isX86_64ExtendedReg(
896 MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
900 VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
902 case X86II::MRM0m: case X86II::MRM1m:
903 case X86II::MRM2m: case X86II::MRM3m:
904 case X86II::MRM4m: case X86II::MRM5m:
905 case X86II::MRM6m: case X86II::MRM7m: {
906 // MRM[0-9]m instructions forms:
908 // src1(VEX_4V), MemAddr
910 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
912 if (X86II::isX86_64ExtendedReg(
913 MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
915 if (X86II::isX86_64ExtendedReg(
916 MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
920 case X86II::MRMSrcReg:
921 // MRMSrcReg instructions forms:
922 // dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
923 // dst(ModR/M), src1(ModR/M)
924 // dst(ModR/M), src1(ModR/M), imm8
926 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
931 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
933 if (HasMemOp4) // Skip second register source (encoded in I8IMM)
936 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
940 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
942 case X86II::MRMDestReg:
943 // MRMDestReg instructions forms:
944 // dst(ModR/M), src(ModR/M)
945 // dst(ModR/M), src(ModR/M), imm8
946 // dst(ModR/M), src1(VEX_4V), src2(ModR/M)
947 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
952 VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
954 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
957 case X86II::MRM0r: case X86II::MRM1r:
958 case X86II::MRM2r: case X86II::MRM3r:
959 case X86II::MRM4r: case X86II::MRM5r:
960 case X86II::MRM6r: case X86II::MRM7r:
961 // MRM0r-MRM7r instructions forms:
962 // dst(VEX_4V), src(ModR/M), imm8
963 VEX_4V = getVEXRegisterEncoding(MI, CurOp);
966 if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
971 // Emit segment override opcode prefix as needed.
972 emitSegmentOverridePrefix(TSFlags, MemOperand, MI);
974 // VEX opcode prefix can have 2 or 3 bytes
977 // +-----+ +--------------+ +-------------------+
978 // | C4h | | RXB | m-mmmm | | W | vvvv | L | pp |
979 // +-----+ +--------------+ +-------------------+
981 // +-----+ +-------------------+
982 // | C5h | | R | vvvv | L | pp |
983 // +-----+ +-------------------+
985 // XOP uses a similar prefix:
986 // +-----+ +--------------+ +-------------------+
987 // | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
988 // +-----+ +--------------+ +-------------------+
989 unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
991 // Can this use the 2 byte VEX prefix?
// 2-byte form (C5h) only for plain VEX with default X/B/W and the 0F map;
// the C5h byte emission itself is elided by the extraction.
992 if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
994 MCE.emitByte(LastByte | (VEX_R << 7));
// 3-byte form: C4h (VEX) or 8Fh (XOP), then RXB|m-mmmmm, then W|vvvv|L|pp.
999 MCE.emitByte(Encoding == X86II::XOP ? 0x8F : 0xC4);
1000 MCE.emitByte(VEX_R << 7 | VEX_X << 6 | VEX_B << 5 | VEX_5M);
1001 MCE.emitByte(LastByte | (VEX_W << 7));
1004 template<class CodeEmitter>
// Emit the binary encoding of one machine instruction into the JITCodeEmitter
// (MCE): lower any remaining pseudo opcode, emit legacy/VEX opcode prefixes,
// then encode opcode byte(s), ModRM/memory operands and immediates according
// to the instruction's TSFlags form.
1005 void Emitter<CodeEmitter>::emitInstruction(MachineInstr &MI,
1006 const MCInstrDesc *Desc) {
1007 DEBUG(dbgs() << MI);
1009 // If this is a pseudo instruction, lower it.
// The ADD*_DB pseudos are re-encoded as the equivalent OR instruction
// ("_DB" = disjoint bits, so OR computes the same result -- NOTE(review):
// the disjointness guarantee comes from the instruction definition, not
// from code visible here).  The ACQUIRE_/RELEASE_ pseudos are lowered to
// plain MOV loads/stores.
1010 switch (Desc->getOpcode()) {
1011 case X86::ADD16rr_DB: Desc = UpdateOp(MI, II, X86::OR16rr); break;
1012 case X86::ADD32rr_DB: Desc = UpdateOp(MI, II, X86::OR32rr); break;
1013 case X86::ADD64rr_DB: Desc = UpdateOp(MI, II, X86::OR64rr); break;
1014 case X86::ADD16ri_DB: Desc = UpdateOp(MI, II, X86::OR16ri); break;
1015 case X86::ADD32ri_DB: Desc = UpdateOp(MI, II, X86::OR32ri); break;
1016 case X86::ADD64ri32_DB: Desc = UpdateOp(MI, II, X86::OR64ri32); break;
1017 case X86::ADD16ri8_DB: Desc = UpdateOp(MI, II, X86::OR16ri8); break;
1018 case X86::ADD32ri8_DB: Desc = UpdateOp(MI, II, X86::OR32ri8); break;
1019 case X86::ADD64ri8_DB: Desc = UpdateOp(MI, II, X86::OR64ri8); break;
1020 case X86::ACQUIRE_MOV8rm: Desc = UpdateOp(MI, II, X86::MOV8rm); break;
1021 case X86::ACQUIRE_MOV16rm: Desc = UpdateOp(MI, II, X86::MOV16rm); break;
1022 case X86::ACQUIRE_MOV32rm: Desc = UpdateOp(MI, II, X86::MOV32rm); break;
1023 case X86::ACQUIRE_MOV64rm: Desc = UpdateOp(MI, II, X86::MOV64rm); break;
1024 case X86::RELEASE_MOV8mr: Desc = UpdateOp(MI, II, X86::MOV8mr); break;
1025 case X86::RELEASE_MOV16mr: Desc = UpdateOp(MI, II, X86::MOV16mr); break;
1026 case X86::RELEASE_MOV32mr: Desc = UpdateOp(MI, II, X86::MOV32mr); break;
1027 case X86::RELEASE_MOV64mr: Desc = UpdateOp(MI, II, X86::MOV64mr); break;
// Notify the emitter of this instruction's debug location before emitting
// any bytes (the matching "after" call is at the end of this function).
1031 MCE.processDebugLoc(MI.getDebugLoc(), true);
1033 unsigned Opcode = Desc->Opcode;
1035 // If this is a two-address instruction, skip one of the register operands.
// NOTE(review): the declaration/initialization of CurOp and the bodies that
// advance it are on lines elided from this view; CurOp tracks the next
// MachineOperand index still to be encoded.
1036 unsigned NumOps = Desc->getNumOperands();
1038 if (NumOps > 1 && Desc->getOperandConstraint(1, MCOI::TIED_TO) == 0)
1040 else if (NumOps > 3 && Desc->getOperandConstraint(2, MCOI::TIED_TO) == 0) {
1041 assert(Desc->getOperandConstraint(NumOps - 1, MCOI::TIED_TO) == 1);
1042 // Special case for GATHER with 2 TIED_TO operands
1043 // Skip the first 2 operands: dst, mask_wb
1047 uint64_t TSFlags = Desc->TSFlags;
1049 // Encoding type for this instruction.
1050 unsigned char Encoding = (TSFlags & X86II::EncodingMask) >>
1051 X86II::EncodingShift;
1053 // It uses the VEX.VVVV field?
1054 bool HasVEX_4V = (TSFlags >> X86II::VEXShift) & X86II::VEX_4V;
1055 bool HasVEX_4VOp3 = (TSFlags >> X86II::VEXShift) & X86II::VEX_4VOp3;
1056 bool HasMemOp4 = (TSFlags >> X86II::VEXShift) & X86II::MemOp4;
// Operand index of the register that is encoded in the immediate byte for
// MemOp4-style (4-operand AVX) instructions; see the VEX_I8IMM handling at
// the bottom of this function.
1057 const unsigned MemOp4_I8IMMOperand = 2;
1059 // Determine where the memory operand starts, if present.
1060 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
1061 if (MemoryOperand != -1) MemoryOperand += CurOp;
1063 // Emit the lock opcode prefix as needed.
1064 if (Desc->TSFlags & X86II::LOCK)
1067 // Emit segment override opcode prefix as needed.
1068 emitSegmentOverridePrefix(TSFlags, MemoryOperand, MI);
1070 // Emit the repeat opcode prefix as needed.
1071 if (Desc->TSFlags & X86II::REP)
1074 // Emit the address size opcode prefix as needed.
// The override is required either explicitly (AdSize flag) or when the
// memory operand's address width differs from the current mode's default:
// a 32-bit address in 64-bit mode, or a 16-bit address otherwise.
1075 bool need_address_override;
1076 if (TSFlags & X86II::AdSize) {
1077 need_address_override = true;
1078 } else if (MemoryOperand < 0) {
1079 need_address_override = false;
1080 } else if (Is64BitMode) {
1081 assert(!Is16BitMemOperand(MI, MemoryOperand));
1082 need_address_override = Is32BitMemOperand(MI, MemoryOperand);
1084 assert(!Is64BitMemOperand(MI, MemoryOperand));
1085 need_address_override = Is16BitMemOperand(MI, MemoryOperand);
1088 if (need_address_override)
// Emit the remaining opcode prefixes: the legacy path and the VEX/XOP path.
// NOTE(review): the conditional selecting between these two calls (based on
// Encoding) sits on lines elided from this view.
1092 emitOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
1094 emitVEXOpcodePrefix(TSFlags, MemoryOperand, MI, Desc);
// Dispatch on the instruction's encoding form to emit opcode + operands.
1096 unsigned char BaseOpcode = X86II::getBaseOpcodeFor(Desc->TSFlags);
1097 switch (TSFlags & X86II::FormMask) {
1099 llvm_unreachable("Unknown FormMask value in X86 MachineCodeEmitter!");
1101 // Remember the current PC offset, this is the PIC relocation
1105 llvm_unreachable("pseudo instructions should be removed before code"
1107 // Do nothing for Int_MemBarrier - it's just a comment. Add a debug
1108 // to make it slightly easier to see.
1109 case X86::Int_MemBarrier:
1110 DEBUG(dbgs() << "#MEMBARRIER\n");
1113 case TargetOpcode::INLINEASM:
1114 // We allow inline assembler nodes with empty bodies - they can
1115 // implicitly define registers, which is ok for JIT.
1116 if (MI.getOperand(0).getSymbolName()[0]) {
1117 DebugLoc DL = MI.getDebugLoc();
1118 DL.print(MI.getParent()->getParent()->getFunction()->getContext(),
1120 report_fatal_error("JIT does not support inline asm!");
// Labels are recorded with the emitter; the other marker opcodes emit no
// machine-code bytes at all.
1123 case TargetOpcode::DBG_VALUE:
1124 case TargetOpcode::CFI_INSTRUCTION:
1126 case TargetOpcode::GC_LABEL:
1127 case TargetOpcode::EH_LABEL:
1128 MCE.emitLabel(MI.getOperand(0).getMCSymbol());
1131 case TargetOpcode::IMPLICIT_DEF:
1132 case TargetOpcode::KILL:
// Windows SEH pseudo-ops carry unwind info only; nothing to encode.
1135 case X86::SEH_PushReg:
1136 case X86::SEH_SaveReg:
1137 case X86::SEH_SaveXMM:
1138 case X86::SEH_StackAlloc:
1139 case X86::SEH_SetFrame:
1140 case X86::SEH_PushFrame:
1141 case X86::SEH_EndPrologue:
1142 case X86::SEH_Epilogue:
1145 case X86::MOVPC32r: {
1146 // This emits the "call" portion of this pseudo instruction.
1147 MCE.emitByte(BaseOpcode);
1148 emitConstant(0, X86II::getSizeOfImm(Desc->TSFlags));
1149 // Remember PIC base.
1150 PICBaseOffset = (intptr_t) MCE.getCurrentPCOffset();
1151 X86JITInfo *JTI = TM.getSubtargetImpl()->getJITInfo();
1152 JTI->setPICBase(MCE.getCurrentPCValue());
// RawFrm: opcode byte followed by an optional single operand, emitted as a
// pc-relative target (MBB/global/symbol/jump-table) or plain immediate.
1158 case X86II::RawFrm: {
1159 MCE.emitByte(BaseOpcode);
1161 if (CurOp == NumOps)
1164 const MachineOperand &MO = MI.getOperand(CurOp++);
1166 DEBUG(dbgs() << "RawFrm CurOp " << CurOp << "\n");
1167 DEBUG(dbgs() << "isMBB " << MO.isMBB() << "\n");
1168 DEBUG(dbgs() << "isGlobal " << MO.isGlobal() << "\n");
1169 DEBUG(dbgs() << "isSymbol " << MO.isSymbol() << "\n");
1170 DEBUG(dbgs() << "isImm " << MO.isImm() << "\n");
1173 emitPCRelativeBlockAddress(MO.getMBB());
1177 if (MO.isGlobal()) {
1178 emitGlobalAddress(MO.getGlobal(), X86::reloc_pcrel_word,
1183 if (MO.isSymbol()) {
1184 emitExternalSymbolAddress(MO.getSymbolName(), X86::reloc_pcrel_word);
1188 // FIXME: Only used by hackish MCCodeEmitter, remove when dead.
1190 emitJumpTableAddress(MO.getIndex(), X86::reloc_pcrel_word);
1194 assert(MO.isImm() && "Unknown RawFrm operand!");
1195 if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) {
1196 // Fix up immediate operand for pc relative calls.
// Convert the absolute target into a displacement relative to the end of
// the 4-byte immediate field (hence the -4).
1197 intptr_t Imm = (intptr_t)MO.getImm();
1198 Imm = Imm - MCE.getCurrentPCValue() - 4;
1199 emitConstant(Imm, X86II::getSizeOfImm(Desc->TSFlags));
1201 emitConstant(MO.getImm(), X86II::getSizeOfImm(Desc->TSFlags));
// AddRegFrm: the register number is added directly into the opcode byte;
// an optional immediate/address operand follows.
1205 case X86II::AddRegFrm: {
1206 MCE.emitByte(BaseOpcode +
1207 getX86RegNum(MI.getOperand(CurOp++).getReg()));
1209 if (CurOp == NumOps)
1212 const MachineOperand &MO1 = MI.getOperand(CurOp++);
1213 unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
1215 emitConstant(MO1.getImm(), Size);
// Non-immediate operand: pick the relocation type based on mode/PIC, with
// per-opcode overrides for the 64-bit move-immediate forms.
1219 unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
1220 : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
1221 if (Opcode == X86::MOV32ri64)
1222 rt = X86::reloc_absolute_word; // FIXME: add X86II flag?
1223 // This should not occur on Darwin for relocatable objects.
1224 if (Opcode == X86::MOV64ri)
1225 rt = X86::reloc_absolute_dword; // FIXME: add X86II flag?
1226 if (MO1.isGlobal()) {
1227 bool Indirect = gvNeedsNonLazyPtr(MO1, TM);
1228 emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
1230 } else if (MO1.isSymbol())
1231 emitExternalSymbolAddress(MO1.getSymbolName(), rt);
1232 else if (MO1.isCPI())
1233 emitConstPoolAddress(MO1.getIndex(), rt);
1234 else if (MO1.isJTI())
1235 emitJumpTableAddress(MO1.getIndex(), rt);
// MRMDestReg: register destination in ModRM r/m, register source in reg.
1239 case X86II::MRMDestReg: {
1240 MCE.emitByte(BaseOpcode);
1242 unsigned SrcRegNum = CurOp+1;
1243 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1246 emitRegModRMByte(MI.getOperand(CurOp).getReg(),
1247 getX86RegNum(MI.getOperand(SrcRegNum).getReg()));
1248 CurOp = SrcRegNum + 1;
// MRMDestMem: memory destination (AddrNumOperands operands), register source.
1251 case X86II::MRMDestMem: {
1252 MCE.emitByte(BaseOpcode);
1254 unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
1255 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1257 emitMemModRMByte(MI, CurOp,
1258 getX86RegNum(MI.getOperand(SrcRegNum).getReg()));
1259 CurOp = SrcRegNum + 1;
// MRMSrcReg: register destination in ModRM reg, register source in r/m.
1263 case X86II::MRMSrcReg: {
1264 MCE.emitByte(BaseOpcode);
1266 unsigned SrcRegNum = CurOp+1;
1267 if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
1270 if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
1273 emitRegModRMByte(MI.getOperand(SrcRegNum).getReg(),
1274 getX86RegNum(MI.getOperand(CurOp).getReg()));
1275 // 2 operands skipped with HasMemOp4, compensate accordingly
1276 CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
// MRMSrcMem: register destination, memory source; PCAdj compensates the
// pc-relative displacement for a trailing immediate that follows ModRM.
1281 case X86II::MRMSrcMem: {
1282 int AddrOperands = X86::AddrNumOperands;
1283 unsigned FirstMemOp = CurOp+1;
1286 ++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
1288 if (HasMemOp4) // Skip second register source (encoded in I8IMM)
1291 MCE.emitByte(BaseOpcode);
1293 intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
1294 X86II::getSizeOfImm(Desc->TSFlags) : 0;
1295 emitMemModRMByte(MI, FirstMemOp,
1296 getX86RegNum(MI.getOperand(CurOp).getReg()),PCAdj);
1297 CurOp += AddrOperands + 1;
// MRM0r..MRM7r: single register operand; the form itself supplies the
// ModRM "reg" field (the /0../7 opcode extension).
1304 case X86II::MRM0r: case X86II::MRM1r:
1305 case X86II::MRM2r: case X86II::MRM3r:
1306 case X86II::MRM4r: case X86II::MRM5r:
1307 case X86II::MRM6r: case X86II::MRM7r: {
1308 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
1310 MCE.emitByte(BaseOpcode);
1311 uint64_t Form = (Desc->TSFlags & X86II::FormMask);
1312 emitRegModRMByte(MI.getOperand(CurOp++).getReg(),
1313 (Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r);
1315 if (CurOp == NumOps)
1318 const MachineOperand &MO1 = MI.getOperand(CurOp++);
1319 unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
1321 emitConstant(MO1.getImm(), Size);
1325 unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
1326 : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
1327 if (Opcode == X86::MOV64ri32)
1328 rt = X86::reloc_absolute_word_sext; // FIXME: add X86II flag?
1329 if (MO1.isGlobal()) {
1330 bool Indirect = gvNeedsNonLazyPtr(MO1, TM);
1331 emitGlobalAddress(MO1.getGlobal(), rt, MO1.getOffset(), 0,
1333 } else if (MO1.isSymbol())
1334 emitExternalSymbolAddress(MO1.getSymbolName(), rt);
1335 else if (MO1.isCPI())
1336 emitConstPoolAddress(MO1.getIndex(), rt);
1337 else if (MO1.isJTI())
1338 emitJumpTableAddress(MO1.getIndex(), rt);
// MRM0m..MRM7m: memory operand with the /0../7 opcode extension in ModRM.
1343 case X86II::MRM0m: case X86II::MRM1m:
1344 case X86II::MRM2m: case X86II::MRM3m:
1345 case X86II::MRM4m: case X86II::MRM5m:
1346 case X86II::MRM6m: case X86II::MRM7m: {
1347 if (HasVEX_4V) // Skip the register dst (which is encoded in VEX_VVVV).
// PCAdj: bytes of trailing immediate that still follow the displacement
// (4 when the trailing operand is a non-immediate address-sized field).
1349 intptr_t PCAdj = (CurOp + X86::AddrNumOperands != NumOps) ?
1350 (MI.getOperand(CurOp+X86::AddrNumOperands).isImm() ?
1351 X86II::getSizeOfImm(Desc->TSFlags) : 4) : 0;
1353 MCE.emitByte(BaseOpcode);
1354 uint64_t Form = (Desc->TSFlags & X86II::FormMask);
1355 emitMemModRMByte(MI, CurOp, (Form==X86II::MRMXm) ? 0 : Form - X86II::MRM0m,
1357 CurOp += X86::AddrNumOperands;
1359 if (CurOp == NumOps)
1362 const MachineOperand &MO = MI.getOperand(CurOp++);
1363 unsigned Size = X86II::getSizeOfImm(Desc->TSFlags);
1365 emitConstant(MO.getImm(), Size);
1369 unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
1370 : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
1371 if (Opcode == X86::MOV64mi32)
1372 rt = X86::reloc_absolute_word_sext; // FIXME: add X86II flag?
1373 if (MO.isGlobal()) {
1374 bool Indirect = gvNeedsNonLazyPtr(MO, TM);
1375 emitGlobalAddress(MO.getGlobal(), rt, MO.getOffset(), 0,
1377 } else if (MO.isSymbol())
1378 emitExternalSymbolAddress(MO.getSymbolName(), rt);
1379 else if (MO.isCPI())
1380 emitConstPoolAddress(MO.getIndex(), rt);
1381 else if (MO.isJTI())
1382 emitJumpTableAddress(MO.getIndex(), rt);
// MRM_XX: instructions whose entire ModRM byte is a fixed literal value.
1386 case X86II::MRM_C0: case X86II::MRM_C1: case X86II::MRM_C2:
1387 case X86II::MRM_C3: case X86II::MRM_C4: case X86II::MRM_C8:
1388 case X86II::MRM_C9: case X86II::MRM_CA: case X86II::MRM_CB:
1389 case X86II::MRM_CF: case X86II::MRM_D0: case X86II::MRM_D1:
1390 case X86II::MRM_D4: case X86II::MRM_D5: case X86II::MRM_D6:
1391 case X86II::MRM_D7: case X86II::MRM_D8: case X86II::MRM_D9:
1392 case X86II::MRM_DA: case X86II::MRM_DB: case X86II::MRM_DC:
1393 case X86II::MRM_DD: case X86II::MRM_DE: case X86II::MRM_DF:
1394 case X86II::MRM_E0: case X86II::MRM_E1: case X86II::MRM_E2:
1395 case X86II::MRM_E3: case X86II::MRM_E4: case X86II::MRM_E5:
1396 case X86II::MRM_E8: case X86II::MRM_E9: case X86II::MRM_EA:
1397 case X86II::MRM_EB: case X86II::MRM_EC: case X86II::MRM_ED:
1398 case X86II::MRM_EE: case X86II::MRM_F0: case X86II::MRM_F1:
1399 case X86II::MRM_F2: case X86II::MRM_F3: case X86II::MRM_F4:
1400 case X86II::MRM_F5: case X86II::MRM_F6: case X86II::MRM_F7:
1401 case X86II::MRM_F8: case X86II::MRM_F9: case X86II::MRM_FA:
1402 case X86II::MRM_FB: case X86II::MRM_FC: case X86II::MRM_FD:
1403 case X86II::MRM_FE: case X86II::MRM_FF:
1404 MCE.emitByte(BaseOpcode);
// Map the form enum back to the literal ModRM byte value.
1407 switch (TSFlags & X86II::FormMask) {
1408 default: llvm_unreachable("Invalid Form");
1409 case X86II::MRM_C0: MRM = 0xC0; break;
1410 case X86II::MRM_C1: MRM = 0xC1; break;
1411 case X86II::MRM_C2: MRM = 0xC2; break;
1412 case X86II::MRM_C3: MRM = 0xC3; break;
1413 case X86II::MRM_C4: MRM = 0xC4; break;
1414 case X86II::MRM_C8: MRM = 0xC8; break;
1415 case X86II::MRM_C9: MRM = 0xC9; break;
1416 case X86II::MRM_CA: MRM = 0xCA; break;
1417 case X86II::MRM_CB: MRM = 0xCB; break;
1418 case X86II::MRM_CF: MRM = 0xCF; break;
1419 case X86II::MRM_D0: MRM = 0xD0; break;
1420 case X86II::MRM_D1: MRM = 0xD1; break;
1421 case X86II::MRM_D4: MRM = 0xD4; break;
1422 case X86II::MRM_D5: MRM = 0xD5; break;
1423 case X86II::MRM_D6: MRM = 0xD6; break;
1424 case X86II::MRM_D7: MRM = 0xD7; break;
1425 case X86II::MRM_D8: MRM = 0xD8; break;
1426 case X86II::MRM_D9: MRM = 0xD9; break;
1427 case X86II::MRM_DA: MRM = 0xDA; break;
1428 case X86II::MRM_DB: MRM = 0xDB; break;
1429 case X86II::MRM_DC: MRM = 0xDC; break;
1430 case X86II::MRM_DD: MRM = 0xDD; break;
1431 case X86II::MRM_DE: MRM = 0xDE; break;
1432 case X86II::MRM_DF: MRM = 0xDF; break;
1433 case X86II::MRM_E0: MRM = 0xE0; break;
1434 case X86II::MRM_E1: MRM = 0xE1; break;
1435 case X86II::MRM_E2: MRM = 0xE2; break;
1436 case X86II::MRM_E3: MRM = 0xE3; break;
1437 case X86II::MRM_E4: MRM = 0xE4; break;
1438 case X86II::MRM_E5: MRM = 0xE5; break;
1439 case X86II::MRM_E8: MRM = 0xE8; break;
1440 case X86II::MRM_E9: MRM = 0xE9; break;
1441 case X86II::MRM_EA: MRM = 0xEA; break;
1442 case X86II::MRM_EB: MRM = 0xEB; break;
1443 case X86II::MRM_EC: MRM = 0xEC; break;
1444 case X86II::MRM_ED: MRM = 0xED; break;
1445 case X86II::MRM_EE: MRM = 0xEE; break;
1446 case X86II::MRM_F0: MRM = 0xF0; break;
1447 case X86II::MRM_F1: MRM = 0xF1; break;
1448 case X86II::MRM_F2: MRM = 0xF2; break;
1449 case X86II::MRM_F3: MRM = 0xF3; break;
1450 case X86II::MRM_F4: MRM = 0xF4; break;
1451 case X86II::MRM_F5: MRM = 0xF5; break;
1452 case X86II::MRM_F6: MRM = 0xF6; break;
1453 case X86II::MRM_F7: MRM = 0xF7; break;
1454 case X86II::MRM_F8: MRM = 0xF8; break;
1455 case X86II::MRM_F9: MRM = 0xF9; break;
1456 case X86II::MRM_FA: MRM = 0xFA; break;
1457 case X86II::MRM_FB: MRM = 0xFB; break;
1458 case X86II::MRM_FC: MRM = 0xFC; break;
1459 case X86II::MRM_FD: MRM = 0xFD; break;
1460 case X86II::MRM_FE: MRM = 0xFE; break;
1461 case X86II::MRM_FF: MRM = 0xFF; break;
// Emit any remaining trailing immediate(s).
1467 while (CurOp != NumOps && NumOps - CurOp <= 2) {
1468 // The last source register of a 4 operand instruction in AVX is encoded
1469 // in bits[7:4] of a immediate byte.
1470 if ((TSFlags >> X86II::VEXShift) & X86II::VEX_I8IMM) {
1471 const MachineOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
1474 unsigned RegNum = getX86RegNum(MO.getReg()) << 4;
1475 if (X86II::isX86_64ExtendedReg(MO.getReg()))
1477 // If there is an additional 5th operand it must be an immediate, which
1478 // is encoded in bits[3:0]
1479 if (CurOp != NumOps) {
1480 const MachineOperand &MIMM = MI.getOperand(CurOp++);
1482 unsigned Val = MIMM.getImm();
1483 assert(Val < 16 && "Immediate operand value out of range");
// Emit the combined reg[7:4] / imm[3:0] byte.
1487 emitConstant(RegNum, 1);
1489 emitConstant(MI.getOperand(CurOp++).getImm(),
1490 X86II::getSizeOfImm(Desc->TSFlags));
// Sanity check: every declared operand of a non-variadic instruction must
// have been consumed by the encoding above.
1494 if (!MI.isVariadic() && CurOp != NumOps) {
1496 dbgs() << "Cannot encode all operands of: " << MI << "\n";
1498 llvm_unreachable(nullptr);
// Close out the debug location opened at the top of this function.
1501 MCE.processDebugLoc(MI.getDebugLoc(), false);