//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Option to allow disabling arithmetic relaxation to workaround PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instructions for X86"));

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4:
    return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
    return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  StringRef CPU;
  bool HasNopl;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {
    // Multi-byte (0f 1f) NOPs are not available on a number of older CPUs.
    HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
              CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
              CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
              CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
              CPU != "c3" && CPU != "c3-2";
  }

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
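      // Each entry is { Name, TargetOffset (bits), TargetSize (bits), Flags }
      // and is indexed by Kind - FirstTargetFixupKind, so the order must match
      // the fixup enum in X86FixupKinds.h. All four target fixups are 32 bits
      // wide; the RIP-relative ones are additionally marked PC-relative.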
93 { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
94 { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
95 { "reloc_signed_4byte", 0, 4 * 8, 0},
96 { "reloc_global_offset_table", 0, 4 * 8, 0}
99 if (Kind < FirstTargetFixupKind)
100 return MCAsmBackend::getFixupKindInfo(Kind);
102 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
104 return Infos[Kind - FirstTargetFixupKind];

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // code produced by gas.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
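    // The bytes are written little-endian, so e.g. a 4-byte fixup with
    // Value == 0x1234 patches the target bytes to 34 12 00 00.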
  }

  bool mayNeedRelaxation(const MCInst &Inst) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}
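
// At the encoding level this swaps the short form of a jump for the long one:
// e.g. JNE_1 is the 2-byte `75 rel8` encoding, JNE_4 the 6-byte `0f 85 rel32`
// encoding, and JMP_1 (`eb rel8`) becomes the 5-byte `e9 rel32`.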

static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8: return X86::PUSHi32;
  case X86::PUSH16i8: return X86::PUSHi16;
  case X86::PUSH64i8: return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}
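
// Each of these maps an 8-bit sign-extended immediate form to the full-width
// immediate form of the same operation. E.g. ADD32ri8 encodes `addl $imm, %ebx`
// with a one-byte immediate (range -128..127); if the immediate is a symbolic
// expression whose final value does not fit in 8 bits, the instruction has to
// grow into ADD32ri, which carries a 4-byte immediate.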

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}
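
// In practice this means an arithmetic instruction is only a relaxation
// candidate when one of its operands is a symbolic expression whose value is
// not known yet (e.g. `cmpl $external_sym, %ecx`); a plain constant immediate
// has typically already been matched to an appropriately sized opcode. The
// !hasRIP part conservatively excludes RIP-relative operands (see the FIXME
// above).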

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                          uint64_t Value,
                                          const MCRelaxableFragment *DF,
                                          const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}
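
// For example, Value == 200 forces relaxation (int8_t(200) is -56, which is
// not equal to 200), while Value == 0xFFFFFFFFFFFFFF9C (-100) fits in a signed
// i8 and the short form is kept.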

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel (or,
  // for the arithmetic forms, from an 8-bit to a 32-bit immediate).
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // This CPU doesn't support long nops. If needed add more.
  // FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
  if (!HasNopl) {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
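
// For example, Count == 17 is emitted as one 15-byte nop (five 0x66 operand
// size prefixes followed by the 10-byte `nopw %cs:0L(%[re]ax,%[re]ax,1)`
// pattern, Nops[9]) and then the 2-byte `xchg %ax,%ax` (Nops[1]).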

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
    : X86AsmBackend(T, CPU), OSABI(_OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
    /// the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace
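
// The resulting compact unwind encoding is a single 32-bit word: bits 24-27
// select one of the modes above. For BP_FRAME, bits 16-23 hold the stack
// adjustment and bits 0-14 the saved registers (3 bits each). For STACK_IMMD
// and STACK_IND, bits 16-23 hold the stack size (or the text offset of the
// `subl $nnnnnn, %esp` immediate), bits 13-15 any extra adjustment, bits 10-12
// the number of saved registers, and bits 0-9 their permutation (see below).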

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Stack slot size of a pushed register.
  unsigned PushInstrSize;                ///< Size of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to divide the stack size by.

protected:
  /// \brief Implementation of the algorithm to generate the compact unwind
  /// encoding for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return CU::UNWIND_MODE_DWARF;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //   movq %rsp, %rbp
        //  L0:
        //   .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts accumulated before the frame pointer was set up.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        SavedRegIdx = 0;
        StackAdjust = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //   pushq %rbp
        //  L0:
        //   .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //   subq $72, %rsp
        //  L0:
        //   .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
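        // E.g. `.cfi_def_cfa_offset 80` with StackDivide == 8 records a
        // StackSize of 10; the CFA offset covers the return address and any
        // pushes as well as the explicit stack subtraction.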
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //  .cfi_offset %rbx, -40
        //  .cfi_offset %r14, -32
        //  .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
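      // E.g. a frame-based prologue that pushes %rbx and then %r14 after
      // setting up %rbp: RegEnc = 1 | (4 << 3) = 0x21 (RBX is compact unwind
      // register 1, R14 is 4), StackAdjust = 2 * 8 / 8 = 2, and the final
      // encoding is 0x01000000 | (2 << 16) | 0x21 = 0x01020021.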
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;

      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
561 if ((StackAdjust & 0x7) != StackAdjust)
562 // The extra stack adjustments are too big for us to handle.
563 return CU::UNWIND_MODE_DWARF;
565 // Frameless stack with an offset too large for us to encode compactly.
566 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
568 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
570 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
572 // Encode any extra stack stack adjustments (done via push
574 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3 bits for
      // each unused register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    // Convert the physical registers into their compact unwind numbers. Only
    // the first RegCount entries of SavedRegs hold saved registers; the rest
    // are the zeros left by memset.
    for (unsigned i = 0; i < RegCount; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +    RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }
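
    // Each case is a mixed-radix (factorial-style) number: e.g. for
    // RegCount == 3 the digits RenumRegs[3..5] lie in [0,5], [0,4] and [0,3],
    // so 20*a + 4*b + c enumerates all 6*5*4 = 120 possible orderings and
    // easily fits in the 10-bit permutation field.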

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU,
                         MachO::CPUSubTypeX86 st)
    : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU),
      Subtype(st) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64, Subtype);
  }

  bool doesSectionRequireSymbols(const MCSection &Section) const override {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve the
    // access to the appropriate atom unless an external relocation is used. For
    // non-cstring sections, we expect the compiler to use a non-temporary label
    // for anything that could have an addend pointing outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MachO::S_CSTRING_LITERALS;
  }

  bool isSectionAtomizable(const MCSection &Section) const override {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed-size data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MachO::S_4BYTE_LITERALS:
    case MachO::S_8BYTE_LITERALS:
    case MachO::S_16BYTE_LITERALS:
    case MachO::S_LITERAL_POINTERS:
    case MachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MachO::S_LAZY_SYMBOL_POINTERS:
    case MachO::S_MOD_INIT_FUNC_POINTERS:
    case MachO::S_MOD_TERM_FUNC_POINTERS:
    case MachO::S_INTERPOSING:
      return false;
    }
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

} // end anonymous namespace

MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7), CS);
  }

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}
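
// These factory functions are how the rest of LLVM obtains an X86 assembler
// backend: they are registered with the TargetRegistry (via
// TargetRegistry::RegisterMCAsmBackend in the X86 MC target-description code),
// so that, for example, `llvm-mc -triple x86_64-apple-macosx10.9 -filetype=obj`
// ends up constructing a DarwinX86_64AsmBackend with compact unwind support.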