//===-- X86AsmBackend.cpp - X86 Assembler Backend ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "MCTargetDesc/X86FixupKinds.h"
12 #include "llvm/MC/MCAsmBackend.h"
13 #include "llvm/MC/MCAssembler.h"
14 #include "llvm/MC/MCELFObjectWriter.h"
15 #include "llvm/MC/MCExpr.h"
16 #include "llvm/MC/MCFixupKindInfo.h"
17 #include "llvm/MC/MCMachObjectWriter.h"
18 #include "llvm/MC/MCObjectWriter.h"
19 #include "llvm/MC/MCSectionCOFF.h"
20 #include "llvm/MC/MCSectionELF.h"
21 #include "llvm/MC/MCSectionMachO.h"
22 #include "llvm/Object/MachOFormat.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Support/ELF.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/TargetRegistry.h"
27 #include "llvm/Support/raw_ostream.h"

// Option to allow disabling arithmetic relaxation to work around PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should
// be able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instructions for X86"));

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: llvm_unreachable("invalid fixup kind!");
  case FK_Data_1: return 0;
  case FK_Data_2: return 1;
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_Data_4: return 2;
  case FK_Data_8: return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
public:
  X86AsmBackend(const Target &T)
    : MCAsmBackend() {}

  unsigned getNumFixupKinds() const {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_signed_4byte", 0, 4 * 8, 0 },
      { "reloc_global_offset_table", 0, 4 * 8, 0 }
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that the upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value,
                            const MCInstFragment *DF,
                            const MCAsmLayout &Layout) const;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace
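
// Map a jump with a 1-byte (rel8) signed displacement to the equivalent jump
// with a 4-byte (rel32) displacement. Opcodes that are not short conditional
// or unconditional jumps are returned unchanged, which callers use to detect
// "not relaxable".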
static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}
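
// Map an arithmetic instruction that takes a sign-extended 8-bit immediate
// (the "ri8"/"mi8" forms) to the equivalent instruction with a full-width
// immediate. Opcodes with no wider form are returned unchanged.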
static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

  // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

  // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

  // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

  // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

  // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

  // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

  // PUSH
  case X86::PUSHi8: return X86::PUSHi32;
  case X86::PUSHi16: return X86::PUSHi32;
  case X86::PUSH64i8: return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}
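
// An instruction is a candidate for relaxation only if one of its operands is
// still a symbolic expression, i.e. the immediate is not known until layout
// and might not fit in 8 bits. RIP-relative forms are excluded below; see the
// FIXME at the end of this function.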
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCInstFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // Relax a short form (a 1-byte pcrel branch or a sign-extended imm8
  // arithmetic instruction) into its 4-byte equivalent.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// writeNopData - Write an optimal sequence of nops for \p Count bytes: a
/// single multi-byte nop (padded with 0x66 prefixes when needed) covers the
/// first 15 bytes, and any remaining bytes are filled with 1-byte nops.
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // Write an optimal sequence for the first 15 bytes.
  const uint64_t OptimalCount = (Count < 16) ? Count : 15;
  const uint64_t Prefixes = OptimalCount <= 10 ? 0 : OptimalCount - 10;
  for (uint64_t i = 0, e = Prefixes; i != e; i++)
    OW->Write8(0x66);
  const uint64_t Rest = OptimalCount - Prefixes;
  for (uint64_t i = 0, e = Rest; i != e; i++)
    OW->Write8(Nops[Rest - 1][i]);

  // Finish with single byte nops.
  for (uint64_t i = OptimalCount, e = Count; i != e; ++i)
    OW->Write8(0x90);

  return true;
}

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI)
    : X86AsmBackend(T), OSABI(_OSABI) {
    HasReliableSymbolDifference = true;
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
    return ES.getFlags() & ELF::SHF_MERGE;
  }
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI)
    : ELFX86AsmBackend(T, OSABI) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*Is64Bit*/ false, OSABI);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI)
    : ELFX86AsmBackend(T, OSABI) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*Is64Bit*/ true, OSABI);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit)
    : X86AsmBackend(T)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};

class DarwinX86AsmBackend : public X86AsmBackend {
public:
  DarwinX86AsmBackend(const Target &T)
    : X86AsmBackend(T) { }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_32AsmBackend(const Target &T)
    : DarwinX86AsmBackend(T) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     object::mach::CTM_i386,
                                     object::mach::CSX86_ALL);
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_64AsmBackend(const Target &T)
    : DarwinX86AsmBackend(T) {
    HasReliableSymbolDifference = true;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     object::mach::CTM_x86_64,
                                     object::mach::CSX86_ALL);
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Temporary labels in the string literal sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, so the linker does not have enough information to resolve the
    // access to the appropriate atom unless an external relocation is used.
    // For non-cstring sections, we expect the compiler to use a non-temporary
    // label for anything that could have an addend pointing outside the
    // symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
  }

  virtual bool isSectionAtomizable(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed-size data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MCSectionMachO::S_4BYTE_LITERALS:
    case MCSectionMachO::S_8BYTE_LITERALS:
    case MCSectionMachO::S_16BYTE_LITERALS:
    case MCSectionMachO::S_LITERAL_POINTERS:
    case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
    case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
    case MCSectionMachO::S_INTERPOSING:
      return false;
    }
  }
};

} // end anonymous namespace
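
// Entry points used by the target registry: choose the Mach-O, COFF, or ELF
// flavor of the backend based on the target triple.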
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, StringRef TT) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
    return new DarwinX86_32AsmBackend(T);

  if (TheTriple.isOSWindows())
    return new WindowsX86AsmBackend(T, false);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, StringRef TT) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
    return new DarwinX86_64AsmBackend(T);

  if (TheTriple.isOSWindows())
    return new WindowsX86AsmBackend(T, true);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI);
}