//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "llvm/Function.h"
#include "llvm/Config/alloca.h"
#include "llvm/Support/Compiler.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;

// Determine the platform we're running on
#if defined (__x86_64__) || defined (_M_AMD64)
# define X86_64_JIT
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
# define X86_32_JIT
#endif

void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
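  // Note: the rel32 displacement is measured from the end of the 5-byte jmp:
  // OldWord points just past the 0xE9 opcode, so subtracting 4 more accounts
  // for the 32-bit immediate itself.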
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
}

/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host.  This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)

// Check if building with -fPIC
#if defined(__PIC__) && __PIC__ && defined(__linux__)
#define ASMCALLSUFFIX "@PLT"
#else
#define ASMCALLSUFFIX
#endif

// For ELF targets, use a .size and .type directive, to let tools
// know the extent of functions defined in assembler.
#if defined(__ELF__)
# define SIZE(sym) ".size " #sym ", . - " #sym "\n"
# define TYPE_FUNCTION(sym) ".type " #sym ", @function\n"
#else
# define SIZE(sym)
# define TYPE_FUNCTION(sym)
#endif

// Provide a convenient way for disabling usage of CFI directives.
// This is needed for old/broken assemblers (for example, gas on
// Darwin is pretty old and doesn't support these directives)
#if defined(__APPLE__)
# define CFI(x)
#else
// FIXME: Disable this until we really want to use it. Also, we will
//        need to add some workarounds for compilers, which support
//        only a subset of these directives.
# define CFI(x)
#endif

// Provide a wrapper for X86CompilationCallback2 that saves non-traditional
// callee saved registers, for the fastcc calling convention.
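//
// The lazy-compilation path works as follows: the stub emitted by
// emitFunctionStub() ends in a call to X86CompilationCallback, which saves
// the argument registers and hands the caller's frame pointer and return
// address to X86CompilationCallback2. That routine JIT-compiles the callee,
// patches the call site (or the stub itself), and backs up the return
// address so the patched instruction is executed next.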
extern "C" {
#if defined(X86_64_JIT)
# ifndef _MSC_VER
  // No need to save EAX/EDX for X86-64.
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    // Save RBP
    "pushq   %rbp\n"
    CFI(".cfi_def_cfa_offset 16\n")
    CFI(".cfi_offset %rbp, -16\n")
    // Save RSP
    "movq    %rsp, %rbp\n"
    CFI(".cfi_def_cfa_register %rbp\n")
    // Save all int arg registers
    "pushq   %rdi\n"
    CFI(".cfi_rel_offset %rdi, 0\n")
    "pushq   %rsi\n"
    CFI(".cfi_rel_offset %rsi, 8\n")
    "pushq   %rdx\n"
    CFI(".cfi_rel_offset %rdx, 16\n")
    "pushq   %rcx\n"
    CFI(".cfi_rel_offset %rcx, 24\n")
    "pushq   %r8\n"
    CFI(".cfi_rel_offset %r8, 32\n")
    "pushq   %r9\n"
    CFI(".cfi_rel_offset %r9, 40\n")
    // Align stack on 16-byte boundary. ESP might not be properly aligned
    // (8 byte) if this is called from an indirect stub.
    "andq    $-16, %rsp\n"
    // Save all XMM arg registers
    "subq    $128, %rsp\n"
    "movaps  %xmm0, (%rsp)\n"
    "movaps  %xmm1, 16(%rsp)\n"
    "movaps  %xmm2, 32(%rsp)\n"
    "movaps  %xmm3, 48(%rsp)\n"
    "movaps  %xmm4, 64(%rsp)\n"
    "movaps  %xmm5, 80(%rsp)\n"
    "movaps  %xmm6, 96(%rsp)\n"
    "movaps  %xmm7, 112(%rsp)\n"
    // JIT callee
    "movq    %rbp, %rdi\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rsi\n"
    "call    " ASMPREFIX "X86CompilationCallback2" ASMCALLSUFFIX "\n"
    // Restore all XMM arg registers
    "movaps  112(%rsp), %xmm7\n"
    "movaps  96(%rsp), %xmm6\n"
    "movaps  80(%rsp), %xmm5\n"
    "movaps  64(%rsp), %xmm4\n"
    "movaps  48(%rsp), %xmm3\n"
    "movaps  32(%rsp), %xmm2\n"
    "movaps  16(%rsp), %xmm1\n"
    "movaps  (%rsp), %xmm0\n"
    // Restore RSP
    "movq    %rbp, %rsp\n"
    CFI(".cfi_def_cfa_register %rsp\n")
    // Restore all int arg registers
    "subq    $48, %rsp\n"
    CFI(".cfi_adjust_cfa_offset 48\n")
    "popq    %r9\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r9\n")
    "popq    %r8\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r8\n")
    "popq    %rcx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rcx\n")
    "popq    %rdx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdx\n")
    "popq    %rsi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rsi\n")
    "popq    %rdi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdi\n")
    // Restore RBP
    "popq    %rbp\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rbp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );
# else
  // No inline assembler support on this platform. The routine is in external
  // file.
  void X86CompilationCallback();

# endif
#elif defined (X86_32_JIT)
# ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
#  if defined(__APPLE__)
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
#  endif
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2" ASMCALLSUFFIX "\n"
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
    TYPE_FUNCTION(X86CompilationCallback_SSE)
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl    $64, %esp\n"
    // FIXME: provide frame move information for xmm registers.
    // This can be tricky, because CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
    "movaps  %xmm0, (%esp)\n"
    "movaps  %xmm1, 16(%esp)\n"
    "movaps  %xmm2, 32(%esp)\n"
    "movaps  %xmm3, 48(%esp)\n"
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2" ASMCALLSUFFIX "\n"
    "addl    $16, %esp\n"
    "movaps  48(%esp), %xmm3\n"
    CFI(".cfi_restore %xmm3\n")
    "movaps  32(%esp), %xmm2\n"
    CFI(".cfi_restore %xmm2\n")
    "movaps  16(%esp), %xmm1\n"
    CFI(".cfi_restore %xmm1\n")
    "movaps  (%esp), %xmm0\n"
    CFI(".cfi_restore %xmm0\n")
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback_SSE)
  );
# else
  void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);

  _declspec(naked) void X86CompilationCallback(void) {
    __asm {
      push  ebp
      mov   ebp, esp
      push  eax
      push  edx
      push  ecx
      and   esp, -16
      mov   eax, dword ptr [ebp+4]
      mov   dword ptr [esp+4], eax
      mov   dword ptr [esp], ebp
      call  X86CompilationCallback2
      mov   esp, ebp
      sub   esp, 12
      pop   ecx
      pop   edx
      pop   eax
      pop   ebp
      ret
    }
  }

# endif // _MSC_VER
#else // Not an i386 host
  void X86CompilationCallback() {
    assert(0 && "Cannot call X86CompilationCallback() on a non-x86 arch!\n");
    abort();
  }
#endif
}

/// X86CompilationCallback2 - This is the target-specific function invoked by the
/// function stub when we did not know the real target of a call.  This function
/// must locate the start of the stub or call site and pass it into the JIT
/// compiler function.
extern "C" void ATTRIBUTE_USED
X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
  intptr_t *RetAddrLoc = &StackPtr[1];
  assert(*RetAddrLoc == RetAddr &&
         "Could not find return address on the stack!");

  // It's a stub if there is an interrupt marker after the call.
  bool isStub = ((unsigned char*)RetAddr)[0] == 0xCD;
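  // (The 0xCD byte is the marker that emitFunctionStub() places right after
  // the call in every lazy stub, so finding it at the return address means
  // we were reached through a stub rather than a patched call site in
  // generated code.)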

  // The call instruction should have pushed the return address onto the stack...
#if defined (X86_64_JIT)
  RetAddr--;     // Backtrack to the reference itself...
#else
  RetAddr -= 4;  // Backtrack to the reference itself...
#endif

#if 0
  DOUT << "In callback! Addr=" << (void*)RetAddr
       << " ESP=" << (void*)StackPtr
       << ": Resolving call to function: "
       << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n";
#endif

  // Sanity check to make sure this really is a call instruction.
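  // A 64-bit stub calls through a register: 0x41 0xFF <ModRM> encodes
  // "callq *%r10", so the two bytes before the ModRM byte (where RetAddr now
  // points) must be 0x41 0xFF. A 32-bit call site is a direct "call rel32",
  // whose opcode 0xE8 sits one byte before the rel32 field.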
#if defined (X86_64_JIT)
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
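  // On x86-64, RetAddr points at the ModRM byte of "callq *%r10", so
  // RetAddr - 0xa is the 64-bit immediate of the "movabsq $Target, %r10" at
  // the start of the stub. On x86-32, RetAddr points at the call's rel32
  // field, which is relative to the end of the instruction, hence the
  // extra -4.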
#if defined (X86_64_JIT)
  *(intptr_t *)(RetAddr - 0xa) = NewVal;
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal-RetAddr-4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called.  This also makes the
    // 0xCD byte (interrupt) dead, so the marker doesn't affect anything.
#if defined (X86_64_JIT)
    // If the target address is within 32-bit range of the stub, use a
    // PC-relative branch instead of loading the actual address.  (This is
    // considerably shorter than the 64-bit immediate load already there.)
    // We assume here intptr_t is 64 bits.
    intptr_t diff = NewVal-RetAddr+7;
    if (diff >= -2147483648LL && diff <= 2147483647LL) {
      *(unsigned char*)(RetAddr-0xc) = 0xE9;
      *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
    } else {
      *(intptr_t *)(RetAddr - 0xa) = NewVal;
      ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
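      // (2 | (4 << 3) | (3 << 6)) == 0xE2: keep mod=11 (register direct)
      // and r/m=2 (%r10 via the REX.B prefix), but switch the reg field from
      // 2 (call) to 4 (jmp), turning "callq *%r10" into "jmpq *%r10".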
    }
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9;
#endif
  }

  // Change the return address to reexecute the call instruction...
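  // The 64-bit stub's call sequence (movabsq + callq *%r10) is 13 bytes and
  // the 32-bit "call rel32" is 5 bytes, so back the return address up by
  // that much to re-run the instruction we just patched.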
#if defined (X86_64_JIT)
  *RetAddrLoc -= 0xd;
#else
  *RetAddrLoc -= 5;
#endif
}

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  JITCompilerFunction = F;

#if defined (X86_32_JIT) && !defined (_MSC_VER)
  unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
  union {
    unsigned u[3];
    char     c[12];
  } text;

  if (!X86::GetCpuIDAndInfo(0, &EAX, text.u+0, text.u+2, text.u+1)) {
    // FIXME: support for AMD family of processors.
    if (memcmp(text.c, "GenuineIntel", 12) == 0) {
      X86::GetCpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);
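      // Bit 25 of EDX from CPUID leaf 1 is the SSE feature flag.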
      if ((EDX >> 25) & 0x1)
        return X86CompilationCallback_SSE;
    }
  }
#endif

  return X86CompilationCallback;
}

void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                             JITCodeEmitter &JCE) {
#if defined (X86_64_JIT)
  JCE.startGVStub(GV, 8, 8);
  JCE.emitWordLE((unsigned)(intptr_t)ptr);
  JCE.emitWordLE((unsigned)(((intptr_t)ptr) >> 32));
#else
  JCE.startGVStub(GV, 4, 4);
  JCE.emitWordLE((intptr_t)ptr);
#endif
  return JCE.finishGVStub(GV);
}

void *X86JITInfo::emitFunctionStub(const Function* F, void *Fn,
                                   JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
  bool NotCC = (Fn != (void*)(intptr_t)X86CompilationCallback &&
                Fn != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Fn != (void*)(intptr_t)X86CompilationCallback;
#endif
  if (NotCC) {
#if defined (X86_64_JIT)
    JCE.startGVStub(F, 13, 4);
    JCE.emitByte(0x49);          // REX prefix
    JCE.emitByte(0xB8+2);        // movabsq r10
    JCE.emitWordLE((unsigned)(intptr_t)Fn);
    JCE.emitWordLE((unsigned)(((intptr_t)Fn) >> 32));
    JCE.emitByte(0x41);          // REX prefix
    JCE.emitByte(0xFF);          // jmpq *r10
    JCE.emitByte(2 | (4 << 3) | (3 << 6));
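    // ModRM (2 | (4 << 3) | (3 << 6)) == 0xE2: mod=11 (register direct),
    // reg=4 (the /4 opcode extension selects JMP), r/m=2, extended to %r10
    // by the 0x41 REX.B prefix.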
#else
    JCE.startGVStub(F, 5, 4);
    JCE.emitByte(0xE9);
    JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
#endif
    return JCE.finishGVStub(F);
  }

#if defined (X86_64_JIT)
  JCE.startGVStub(F, 14, 4);
  JCE.emitByte(0x49);          // REX prefix
  JCE.emitByte(0xB8+2);        // movabsq r10
  JCE.emitWordLE((unsigned)(intptr_t)Fn);
  JCE.emitWordLE((unsigned)(((intptr_t)Fn) >> 32));
  JCE.emitByte(0x41);          // REX prefix
  JCE.emitByte(0xFF);          // callq *r10
  JCE.emitByte(2 | (2 << 3) | (3 << 6));
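  // ModRM (2 | (2 << 3) | (3 << 6)) == 0xD2: reg=2 is the /2 opcode
  // extension for CALL, so the stub is "movabsq $Fn, %r10; callq *%r10"
  // (13 bytes) followed by the one-byte 0xCD marker emitted below.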
#else
  JCE.startGVStub(F, 6, 4);
  JCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...

  JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
#endif

  JCE.emitByte(0xCD);   // Interrupt - Just a marker identifying the stub!
  return JCE.finishGVStub(F);
}

void X86JITInfo::emitFunctionStubAtAddr(const Function* F, void *Fn, void *Stub,
                                        JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
  JCE.startGVStub(F, Stub, 5);
  JCE.emitByte(0xE9);
#if defined (X86_64_JIT)
  assert(((((intptr_t)Fn-JCE.getCurrentPCValue()-5) << 32) >> 32) ==
         ((intptr_t)Fn-JCE.getCurrentPCValue()-5)
         && "PIC displacement does not fit in displacement field!");
#endif
  JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
  JCE.finishGVStub(F);
}

/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#if defined(X86_64_JIT)
  return BB - Entry;
#else
  return BB - PICBase;
#endif
}

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC relative relocation, add the relocated value to the value already in
      // memory, after we adjust it for where the PC is.
      ResultPtr = ResultPtr -(intptr_t)RelocPos - 4 - MR->getConstantVal();
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation, add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_absolute_word:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    case X86::reloc_absolute_dword:
      *((intptr_t*)RelocPos) += ResultPtr;
      break;
    }
  }
}

char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
#if defined(X86_32_JIT) && !defined(__APPLE__) && !defined(_MSC_VER)
  TLSOffset -= size;
  return TLSOffset;
#else
  assert(0 && "Cannot allocate thread local storage on this arch!\n");
  return 0;
#endif
}