X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86JITInfo.cpp;h=a082c4f8b0effc66c0344b321af7230d9034c3df;hb=5379c6930dc7b3ddb93a42384474cc0e65f7e4c5;hp=a14c155f174563426f8512bd7b48972e9ab8503f;hpb=0261d795f83a45dd53d82e511ae672d6d1f4e298;p=oota-llvm.git

diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp
index a14c155f174..a082c4f8b0e 100644
--- a/lib/Target/X86/X86JITInfo.cpp
+++ b/lib/Target/X86/X86JITInfo.cpp
@@ -11,18 +11,20 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "jit"
 #include "X86JITInfo.h"
 #include "X86Relocations.h"
 #include "X86Subtarget.h"
 #include "X86TargetMachine.h"
-#include "llvm/Function.h"
+#include "llvm/IR/Function.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Valgrind.h"
 #include <cstdlib>
 #include <cstring>
 using namespace llvm;
 
+#define DEBUG_TYPE "jit"
+
 // Determine the platform we're running on
 #if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
 # define X86_64_JIT
@@ -37,6 +39,10 @@ void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
   unsigned NewAddr = (intptr_t)New;
   unsigned OldAddr = (intptr_t)OldWord;
   *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
+
+  // X86 doesn't need to invalidate the processor cache, so just invalidate
+  // Valgrind's cache directly.
+  sys::ValgrindDiscardTranslations(Old, 5);
 }
 
 
@@ -74,7 +80,7 @@ static TargetJITInfo::JITCompilerFn JITCompilerFunction;
 # define CFI(x)
 #endif
 
-// Provide a wrapper for X86CompilationCallback2 that saves non-traditional
+// Provide a wrapper for LLVMX86CompilationCallback2 that saves non-traditional
 // callee saved registers, for the fastcc calling convention.
 extern "C" {
 #if defined(X86_64_JIT)
@@ -122,9 +128,17 @@ extern "C" {
     "movaps %xmm6, 96(%rsp)\n"
     "movaps %xmm7, 112(%rsp)\n"
     // JIT callee
+#if defined(_WIN64) || defined(__CYGWIN__)
+    "subq $32, %rsp\n"
+    "movq %rbp, %rcx\n"    // Pass prev frame and return address
+    "movq 8(%rbp), %rdx\n"
+    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
+    "addq $32, %rsp\n"
+#else
     "movq %rbp, %rdi\n"    // Pass prev frame and return address
     "movq 8(%rbp), %rsi\n"
-    "call " ASMPREFIX "X86CompilationCallback2\n"
+    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
+#endif
     // Restore all XMM arg registers
     "movaps 112(%rsp), %xmm7\n"
     "movaps 96(%rsp), %xmm6\n"
@@ -200,7 +214,7 @@ extern "C" {
     "movl 4(%ebp), %eax\n" // Pass prev frame and return address
     "movl %eax, 4(%esp)\n"
     "movl %ebp, (%esp)\n"
-    "call " ASMPREFIX "X86CompilationCallback2\n"
+    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
     "movl %ebp, %esp\n"    // Restore ESP
     CFI(".cfi_def_cfa_register %esp\n")
     "subl $12, %esp\n"
@@ -256,7 +270,7 @@ extern "C" {
     "movl 4(%ebp), %eax\n" // Pass prev frame and return address
     "movl %eax, 4(%esp)\n"
     "movl %ebp, (%esp)\n"
-    "call " ASMPREFIX "X86CompilationCallback2\n"
+    "call " ASMPREFIX "LLVMX86CompilationCallback2\n"
     "addl $16, %esp\n"
     "movaps 48(%esp), %xmm3\n"
     CFI(".cfi_restore %xmm3\n")
@@ -287,7 +301,7 @@ extern "C" {
    SIZE(X86CompilationCallback_SSE)
   );
 # else
-  void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);
+  void LLVMX86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);
 
   _declspec(naked) void X86CompilationCallback(void) {
     __asm {
@@ -297,10 +311,11 @@ extern "C" {
       push  edx
       push  ecx
      and   esp, -16
+      sub   esp, 16
      mov   eax, dword ptr [ebp+4]
      mov   dword ptr [esp+4], eax
      mov   dword ptr [esp], ebp
-      call  X86CompilationCallback2
+      call  LLVMX86CompilationCallback2
      mov   esp, ebp
      sub   esp, 12
      pop   ecx
@@ -320,20 +335,18 @@ extern "C" {
 #endif
 }
 
-/// X86CompilationCallback2 - This is the target-specific function invoked by the
+/// This is the target-specific function invoked by the
 /// function stub when we did not know the real target of a call. This function
 /// must locate the start of the stub or call site and pass it into the JIT
 /// compiler function.
 extern "C" {
-#if !(defined (X86_64_JIT) && defined(_MSC_VER))
-  // the following function is called only from this translation unit,
-  // unless we are under 64bit Windows with MSC, where there is
-  // no support for inline assembly
-static
-#endif
-void ATTRIBUTE_USED
-X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
+LLVM_ATTRIBUTE_USED // Referenced from inline asm.
+LLVM_LIBRARY_VISIBILITY void LLVMX86CompilationCallback2(intptr_t *StackPtr,
+                                                         intptr_t RetAddr) {
   intptr_t *RetAddrLoc = &StackPtr[1];
+  // We are reading raw stack data here. Tell MemorySanitizer that it is
+  // sufficiently initialized.
+  __msan_unpoison(RetAddrLoc, sizeof(*RetAddrLoc));
   assert(*RetAddrLoc == RetAddr &&
          "Could not find return address on the stack!");
 
@@ -348,7 +361,7 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
 #endif
 
 #if 0
-  DEBUG(errs() << "In callback! Addr=" << (void*)RetAddr
+  DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr
               << " ESP=" << (void*)StackPtr
               << ": Resolving call to function: "
               << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n");
@@ -392,8 +405,10 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
     *(intptr_t *)(RetAddr - 0xa) = NewVal;
     ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
   }
+  sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
 #else
   ((unsigned char*)RetAddr)[-1] = 0xE9;
+  sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
 #endif
 }
 
@@ -408,104 +423,104 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
 
 TargetJITInfo::LazyResolverFn
 X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
+  TsanIgnoreWritesBegin();
   JITCompilerFunction = F;
+  TsanIgnoreWritesEnd();
 
 #if defined (X86_32_JIT) && !defined (_MSC_VER)
-  if (Subtarget->hasSSE1())
+#if defined(__SSE__)
+  // SSE Callback should be called for SSE-enabled LLVM.
+  return X86CompilationCallback_SSE;
+#else
+  if (useSSE)
     return X86CompilationCallback_SSE;
+#endif
 #endif
 
   return X86CompilationCallback;
 }
 
-X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) {
-  Subtarget = &TM.getSubtarget<X86Subtarget>();
+X86JITInfo::X86JITInfo(bool UseSSE) {
+  useSSE = UseSSE;
   useGOT = 0;
-  TLSOffset = 0;
+  TLSOffset = nullptr;
 }
 
 void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                              JITCodeEmitter &JCE) {
-  MachineCodeEmitter::BufferState BS;
 #if defined (X86_64_JIT)
-  JCE.startGVStub(BS, GV, 8, 8);
-  JCE.emitWordLE((unsigned)(intptr_t)ptr);
-  JCE.emitWordLE((unsigned)(((intptr_t)ptr) >> 32));
+  const unsigned Alignment = 8;
+  uint8_t Buffer[8];
+  uint8_t *Cur = Buffer;
+  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr);
+  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32));
 #else
-  JCE.startGVStub(BS, GV, 4, 4);
-  JCE.emitWordLE((intptr_t)ptr);
+  const unsigned Alignment = 4;
+  uint8_t Buffer[4];
+  uint8_t *Cur = Buffer;
+  MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr);
 #endif
-  return JCE.finishGVStub(BS);
+  return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment);
 }
 
-void *X86JITInfo::emitFunctionStub(const Function* F, void *Fn,
+TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
+  // The 64-bit stub contains:
+  //   movabs r10 <- 8-byte-target-address  # 10 bytes
+  //   call|jmp *r10  # 3 bytes
+  // The 32-bit stub contains a 5-byte call|jmp.
+  // If the stub is a call to the compilation callback, an extra byte is added
+  // to mark it as a stub.
+  StubLayout Result = {14, 4};
+  return Result;
+}
+
+void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
                                    JITCodeEmitter &JCE) {
-  MachineCodeEmitter::BufferState BS;
-  // Note, we cast to intptr_t here to silence a -pedantic warning that 
+  // Note, we cast to intptr_t here to silence a -pedantic warning that
   // complains about casting a function pointer to a normal pointer.
 #if defined (X86_32_JIT) && !defined (_MSC_VER)
-  bool NotCC = (Fn != (void*)(intptr_t)X86CompilationCallback &&
-                Fn != (void*)(intptr_t)X86CompilationCallback_SSE);
+  bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
+                Target != (void*)(intptr_t)X86CompilationCallback_SSE);
 #else
-  bool NotCC = Fn != (void*)(intptr_t)X86CompilationCallback;
+  bool NotCC = Target != (void*)(intptr_t)X86CompilationCallback;
 #endif
+  JCE.emitAlignment(4);
+  void *Result = (void*)JCE.getCurrentPCValue();
   if (NotCC) {
 #if defined (X86_64_JIT)
-    JCE.startGVStub(BS, F, 13, 4);
     JCE.emitByte(0x49);          // REX prefix
     JCE.emitByte(0xB8+2);        // movabsq r10
-    JCE.emitWordLE((unsigned)(intptr_t)Fn);
-    JCE.emitWordLE((unsigned)(((intptr_t)Fn) >> 32));
+    JCE.emitWordLE((unsigned)(intptr_t)Target);
+    JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
     JCE.emitByte(0x41);          // REX prefix
     JCE.emitByte(0xFF);          // jmpq *r10
     JCE.emitByte(2 | (4 << 3) | (3 << 6));
 #else
-    JCE.startGVStub(BS, F, 5, 4);
     JCE.emitByte(0xE9);
-    JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
+    JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
 #endif
-    return JCE.finishGVStub(BS);
+    return Result;
   }
 
 #if defined (X86_64_JIT)
-  JCE.startGVStub(BS, F, 14, 4);
   JCE.emitByte(0x49);          // REX prefix
   JCE.emitByte(0xB8+2);        // movabsq r10
-  JCE.emitWordLE((unsigned)(intptr_t)Fn);
-  JCE.emitWordLE((unsigned)(((intptr_t)Fn) >> 32));
+  JCE.emitWordLE((unsigned)(intptr_t)Target);
+  JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
   JCE.emitByte(0x41);          // REX prefix
   JCE.emitByte(0xFF);          // callq *r10
   JCE.emitByte(2 | (2 << 3) | (3 << 6));
 #else
-  JCE.startGVStub(BS, F, 6, 4);
   JCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...
-  JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
+  JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
 #endif
 
   // This used to use 0xCD, but that value is used by JITMemoryManager to
   // initialize the buffer with garbage, which means it may follow a
-  // noreturn function call, confusing X86CompilationCallback2. PR 4929.
+  // noreturn function call, confusing LLVMX86CompilationCallback2. PR 4929.
   JCE.emitByte(0xCE);   // Interrupt - Just a marker identifying the stub!
-  return JCE.finishGVStub(BS);
-}
-
-void X86JITInfo::emitFunctionStubAtAddr(const Function* F, void *Fn, void *Stub,
-                                        JITCodeEmitter &JCE) {
-  MachineCodeEmitter::BufferState BS;
-  // Note, we cast to intptr_t here to silence a -pedantic warning that
-  // complains about casting a function pointer to a normal pointer.
-  JCE.startGVStub(BS, Stub, 5);
-  JCE.emitByte(0xE9);
-#if defined (X86_64_JIT) && !defined (NDEBUG)
-  // Yes, we need both of these casts, or some broken versions of GCC (4.2.4)
-  // get the signed-ness of the expression wrong. Go figure.
-  intptr_t Displacement = (intptr_t)Fn - (intptr_t)JCE.getCurrentPCValue() - 5;
-  assert(((Displacement << 32) >> 32) == Displacement
-         && "PIC displacement does not fit in displacement field!");
-#endif
-  JCE.emitWordLE((intptr_t)Fn-JCE.getCurrentPCValue()-4);
-  JCE.finishGVStub(BS);
+  return Result;
 }
 
 /// getPICJumpTableEntry - Returns the value of the jumptable entry for the
@@ -518,6 +533,15 @@ uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
 #endif
 }
 
+template <typename T> static void addUnaligned(void *Pos, T Delta) {
+  T Value;
+  std::memcpy(reinterpret_cast<char*>(&Value), reinterpret_cast<char*>(Pos),
+              sizeof(T));
+  Value += Delta;
+  std::memcpy(reinterpret_cast<char*>(Pos), reinterpret_cast<char*>(&Value),
+              sizeof(T));
+}
+
 /// relocate - Before the JIT can run a block of code that has been emitted,
 /// it must rewrite the code to contain the actual addresses of any
 /// referenced global symbols.
@@ -531,24 +555,24 @@ void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
       // PC relative relocation, add the relocated value to the value already in
      // memory, after we adjust it for where the PC is.
      ResultPtr = ResultPtr -(intptr_t)RelocPos - 4 - MR->getConstantVal();
-      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation, add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
-      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    }
    case X86::reloc_absolute_word:
    case X86::reloc_absolute_word_sext:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
-      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
+      addUnaligned<unsigned>(RelocPos, ResultPtr);
      break;
    case X86::reloc_absolute_dword:
-      *((intptr_t*)RelocPos) += ResultPtr;
+      addUnaligned<intptr_t>(RelocPos, ResultPtr);
      break;
    }
  }
@@ -560,6 +584,5 @@ char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
   return TLSOffset;
 #else
   llvm_unreachable("Cannot allocate thread local storage on this arch!");
-  return 0;
 #endif
 }
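
The relocate() hunks above switch from direct *(unsigned*)RelocPos stores to the memcpy-based addUnaligned() helper because a relocation field inside JITed code is not guaranteed to be aligned for the pointer type being written, and memcpy performs the read-modify-write portably regardless of alignment. The following standalone sketch illustrates the same pattern; the buffer, offset, and driver are invented for illustration and are not part of the patch.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same shape as the helper the patch adds: load T from a possibly
// misaligned position with memcpy, add the delta, and store it back.
template <typename T> static void addUnaligned(void *Pos, T Delta) {
  T Value;
  std::memcpy(&Value, Pos, sizeof(T));
  Value += Delta;
  std::memcpy(Pos, &Value, sizeof(T));
}

int main() {
  // Illustrative "code" buffer; offset 3 is deliberately misaligned for a
  // 4-byte relocation field, which the old *(unsigned*)RelocPos form could
  // not handle portably.
  unsigned char Buffer[16] = {0};
  addUnaligned<uint32_t>(Buffer + 3, 0x1000u); // apply a 32-bit delta
  uint32_t Field;
  std::memcpy(&Field, Buffer + 3, sizeof(Field));
  std::printf("relocated field = 0x%x\n", (unsigned)Field); // prints 0x1000
  return 0;
}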