/// external functions.
std::map<void*, void*> ExternalFnToStubMap;
- //map addresses to indexes in the GOT
+ /// revGOTMap - map addresses to indexes in the GOT
std::map<void*, unsigned> revGOTMap;
unsigned nextGOTIndex;
/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed. If empty is true, create a function stub
/// pointing at address 0, to be filled in later.
- void *getFunctionStub(Function *F, bool empty = false);
+ void *getFunctionStub(Function *F);
/// getExternalFunctionStub - Return a stub for the function at the
/// specified address, created lazily on demand.
}
void getRelocatableGVs(SmallVectorImpl<GlobalValue*> &GVs,
- SmallVectorImpl<void*> &Ptrs);
+ SmallVectorImpl<void*> &Ptrs);
+
+ GlobalValue *invalidateStub(void *Stub);
/// getGOTIndexForAddress - Return a new or existing index in the GOT for
/// an address. This function only manages slots, it does not manage the
/// getFunctionStub - This returns a pointer to a function stub, creating
/// one on demand as needed.
-void *JITResolver::getFunctionStub(Function *F, bool empty) {
+void *JITResolver::getFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);
  // If we already have a stub for this function, recycle it.
  void *&Stub = state.getFunctionToStubMap(locked)[F];
  if (Stub) return Stub;
+  // (Reaching here means no stub exists yet; Stub is a reference into the
+  // FunctionToStubMap slot, so assigning it below caches the new stub.)
- // Call the lazy resolver function unless we already KNOW it is an external
- // function, in which case we just skip the lazy resolution step.
- void *Actual = empty ? (void*)0 : (void*)(intptr_t)LazyResolverFn;
+ // Call the lazy resolver function unless we are JIT'ing non-lazily, in which
+ // case we must resolve the symbol now.
+ void *Actual = TheJIT->isLazyCompilationDisabled()
+ ? (void *)0 : (void *)(intptr_t)LazyResolverFn;
+
+ // If this is an external declaration, attempt to resolve the address now
+ // to place in the stub.
  if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode()) {
  Actual = TheJIT->getPointerToFunction(F);
  // If we resolved the symbol to a null address (eg. a weak external)
- // don't emit a stub. Return a null pointer to the application.
- if (!Actual) return 0;
+ // don't emit a stub. Return a null pointer to the application. If dlsym
+ // stubs are enabled, not being able to resolve the address is not
+ // meaningful.
+ if (!Actual && !TheJIT->areDlsymStubsEnabled()) return 0;
  }
- // Otherwise, codegen a new stub. For now, the stub will call the lazy
- // resolver function.
+ // Codegen a new stub, calling the lazy resolver or the actual address of the
+ // external function, if it was resolved.
  Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual,
  *TheJIT->getCodeEmitter());
  // JITCompilerFn knows which function to compile!
  state.getStubToFunctionMap(locked)[Stub] = F;
- // If this is an "empty" stub, then inform the JIT that it will need to
- // JIT the function so an address can be provided.
- if (empty)
- TheJIT->addPendingFunction(F);
+ // If we are JIT'ing non-lazily but need to call a function that does not
+ // exist yet, add it to the JIT's work list so that we can fill in the stub
+ // address later.
+ // NOTE(review): !Actual here covers both "not a resolvable declaration" and
+ // "weak external that resolved to null with dlsym stubs on" — confirm both
+ // paths are intended to be queued.
+ if (!Actual && TheJIT->isLazyCompilationDisabled())
+ if (!F->isDeclaration() || F->hasNotBeenReadFromBitcode())
+ TheJIT->addPendingFunction(F);
  return Stub;
}
}
}
+/// invalidateStub - Remove all resolver bookkeeping for the stub at the given
+/// address. Returns the Function or GlobalValue the stub referred to, or null
+/// if it was only recorded in ExternalFnToStubMap (or not found at all).
+GlobalValue *JITResolver::invalidateStub(void *Stub) {
+  MutexGuard locked(TheJIT->lock);
+
+  std::map<Function*,void*> &FM = state.getFunctionToStubMap(locked);
+  std::map<void*,Function*> &SM = state.getStubToFunctionMap(locked);
+  std::map<GlobalValue*,void*> &GM = state.getGlobalToIndirectSymMap(locked);
+
+  // Look up the cheap way first, to see if it's a function stub we are
+  // invalidating. If so, remove it from both the forward and reverse maps.
+  if (SM.find(Stub) != SM.end()) {
+    Function *F = SM[Stub];
+    SM.erase(Stub);
+    FM.erase(F);
+    return F;
+  }
+
+  // Otherwise, it might be an indirect symbol stub. Find it and remove it.
+  // NOTE(review): linear scan over the GV-to-stub map; fine while stub counts
+  // stay small.
+  for (std::map<GlobalValue*,void*>::iterator i = GM.begin(), e = GM.end();
+       i != e; ++i) {
+    if (i->second != Stub)
+      continue;
+    GlobalValue *GV = i->first;
+    GM.erase(i);
+    return GV;
+  }
+
+  // Lastly, check to see if it's in the ExternalFnToStubMap. No GlobalValue
+  // is associated with such a stub, so null is returned below.
+  for (std::map<void *, void *>::iterator i = ExternalFnToStubMap.begin(),
+       e = ExternalFnToStubMap.end(); i != e; ++i) {
+    if (i->second != Stub)
+      continue;
+    ExternalFnToStubMap.erase(i);
+    break;
+  }
+
+  return 0;
+}
+
/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered. It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.
///
void *ConstantPoolBase;
+ /// ConstPoolAddresses - Addresses of individual constant pool entries.
+ ///
+ SmallVector<uintptr_t, 8> ConstPoolAddresses;
+
/// JumpTable - The jump tables for the current function.
///
MachineJumpTableInfo *JumpTable;
// GVSet - a set to keep track of which globals have been seen
SmallPtrSet<const GlobalVariable*, 8> GVSet;
+ // CurFn - The llvm function being emitted. Only valid during
+ // finishFunction().
+ const Function *CurFn;
+
+ // CurFnStubUses - For a given Function, a vector of stubs that it
+ // references. This facilitates the JIT detecting that a stub is no
+ // longer used, so that it may be deallocated.
+ DenseMap<const Function *, SmallVector<void*, 1> > CurFnStubUses;
+
+ // StubFnRefs - For a given pointer to a stub, a set of Functions which
+ // reference the stub. When the count of a stub's references drops to zero,
+ // the stub is unused.
+ DenseMap<void *, SmallPtrSet<const Function*, 1> > StubFnRefs;
+
+ // ExtFnStubs - A map of external function names to stubs which have entries
+ // in the JITResolver's ExternalFnToStubMap.
+ StringMap<void *> ExtFnStubs;
+
public:
- JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit) {
+ JITEmitter(JIT &jit, JITMemoryManager *JMM) : Resolver(jit), CurFn(0) {
MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
if (jit.getJITInfo().needsGOT()) {
MemMgr->AllocateGOT();
/// deallocateMemForFunction - Deallocate all memory for the specified
/// function body.
- void deallocateMemForFunction(Function *F) {
- MemMgr->deallocateMemForFunction(F);
- }
+ void deallocateMemForFunction(Function *F);
+
+ /// AddStubToCurrentFunction - Mark the current function being JIT'd as
+ /// using the stub at the specified address. Allows
+ /// deallocateMemForFunction to also remove stubs no longer referenced.
+ void AddStubToCurrentFunction(void *Stub);
+
+ /// getExternalFnStubs - Accessor for the JIT to find stubs emitted for
+ /// MachineRelocations that reference external functions by name.
+ const StringMap<void*> &getExternalFnStubs() const { return ExtFnStubs; }
virtual void emitLabel(uint64_t LabelID) {
if (LabelLocations.size() <= LabelID)
// If we have already compiled the function, return a pointer to its body.
Function *F = cast<Function>(V);
void *ResultPtr;
- if (!DoesntNeedStub && !TheJIT->isLazyCompilationDisabled())
+ if (!DoesntNeedStub && !TheJIT->isLazyCompilationDisabled()) {
// Return the function stub if it's already created.
ResultPtr = Resolver.getFunctionStubIfAvailable(F);
- else
+ if (ResultPtr)
+ AddStubToCurrentFunction(ResultPtr);
+ } else {
ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
+ }
if (ResultPtr) return ResultPtr;
// If this is an external function pointer, we can force the JIT to
- // 'compile' it, which really just adds it to the map.
- if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode() && DoesntNeedStub)
+ // 'compile' it, which really just adds it to the map. In dlsym mode,
+ // external functions are forced through a stub, regardless of reloc type.
+ if (F->isDeclaration() && !F->hasNotBeenReadFromBitcode() &&
+ DoesntNeedStub && !TheJIT->areDlsymStubsEnabled())
return TheJIT->getPointerToFunction(F);
- // If we are jitting non-lazily but encounter a function that has not been
- // jitted yet, we need to allocate a blank stub to call the function
- // once we JIT it and its address is known.
- if (TheJIT->isLazyCompilationDisabled())
- if (!F->isDeclaration() || F->hasNotBeenReadFromBitcode())
- return Resolver.getFunctionStub(F, true);
-
// Okay, the function has not been compiled yet, if the target callback
// mechanism is capable of rewriting the instruction directly, prefer to do
- // that instead of emitting a stub.
- if (DoesntNeedStub)
+ // that instead of emitting a stub. This uses the lazy resolver, so is not
+ // legal if lazy compilation is disabled.
+ if (DoesntNeedStub && !TheJIT->isLazyCompilationDisabled())
return Resolver.AddCallbackAtLocation(F, Reference);
- // Otherwise, we have to emit a lazy resolving stub.
- return Resolver.getFunctionStub(F);
+ // Otherwise, we have to emit a stub.
+ void *StubAddr = Resolver.getFunctionStub(F);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed. It's possible to
+ // return null from getFunctionStub in the case of a weak extern that fails
+ // to resolve.
+ if (StubAddr)
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
}
void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference,
  bool NoNeedStub) {
+  // NOTE(review): NoNeedStub is not consulted in this body — an indirect
+  // symbol stub is always created; confirm the parameter is intentionally
+  // ignored here.
- // Make sure GV is emitted first.
- // FIXME: For now, if the GV is an external function we force the JIT to
- // compile it so the indirect symbol will contain the fully resolved address.
+ // Make sure GV is emitted first, and create a stub containing the fully
+ // resolved address.
  void *GVAddress = getPointerToGlobal(V, Reference, true);
- return Resolver.getGlobalValueIndirectSym(V, GVAddress);
+ void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress);
+
+ // Add the stub to the current function's list of referenced stubs, so we can
+ // deallocate them if the current function is ever freed.
+ AddStubToCurrentFunction(StubAddr);
+
+ return StubAddr;
+}
+
+/// AddStubToCurrentFunction - Record that the function currently being JIT'd
+/// (CurFn) references the stub at StubAddr, updating both the per-function
+/// stub list and the per-stub function set. No-op unless dlsym stubs are
+/// enabled, since the bookkeeping only feeds stub invalidation.
+void JITEmitter::AddStubToCurrentFunction(void *StubAddr) {
+  if (!TheJIT->areDlsymStubsEnabled())
+    return;
+
+  assert(CurFn && "Stub added to current function, but current function is 0!");
+
+  SmallVectorImpl<void*> &StubsUsed = CurFnStubUses[CurFn];
+  StubsUsed.push_back(StubAddr);
+
+  SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[StubAddr];
+  FnRefs.insert(CurFn);
}
-static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP) {
+static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
+                                           const TargetData *TD) {
  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
  if (Constants.empty()) return 0;
- MachineConstantPoolEntry CPE = Constants.back();
- unsigned Size = CPE.Offset;
- const Type *Ty = CPE.isMachineConstantPoolEntry()
- ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
- Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);
+ // Walk every entry, rounding the running size up to each entry's own
+ // alignment before adding its padded type size, rather than trusting the
+ // precomputed Offset of the final entry as the old code did.
+ unsigned Size = 0;
+ for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ // Alignment is assumed to be a power of two, so a mask suffices.
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Size = (Size + AlignMask) & ~AlignMask;
+ const Type *Ty = CPE.getType();
+ Size += TD->getTypePaddedSize(Ty);
+ }
  return Size;
}
ActualSize = RoundUpToAlign(ActualSize, 16);
// Add the alignment of the constant pool
- ActualSize = RoundUpToAlign(ActualSize,
- 1 << MCP->getConstantPoolAlignment());
+ ActualSize = RoundUpToAlign(ActualSize, MCP->getConstantPoolAlignment());
// Add the constant pool size
- ActualSize += GetConstantPoolSizeInBytes(MCP);
+ ActualSize += GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
// Add the aligment of the jump table info
ActualSize = RoundUpToAlign(ActualSize, MJTI->getAlignment());
(unsigned char *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
if (!Relocations.empty()) {
+ CurFn = F.getFunction();
NumRelos += Relocations.size();
// Resolve the relocations to concrete pointers.
<< ResultPtr << "]\n";
// If the target REALLY wants a stub for this function, emit it now.
- if (!MR.doesntNeedStub())
- ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
+ if (!MR.doesntNeedStub()) {
+ if (!TheJIT->areDlsymStubsEnabled()) {
+ ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
+ } else {
+ void *&Stub = ExtFnStubs[MR.getExternalSymbol()];
+ if (!Stub) {
+ Stub = Resolver.getExternalFunctionStub((void *)&Stub);
+ AddStubToCurrentFunction(Stub);
+ }
+ ResultPtr = Stub;
+ }
+ }
} else if (MR.isGlobalValue()) {
ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
BufferBegin+MR.getMachineCodeOffset(),
}
}
+ CurFn = 0;
TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
Relocations.size(), MemMgr->getGOTBase());
}
<< ": " << (FnEnd-FnStart) << " bytes of text, "
<< Relocations.size() << " relocations\n";
Relocations.clear();
+ ConstPoolAddresses.clear();
// Mark code region readable and executable if it's not so already.
MemMgr->setMemoryExecutable();
return false;
}
+/// deallocateMemForFunction - Deallocate all memory for the specified
+/// function body. Also drop any references the function has to stubs,
+/// invalidating stubs whose reference count hits zero.
+void JITEmitter::deallocateMemForFunction(Function *F) {
+  MemMgr->deallocateMemForFunction(F);
+
+  // If the function did not reference any stubs, return.
+  if (CurFnStubUses.find(F) == CurFnStubUses.end())
+    return;
+
+  // For each referenced stub, erase the reference to this function, and then
+  // erase the list of referenced stubs.
+  SmallVectorImpl<void *> &StubList = CurFnStubUses[F];
+  for (unsigned i = 0, e = StubList.size(); i != e; ++i) {
+    void *Stub = StubList[i];
+
+    // If we already invalidated this stub for this function, continue.
+    // (StubList may contain duplicates if AddStubToCurrentFunction recorded
+    // the same stub more than once for F.)
+    if (StubFnRefs.count(Stub) == 0)
+      continue;
+
+    SmallPtrSet<const Function *, 1> &FnRefs = StubFnRefs[Stub];
+    FnRefs.erase(F);
+
+    // If this function was the last reference to the stub, invalidate the stub
+    // in the JITResolver. Were there a memory manager deallocateStub routine,
+    // we could call that at this point too.
+    if (FnRefs.empty()) {
+      DOUT << "\nJIT: Invalidated Stub at [" << Stub << "]\n";
+      StubFnRefs.erase(Stub);
+
+      // Invalidate the stub. If it is a GV stub, update the JIT's global
+      // mapping for that GV to zero, otherwise, search the string map of
+      // external function names to stubs and remove the entry for this stub.
+      // NOTE(review): the ExtFnStubs scan is linear per invalidated stub —
+      // acceptable while the external-stub count stays small.
+      GlobalValue *GV = Resolver.invalidateStub(Stub);
+      if (GV) {
+        TheJIT->updateGlobalMapping(GV, 0);
+      } else {
+        for (StringMapIterator<void*> i = ExtFnStubs.begin(),
+             e = ExtFnStubs.end(); i != e; ++i) {
+          if (i->second == Stub) {
+            ExtFnStubs.erase(i);
+            break;
+          }
+        }
+      }
+    }
+  }
+  CurFnStubUses.erase(F);
+}
+
+
void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
if (BufferBegin)
return MachineCodeEmitter::allocateSpace(Size, Alignment);
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return;
- MachineConstantPoolEntry CPE = Constants.back();
- unsigned Size = CPE.Offset;
- const Type *Ty = CPE.isMachineConstantPoolEntry()
- ? CPE.Val.MachineCPVal->getType() : CPE.Val.ConstVal->getType();
- Size += TheJIT->getTargetData()->getTypePaddedSize(Ty);
-
- unsigned Align = 1 << MCP->getConstantPoolAlignment();
+ unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
+ unsigned Align = MCP->getConstantPoolAlignment();
ConstantPoolBase = allocateSpace(Size, Align);
ConstantPool = MCP;
<< "] (size: " << Size << ", alignment: " << Align << ")\n";
// Initialize the memory for all of the constant pool entries.
+ unsigned Offset = 0;
for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
- void *CAddr = (char*)ConstantPoolBase+Constants[i].Offset;
- if (Constants[i].isMachineConstantPoolEntry()) {
+ MachineConstantPoolEntry CPE = Constants[i];
+ unsigned AlignMask = CPE.getAlignment() - 1;
+ Offset = (Offset + AlignMask) & ~AlignMask;
+
+ uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset;
+ ConstPoolAddresses.push_back(CAddr);
+ if (CPE.isMachineConstantPoolEntry()) {
// FIXME: add support to lower machine constant pool values into bytes!
cerr << "Initialize memory with machine specific constant pool entry"
<< " has not been implemented!\n";
abort();
}
- TheJIT->InitializeMemory(Constants[i].Val.ConstVal, CAddr);
- DOUT << "JIT: CP" << i << " at [" << CAddr << "]\n";
+ TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
+ DOUT << "JIT: CP" << i << " at [0x"
+ << std::hex << CAddr << std::dec << "]\n";
+
+ const Type *Ty = CPE.Val.ConstVal->getType();
+ Offset += TheJIT->getTargetData()->getTypePaddedSize(Ty);
}
}
uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
  assert(ConstantNum < ConstantPool->getConstants().size() &&
  "Invalid ConstantPoolIndex!");
- return (uintptr_t)ConstantPoolBase +
- ConstantPool->getConstants()[ConstantNum].Offset;
+ // ConstPoolAddresses is populated (one entry per constant, in order) by
+ // emitConstantPool, so a direct index replaces the old base+Offset math.
+ return ConstPoolAddresses[ConstantNum];
}
// getJumpTableEntryAddress - Return the address of the JumpTable with index
SmallVector<GlobalValue*, 8> GVs;
SmallVector<void*, 8> Ptrs;
+ const StringMap<void *> &ExtFns = JE->getExternalFnStubs();
JE->getJITResolver().getRelocatableGVs(GVs, Ptrs);
+ unsigned nStubs = GVs.size() + ExtFns.size();
+
// If there are no relocatable stubs, return.
- if (GVs.empty())
+ if (nStubs == 0)
return;
// If there are no new relocatable stubs, return.
void *CurTable = JE->getMemMgr()->getDlsymTable();
- if (CurTable && (*(unsigned *)CurTable == GVs.size()))
+ if (CurTable && (*(unsigned *)CurTable == nStubs))
return;
// Calculate the size of the stub info
- unsigned offset = 4 + 4 * GVs.size();
+ unsigned offset = 4 + 4 * nStubs + sizeof(intptr_t) * nStubs;
SmallVector<unsigned, 8> Offsets;
for (unsigned i = 0; i != GVs.size(); ++i) {
Offsets.push_back(offset);
offset += GVs[i]->getName().length() + 1;
}
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i) {
+ Offsets.push_back(offset);
+ offset += strlen(i->first()) + 1;
+ }
- // FIXME: This currently allocates new space every time it's called. A
- // different data structure could be used to make this unnecessary.
+ // Allocate space for the new "stub", which contains the dlsym table.
JE->startGVStub(0, offset, 4);
// Emit the number of records
- MCE->emitInt32(GVs.size());
+ MCE->emitInt32(nStubs);
// Emit the string offsets
- for (unsigned i = 0; i != GVs.size(); ++i)
+ for (unsigned i = 0; i != nStubs; ++i)
MCE->emitInt32(Offsets[i]);
- // Emit the pointers
- for (unsigned i = 0; i != GVs.size(); ++i)
- if (sizeof(void *) == 8)
- MCE->emitInt64((intptr_t)Ptrs[i]);
+ // Emit the pointers. Verify that they are at least 2-byte aligned, and set
+ // the low bit to 0 == GV, 1 == Function, so that the client code doing the
+ // relocation can write the relocated pointer at the appropriate place in
+ // the stub.
+ for (unsigned i = 0; i != GVs.size(); ++i) {
+ intptr_t Ptr = (intptr_t)Ptrs[i];
+ assert((Ptr & 1) == 0 && "Stub pointers must be at least 2-byte aligned!");
+
+ if (isa<Function>(GVs[i]))
+ Ptr |= (intptr_t)1;
+
+ if (sizeof(Ptr) == 8)
+ MCE->emitInt64(Ptr);
else
- MCE->emitInt32((intptr_t)Ptrs[i]);
+ MCE->emitInt32(Ptr);
+ }
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i) {
+ intptr_t Ptr = (intptr_t)i->second | 1;
+
+ if (sizeof(Ptr) == 8)
+ MCE->emitInt64(Ptr);
+ else
+ MCE->emitInt32(Ptr);
+ }
- // Emit the strings
+ // Emit the strings.
for (unsigned i = 0; i != GVs.size(); ++i)
MCE->emitString(GVs[i]->getName());
+ for (StringMapConstIterator<void*> i = ExtFns.begin(), e = ExtFns.end();
+ i != e; ++i)
+ MCE->emitString(i->first());
- // Tell the JIT memory manager where it is.
+ // Tell the JIT memory manager where it is. The JIT Memory Manager will
+ // deallocate space for the old one, if one existed.
JE->getMemMgr()->SetDlsymTable(JE->finishGVStub(0));
}