/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_DETAIL_THREADLOCALDETAIL_H_
#define FOLLY_DETAIL_THREADLOCALDETAIL_H_
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <mutex>
#include <new>
#include <utility>
#include <vector>

#include <glog/logging.h>
#include "folly/Foreach.h"
#include "folly/Exception.h"
#include "folly/Likely.h"
#include "folly/Malloc.h"
// TODO(tudorb): Remove this declaration after Malloc.h is pushed to
// third-party.
extern "C" int allocm(void**, size_t*, size_t, int)
__attribute__((weak));
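
// The weak attribute lets this header link even when jemalloc (which
// provides allocm) is absent; the symbol then resolves to null, and
// usingJEMalloc() below keeps reserve() on the plain calloc() path.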
namespace folly {
namespace threadlocal_detail {
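
// TLPDestructionMode (THIS_THREAD vs. ALL_THREADS) is declared in
// folly/ThreadLocal.h, which includes this header; it tells a deleter
// whether a single thread is exiting or the ThreadLocalPtr itself is
// being destroyed.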
/**
 * Base class for deleters.
 */
class DeleterBase {
 public:
  virtual ~DeleterBase() { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const = 0;
};
/**
 * Simple deleter class that calls delete on the passed-in pointer.
 */
template <class Ptr>
class SimpleDeleter : public DeleterBase {
 public:
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    delete static_cast<Ptr>(ptr);
  }
};
/**
 * Custom deleter that calls a given callable.
 */
template <class Ptr, class Deleter>
class CustomDeleter : public DeleterBase {
 public:
  explicit CustomDeleter(Deleter d) : deleter_(d) { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    deleter_(static_cast<Ptr>(ptr), mode);
  }
 private:
  Deleter deleter_;
};
/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  void dispose(TLPDestructionMode mode) {
    if (ptr != NULL) {
      DCHECK(deleter != NULL);
      deleter->dispose(ptr, mode);
      if (ownsDeleter) {
        delete deleter;
      }
      ptr = NULL;
      deleter = NULL;
      ownsDeleter = false;
    }
  }

  template <class Ptr>
  void set(Ptr p) {
    DCHECK(ptr == NULL);
    DCHECK(deleter == NULL);

    if (p) {
      // We leak a single object here but that is ok.  If we used an
      // object directly, there is a chance that the destructor will be
      // called on that static object before any of the ElementWrappers
      // are disposed and that isn't so nice.
      static auto d = new SimpleDeleter<Ptr>();
      ptr = p;
      deleter = d;
      ownsDeleter = false;
    }
  }

  template <class Ptr, class Deleter>
  void set(Ptr p, Deleter d) {
    DCHECK(ptr == NULL);
    DCHECK(deleter == NULL);
    if (p) {
      ptr = p;
      deleter = new CustomDeleter<Ptr,Deleter>(d);
      ownsDeleter = true;
    }
  }

  void* ptr;
  DeleterBase* deleter;
  bool ownsDeleter;
};
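
// Illustrative lifecycle (hypothetical caller code, not part of this header):
//
//   ElementWrapper w;
//   memset(&w, 0, sizeof(w));     // POD: all-zero is the empty state
//   w.set(new Widget());          // shares one leaked SimpleDeleter<Widget*>
//   w.dispose(TLPDestructionMode::THIS_THREAD);  // deletes the Widget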
/**
 * Per-thread entry.  Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements;
  size_t elementsCapacity;
  ThreadEntry* next;
  ThreadEntry* prev;
};
// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system
// (Tag=void).
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects, all collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
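//
// For example (hypothetical user code, not part of this header):
//
//   struct IOTag {};
//   folly::ThreadLocal<Connection, IOTag> ioState;  // gets its own
//                                                   // StaticMeta and lock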
template <class Tag>
struct StaticMeta {
  static StaticMeta<Tag>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    static bool constructed = (inst_ = new StaticMeta<Tag>());
    (void)constructed; // suppress unused warning
    return *inst_;
  }

  int nextId_;
  std::vector<int> freeIds_;
  std::mutex lock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;
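
  // head_ is the sentinel of a circular, doubly-linked list of every
  // ThreadEntry registered with this StaticMeta; an empty list is
  // head_.next == head_.prev == &head_.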
  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }
  static __thread ThreadEntry threadEntry_;
  static StaticMeta<Tag>* inst_;

  StaticMeta() : nextId_(1) {
    head_.next = head_.prev = &head_;
    int ret = pthread_key_create(&pthreadKey_, &onThreadExit);
    checkPosixError(ret, "pthread_key_create failed");

    ret = pthread_atfork(/*prepare*/ &StaticMeta::preFork,
                         /*parent*/ &StaticMeta::onForkParent,
                         /*child*/ &StaticMeta::onForkChild);
    checkPosixError(ret, "pthread_atfork failed");
  }
  ~StaticMeta() {
    LOG(FATAL) << "StaticMeta lives forever!";
  }

  static void preFork(void) {
    instance().lock_.lock();  // Make sure it's created
  }

  static void onForkParent(void) {
    inst_->lock_.unlock();
  }

  static void onForkChild(void) {
    inst_->head_.next = inst_->head_.prev = &inst_->head_;
    inst_->push_back(&threadEntry_);  // only the current thread survives
    inst_->lock_.unlock();
  }

  static void onThreadExit(void* ptr) {
    auto & meta = instance();
    DCHECK_EQ(ptr, &meta);
    // We wouldn't call pthread_setspecific unless we actually called get()
    DCHECK_NE(threadEntry_.elementsCapacity, 0);
    {
      std::lock_guard<std::mutex> g(meta.lock_);
      meta.erase(&threadEntry_);
      // No need to hold the lock any longer; threadEntry_ is private to this
      // thread now that it's been removed from meta.
    }
    FOR_EACH_RANGE(i, 0, threadEntry_.elementsCapacity) {
      threadEntry_.elements[i].dispose(TLPDestructionMode::THIS_THREAD);
    }
    free(threadEntry_.elements);
    threadEntry_.elements = NULL;
    pthread_setspecific(meta.pthreadKey_, NULL);
  }

  static int create() {
    int id;
    auto & meta = instance();
    std::lock_guard<std::mutex> g(meta.lock_);
    if (!meta.freeIds_.empty()) {
      id = meta.freeIds_.back();
      meta.freeIds_.pop_back();
    } else {
      id = meta.nextId_++;
    }
    return id;
  }
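
  // Ids index into each thread's elements array; destroy() pushes retired
  // ids onto freeIds_ so they can be handed out again, which keeps ids
  // dense and the per-thread arrays small.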

  static void destroy(int id) {
    try {
      auto & meta = instance();
      // Elements in other threads that use this id.
      std::vector<ElementWrapper> elements;
      {
        std::lock_guard<std::mutex> g(meta.lock_);
        for (ThreadEntry* e = meta.head_.next; e != &meta.head_; e = e->next) {
          if (id < e->elementsCapacity && e->elements[id].ptr) {
            elements.push_back(e->elements[id]);

            /*
             * Writing another thread's ThreadEntry from here is fine;
             * the only other potential reader is the owning thread --
             * from onThreadExit (which grabs the lock, so is properly
             * synchronized with us) or from get(), which also grabs
             * the lock if it needs to resize the elements vector.
             *
             * We can't conflict with reads for a get(id), because
             * it's illegal to call get on a thread local that's
             * destructing.
             */
            e->elements[id].ptr = nullptr;
            e->elements[id].deleter = nullptr;
            e->elements[id].ownsDeleter = false;
          }
        }
        meta.freeIds_.push_back(id);
      }
      // Delete elements outside the lock
      FOR_EACH(it, elements) {
        it->dispose(TLPDestructionMode::ALL_THREADS);
      }
    } catch (...) { // Just in case we get a lock error or something anyway...
      LOG(WARNING) << "Destructor discarding an exception that was thrown.";
    }
  }

  /**
   * Reserve enough space in the threadEntry_.elements for the item
   * @id to fit in.
   */
  static void reserve(int id) {
    size_t prevCapacity = threadEntry_.elementsCapacity;
    // Growth factor < 2, see folly/docs/FBVector.md; + 5 to prevent
    // very slow start.
    size_t newCapacity = static_cast<size_t>((id + 5) * 1.7);
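    // For illustration: the first allocation (id 0) reserves 8 slots;
    // growing at id 8 gives 22 slots, and at id 22 gives 45.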
    assert(newCapacity > prevCapacity);
    auto& meta = instance();
    ElementWrapper* reallocated = nullptr;

    // Need to grow.  Note that we can't call realloc, as elements is
    // still linked in meta, so another thread might access invalid memory
    // after realloc succeeds.  We'll copy by hand and update threadEntry_
    // under the lock.
    if (usingJEMalloc()) {
      bool success = false;
      size_t newByteSize = newCapacity * sizeof(ElementWrapper);
      size_t realByteSize = 0;

      // Try to grow in place.
      //
      // Note that rallocm(ALLOCM_ZERO) will only zero newly allocated memory,
      // even if a previous allocation allocated more than we requested.
      // This is fine; we always use ALLOCM_ZERO with jemalloc and we
      // always expand our allocation to the real size.
      if (prevCapacity * sizeof(ElementWrapper) >=
          jemallocMinInPlaceExpandable) {
        success = (rallocm(reinterpret_cast<void**>(&threadEntry_.elements),
                           &realByteSize,
                           newByteSize,
                           0,
                           ALLOCM_NO_MOVE | ALLOCM_ZERO) == ALLOCM_SUCCESS);
      }

      // In-place growth failed.
      if (!success) {
        // Note that, unlike calloc, allocm(... ALLOCM_ZERO) zeros all
        // allocated bytes (*realByteSize) and not just the requested
        // bytes (newByteSize).
        success = (allocm(reinterpret_cast<void**>(&reallocated),
                          &realByteSize,
                          newByteSize,
                          ALLOCM_ZERO) == ALLOCM_SUCCESS);
      }

      if (success) {
        // Expand to real size
        assert(realByteSize / sizeof(ElementWrapper) >= newCapacity);
        newCapacity = realByteSize / sizeof(ElementWrapper);
      } else {
        throw std::bad_alloc();
      }
    } else {  // no jemalloc
      // calloc() is simpler than malloc() followed by memset(), and
      // potentially faster when dealing with a lot of memory, as it can get
      // already-zeroed pages from the kernel.
      reallocated = static_cast<ElementWrapper*>(
          calloc(newCapacity, sizeof(ElementWrapper)));
      if (!reallocated) {
        throw std::bad_alloc();
      }
    }

    // Success, update the entry
    {
      std::lock_guard<std::mutex> g(meta.lock_);

      if (prevCapacity == 0) {
        meta.push_back(&threadEntry_);
      }

      if (reallocated) {
        /*
         * Note: we need to hold the meta lock when copying data out of
         * the old vector, because some other thread might be
         * destructing a ThreadLocal and writing to the elements vector
         * of this thread.
         */
        memcpy(reallocated, threadEntry_.elements,
               sizeof(ElementWrapper) * prevCapacity);
        std::swap(reallocated, threadEntry_.elements);
      }
      threadEntry_.elementsCapacity = newCapacity;
    }

    free(reallocated);

    if (prevCapacity == 0) {
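      // First use of this StaticMeta by this thread: installing a value for
      // the pthread key arms onThreadExit() for this thread; &meta is the
      // token that onThreadExit() DCHECKs against.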
      pthread_setspecific(meta.pthreadKey_, &meta);
    }
  }

  static ElementWrapper& get(int id) {
    if (UNLIKELY(threadEntry_.elementsCapacity <= id)) {
      reserve(id);
      assert(threadEntry_.elementsCapacity > id);
    }
    return threadEntry_.elements[id];
  }
};

template <class Tag> __thread ThreadEntry StaticMeta<Tag>::threadEntry_ = {0};
template <class Tag> StaticMeta<Tag>* StaticMeta<Tag>::inst_ = nullptr;
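
// threadEntry_ is zero-initialized ({0}); that is a valid empty state only
// because ThreadEntry and ElementWrapper are PODs, which reserve() also
// relies on when it memcpy()s the elements array.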
}  // namespace threadlocal_detail
}  // namespace folly
#endif /* FOLLY_DETAIL_THREADLOCALDETAIL_H_ */