/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_THREADLOCALDETAIL_H_
#define FOLLY_DETAIL_THREADLOCALDETAIL_H_

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <mutex>
#include <string>
#include <vector>

#include <glog/logging.h>

#include <folly/Foreach.h>
#include <folly/Exception.h>
#include <folly/Likely.h>
#include <folly/Malloc.h>

// In general, emutls cleanup is not guaranteed to play nice with the way
// StaticMeta mixes direct pthread calls and the use of __thread. This has
// caused problems on multiple platforms so don't use __thread there.
//
// XXX: Ideally we would instead determine if emutls is in use at runtime as it
// is possible to configure glibc on Linux to use emutls regardless.
#if !__APPLE__ && !__ANDROID__
#define FOLLY_TLD_USE_FOLLY_TLS 1
#else
#undef FOLLY_TLD_USE_FOLLY_TLS
#endif

namespace folly {
namespace threadlocal_detail {

/**
 * Base class for deleters.
 */
class DeleterBase {
 public:
  virtual ~DeleterBase() { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const = 0;
};

/**
 * Simple deleter class that calls delete on the passed-in pointer.
 */
template <class Ptr>
class SimpleDeleter : public DeleterBase {
 public:
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    delete static_cast<Ptr>(ptr);
  }
};

/**
 * Custom deleter that calls a given callable.
 */
template <class Ptr, class Deleter>
class CustomDeleter : public DeleterBase {
 public:
  explicit CustomDeleter(Deleter d) : deleter_(d) { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    deleter_(static_cast<Ptr>(ptr), mode);
  }
 private:
  Deleter deleter_;
};
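
/*
 * For illustration, a sketch of the kind of callable CustomDeleter is meant
 * to wrap -- anything invocable with (Ptr, TLPDestructionMode) works; Widget
 * here is a hypothetical user type:
 *
 *   auto d = [](Widget* w, TLPDestructionMode mode) {
 *     // mode is THIS_THREAD when a thread exits, ALL_THREADS when the
 *     // owning ThreadLocalPtr itself is destroyed.
 *     delete w;
 *   };
 */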

/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  bool dispose(TLPDestructionMode mode) {
    if (ptr == nullptr) {
      return false;
    }

    DCHECK(deleter != nullptr);
    deleter->dispose(ptr, mode);
    cleanup();
    return true;
  }

  void* release() {
    auto retPtr = ptr;

    if (ptr != nullptr) {
      cleanup();
    }

    return retPtr;
  }

  template <class Ptr>
  void set(Ptr p) {
    DCHECK(ptr == nullptr);
    DCHECK(deleter == nullptr);

    if (p) {
      // We leak a single object here but that is ok. If we used an
      // object directly, there is a chance that the destructor will be
      // called on that static object before any of the ElementWrappers
      // are disposed and that isn't so nice.
      static auto d = new SimpleDeleter<Ptr>();
      ptr = p;
      deleter = d;
      ownsDeleter = false;
    }
  }

  template <class Ptr, class Deleter>
  void set(Ptr p, Deleter d) {
    DCHECK(ptr == nullptr);
    DCHECK(deleter == nullptr);
    if (p) {
      ptr = p;
      deleter = new CustomDeleter<Ptr,Deleter>(d);
      ownsDeleter = true;
    }
  }

  void cleanup() {
    if (ownsDeleter) {
      delete deleter;
    }
    ptr = nullptr;
    deleter = nullptr;
    ownsDeleter = false;
  }

  void* ptr;
  DeleterBase* deleter;
  bool ownsDeleter;
};
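
/*
 * A sketch of the intended ElementWrapper lifecycle (Widget is hypothetical):
 *
 *   ElementWrapper w;
 *   memset(&w, 0, sizeof(w));                    // valid empty state (POD)
 *   w.set(new Widget());                         // uses shared SimpleDeleter
 *   w.dispose(TLPDestructionMode::THIS_THREAD);  // deletes, returns true
 *   w.dispose(TLPDestructionMode::THIS_THREAD);  // empty now, returns false
 */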

/**
 * Per-thread entry. Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements;
  size_t elementsCapacity;
  ThreadEntry* next;
  ThreadEntry* prev;
};

// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system
// (Tag=void).
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects, collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
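//
// For example, a library that doesn't want to contend with the default
// Tag=void instances might declare its own tag; MyLibTag and Connection
// below are hypothetical names:
//
//   struct MyLibTag {};
//   folly::ThreadLocal<Connection, MyLibTag> conn;  // own StaticMeta + lock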
template <class Tag>
struct StaticMeta {
  static StaticMeta<Tag>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    static bool constructed = (inst_ = new StaticMeta<Tag>());
    (void)constructed; // suppress unused warning
    return *inst_;
  }

  uint32_t nextId_;
  std::vector<uint32_t> freeIds_;
  std::mutex lock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;

  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }
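
  // head_ acts as a sentinel: an empty list is head_.next == head_.prev ==
  // &head_, and erase() leaves the removed entry self-linked, so erasing the
  // same entry a second time is harmless.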

#ifdef FOLLY_TLD_USE_FOLLY_TLS
  static FOLLY_TLS ThreadEntry threadEntry_;
#endif
  static StaticMeta<Tag>* inst_;

  StaticMeta() : nextId_(1) {
    head_.next = head_.prev = &head_;
    int ret = pthread_key_create(&pthreadKey_, &onThreadExit);
    checkPosixError(ret, "pthread_key_create failed");

    // pthread_atfork is not part of the Android NDK at least as of n9d. If
    // something is trying to call native fork() directly at all with Android's
    // process management model, this is probably the least of the problems.
#if !__ANDROID__
    ret = pthread_atfork(/*prepare*/ &StaticMeta::preFork,
                         /*parent*/ &StaticMeta::onForkParent,
                         /*child*/ &StaticMeta::onForkChild);
    checkPosixError(ret, "pthread_atfork failed");
#endif
  }

  ~StaticMeta() {
    LOG(FATAL) << "StaticMeta lives forever!";
  }

  static ThreadEntry* getThreadEntry() {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
    return &threadEntry_;
#else
    ThreadEntry* threadEntry =
        static_cast<ThreadEntry*>(pthread_getspecific(inst_->pthreadKey_));
    if (!threadEntry) {
      threadEntry = new ThreadEntry();
      int ret = pthread_setspecific(inst_->pthreadKey_, threadEntry);
      checkPosixError(ret, "pthread_setspecific failed");
    }
    return threadEntry;
#endif
  }

  static void preFork(void) {
    instance().lock_.lock();  // Make sure it's created
  }

  static void onForkParent(void) {
    inst_->lock_.unlock();
  }

  static void onForkChild(void) {
    // only the current thread survives
    inst_->head_.next = inst_->head_.prev = &inst_->head_;
    ThreadEntry* threadEntry = getThreadEntry();
    // If this thread was in the list before the fork, add it back.
    if (threadEntry->elementsCapacity != 0) {
      inst_->push_back(threadEntry);
    }
    inst_->lock_.unlock();
  }

  static void onThreadExit(void* ptr) {
    auto& meta = instance();
#ifdef FOLLY_TLD_USE_FOLLY_TLS
    ThreadEntry* threadEntry = getThreadEntry();
    // With folly TLS, the value registered with pthread is &meta (see
    // reserve()), so ptr only serves as a sanity check here.
    DCHECK_EQ(ptr, &meta);
    DCHECK_GT(threadEntry->elementsCapacity, 0);
#else
    ThreadEntry* threadEntry = static_cast<ThreadEntry*>(ptr);
#endif
    {
      std::lock_guard<std::mutex> g(meta.lock_);
      meta.erase(threadEntry);
      // No need to hold the lock any longer; the ThreadEntry is private to
      // this thread now that it's been removed from meta.
    }
    // NOTE: User-provided deleter / object dtor itself may be using ThreadLocal
    // with the same Tag, so dispose() calls below may (re)create some of the
    // elements or even increase elementsCapacity, thus multiple cleanup rounds
    // may be required; see the sketch after this function.
    for (bool shouldRun = true; shouldRun; ) {
      shouldRun = false;
      FOR_EACH_RANGE(i, 0, threadEntry->elementsCapacity) {
        if (threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD)) {
          shouldRun = true;
        }
      }
    }
    free(threadEntry->elements);
    threadEntry->elements = nullptr;
    pthread_setspecific(meta.pthreadKey_, nullptr);

#ifndef FOLLY_TLD_USE_FOLLY_TLS
    // Allocated in getThreadEntry() when not using folly TLS; free it here.
    delete threadEntry;
#endif
  }
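
  /*
   * A sketch of why multiple dispose rounds can be needed above; Logger and
   * Session are hypothetical:
   *
   *   folly::ThreadLocal<Logger> logger;
   *   struct Session {
   *     ~Session() { logger->log("bye"); }  // touches another ThreadLocal
   *   };
   *
   * If logger's element is disposed before Session's during cleanup,
   * ~Session repopulates logger's slot, which must then be disposed again
   * on the next pass of the loop.
   */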

  static uint32_t create() {
    uint32_t id;
    auto& meta = instance();
    std::lock_guard<std::mutex> g(meta.lock_);
    if (!meta.freeIds_.empty()) {
      id = meta.freeIds_.back();
      meta.freeIds_.pop_back();
    } else {
      id = meta.nextId_++;
    }
    return id;
  }
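
  // Illustrative id lifecycle (ids start at 1 and are recycled):
  //
  //   uint32_t a = create();  // 1
  //   uint32_t b = create();  // 2
  //   destroy(a);             // pushes 1 onto freeIds_
  //   uint32_t c = create();  // pops and reuses 1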

  static void destroy(uint32_t id) {
    try {
      auto& meta = instance();
      // Elements in other threads that use this id.
      std::vector<ElementWrapper> elements;
      {
        std::lock_guard<std::mutex> g(meta.lock_);
        for (ThreadEntry* e = meta.head_.next; e != &meta.head_; e = e->next) {
          if (id < e->elementsCapacity && e->elements[id].ptr) {
            elements.push_back(e->elements[id]);

            /*
             * Writing another thread's ThreadEntry from here is fine;
             * the only other potential reader is the owning thread --
             * from onThreadExit (which grabs the lock, so is properly
             * synchronized with us) or from get(), which also grabs
             * the lock if it needs to resize the elements vector.
             *
             * We can't conflict with reads for a get(id), because
             * it's illegal to call get on a thread local that's
             * destructing.
             */
            e->elements[id].ptr = nullptr;
            e->elements[id].deleter = nullptr;
            e->elements[id].ownsDeleter = false;
          }
        }
        meta.freeIds_.push_back(id);
      }
      // Delete elements outside the lock.
      FOR_EACH(it, elements) {
        it->dispose(TLPDestructionMode::ALL_THREADS);
      }
    } catch (...) { // Just in case we get a lock error or something anyway...
      LOG(WARNING) << "Destructor discarding an exception that was thrown.";
    }
  }

  /**
   * Reserve enough space in the ThreadEntry::elements for the item
   * @id to fit in.
   */
  static void reserve(uint32_t id) {
    auto& meta = instance();
    ThreadEntry* threadEntry = getThreadEntry();
    size_t prevCapacity = threadEntry->elementsCapacity;
    // Growth factor < 2, see folly/docs/FBVector.md; + 5 to prevent
    // very small initial allocations.
    size_t newCapacity = static_cast<size_t>((id + 5) * 1.7);
    assert(newCapacity > prevCapacity);
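    // For example, id 0 maps to capacity (0 + 5) * 1.7 = 8 (truncated) and
    // id 8 to (8 + 5) * 1.7 = 22 -- roughly 1.7x growth per resize once ids
    // get large, with the + 5 providing a sane floor for the first call.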
    ElementWrapper* reallocated = nullptr;

    // Need to grow. Note that we can't call realloc, as elements is
    // still linked in meta, so another thread might access invalid memory
    // after realloc succeeds. We'll copy by hand and update our ThreadEntry
    // pointer.
    if (usingJEMalloc()) {
      bool success = false;
      size_t newByteSize = nallocx(newCapacity * sizeof(ElementWrapper), 0);

      // Try to grow in place.
      //
      // Note that xallocx(MALLOCX_ZERO) will only zero newly allocated memory,
      // even if a previous allocation allocated more than we requested.
      // This is fine; we always use MALLOCX_ZERO with jemalloc and we
      // always expand our allocation to the real size.
      if (prevCapacity * sizeof(ElementWrapper) >=
          jemallocMinInPlaceExpandable) {
        success = (xallocx(threadEntry->elements, newByteSize, 0, MALLOCX_ZERO)
                   == newByteSize);
      }

      // In-place growth failed; allocate a new zeroed block instead.
      if (!success) {
        success = ((reallocated = static_cast<ElementWrapper*>(
                        mallocx(newByteSize, MALLOCX_ZERO))) != nullptr);
      }

      if (success) {
        // Expand to real size.
        assert(newByteSize / sizeof(ElementWrapper) >= newCapacity);
        newCapacity = newByteSize / sizeof(ElementWrapper);
      } else {
        throw std::bad_alloc();
      }
    } else {  // no jemalloc
      // calloc() is simpler than malloc() followed by memset(), and
      // potentially faster when dealing with a lot of memory, as it can get
      // already-zeroed pages from the kernel.
      reallocated = static_cast<ElementWrapper*>(
          calloc(newCapacity, sizeof(ElementWrapper)));
      if (!reallocated) {
        throw std::bad_alloc();
      }
    }

    // Success, update the entry.
    {
      std::lock_guard<std::mutex> g(meta.lock_);

      if (prevCapacity == 0) {
        meta.push_back(threadEntry);
      }

      if (reallocated) {
        /*
         * Note: we need to hold the meta lock when copying data out of
         * the old vector, because some other thread might be
         * destructing a ThreadLocal and writing to the elements vector
         * of this thread.
         */
        memcpy(reallocated, threadEntry->elements,
               sizeof(ElementWrapper) * prevCapacity);
        using std::swap;
        swap(reallocated, threadEntry->elements);
      }
      threadEntry->elementsCapacity = newCapacity;
    }

    free(reallocated);

#ifdef FOLLY_TLD_USE_FOLLY_TLS
    if (prevCapacity == 0) {
      // Register with pthread so onThreadExit runs for this thread; the value
      // stored is &meta, since the ThreadEntry itself lives in folly TLS.
      pthread_setspecific(meta.pthreadKey_, &meta);
    }
#endif
  }

  static ElementWrapper& get(uint32_t id) {
    ThreadEntry* threadEntry = getThreadEntry();
    if (UNLIKELY(threadEntry->elementsCapacity <= id)) {
      reserve(id);
      assert(threadEntry->elementsCapacity > id);
    }
    return threadEntry->elements[id];
  }
};
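
/*
 * A sketch of how ThreadLocalPtr (in folly/ThreadLocal.h) is expected to
 * drive StaticMeta; Widget is hypothetical:
 *
 *   uint32_t id = StaticMeta<void>::create();     // ThreadLocalPtr ctor
 *   StaticMeta<void>::get(id).set(new Widget());  // reserve()s, then stores
 *   auto* w = static_cast<Widget*>(StaticMeta<void>::get(id).ptr);
 *   StaticMeta<void>::destroy(id);                // ThreadLocalPtr dtor
 */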

#ifdef FOLLY_TLD_USE_FOLLY_TLS
template <class Tag>
FOLLY_TLS ThreadEntry StaticMeta<Tag>::threadEntry_{nullptr, 0,
                                                    nullptr, nullptr};
#endif

template <class Tag> StaticMeta<Tag>* StaticMeta<Tag>::inst_ = nullptr;

}  // namespace threadlocal_detail
}  // namespace folly

#endif /* FOLLY_DETAIL_THREADLOCALDETAIL_H_ */