/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_THREADLOCALDETAIL_H_
#define FOLLY_DETAIL_THREADLOCALDETAIL_H_

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#include <mutex>
#include <new>
#include <vector>

#include <glog/logging.h>

#include "folly/Foreach.h"
#include "folly/Exception.h"
#include "folly/Malloc.h"

namespace folly {
namespace threadlocal_detail {

/**
 * Base class for deleters.
 */
class DeleterBase {
 public:
  virtual ~DeleterBase() { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const = 0;
};

/**
 * Simple deleter class that calls delete on the passed-in pointer.
 */
template <class Ptr>
class SimpleDeleter : public DeleterBase {
 public:
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    delete static_cast<Ptr>(ptr);
  }
};

/**
 * Custom deleter that calls a given callable.
 */
template <class Ptr, class Deleter>
class CustomDeleter : public DeleterBase {
 public:
  explicit CustomDeleter(Deleter d) : deleter_(d) { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    deleter_(static_cast<Ptr>(ptr), mode);
  }
 private:
  Deleter deleter_;
};

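// Illustrative note (Widget and tlp are hypothetical names, not part of this
// file): a custom deleter is any callable invocable as d(rawPtr, mode).  A
// deleter passed to ThreadLocalPtr::reset(), e.g.
//
//   tlp.reset(new Widget(), [](Widget* w, TLPDestructionMode) { delete w; });
//
// ends up wrapped in a CustomDeleter via ElementWrapper::set(p, d) below.
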
/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  void dispose(TLPDestructionMode mode) {
    if (ptr != nullptr) {
      DCHECK(deleter != nullptr);
      deleter->dispose(ptr, mode);
      if (ownsDeleter) {
        delete deleter;
      }
      ptr = nullptr;
      deleter = nullptr;
      ownsDeleter = false;
    }
  }

  template <class Ptr>
  void set(Ptr p) {
    DCHECK(ptr == nullptr);
    DCHECK(deleter == nullptr);
    if (p) {
      // We leak a single object here but that is ok.  If we used an
      // object directly, there is a chance that the destructor will be
      // called on that static object before any of the ElementWrappers
      // are disposed and that isn't so nice.
      static auto d = new SimpleDeleter<Ptr>();
      ptr = p;
      deleter = d;
      ownsDeleter = false;
    }
  }

  template <class Ptr, class Deleter>
  void set(Ptr p, Deleter d) {
    DCHECK(ptr == nullptr);
    DCHECK(deleter == nullptr);
    if (p) {
      ptr = p;
      deleter = new CustomDeleter<Ptr,Deleter>(d);
      ownsDeleter = true;
    }
  }

  void* ptr;
  DeleterBase* deleter;
  bool ownsDeleter;
};

/**
 * Per-thread entry.  Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements;
  size_t elementsCapacity;
  ThreadEntry* next;
  ThreadEntry* prev;
};

// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system
// (Tag=void).
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects, collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
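//
// Illustrative sketch (MyLibTag, Widget, and perThreadWidget are hypothetical
// names): giving a ThreadLocal its own Tag gives it its own StaticMeta
// instance, and therefore its own lock:
//
//   struct MyLibTag {};
//   folly::ThreadLocal<Widget, MyLibTag> perThreadWidget;
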
template <class Tag>
struct StaticMeta {
  static StaticMeta<Tag>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    static bool constructed = (inst_ = new StaticMeta<Tag>());
    (void)constructed; // suppress unused warning
    return *inst_;
  }

  int nextId_;
  std::vector<int> freeIds_;
  std::mutex lock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;

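  // All ThreadEntry objects for threads using this StaticMeta live in a
  // circular doubly-linked list anchored at head_; push_back() and erase()
  // maintain it and are only called with lock_ held.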
  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }

#if !__APPLE__
  static __thread ThreadEntry threadEntry_;
#endif
  static StaticMeta<Tag>* inst_;

  StaticMeta() : nextId_(1) {
    head_.next = head_.prev = &head_;
    int ret = pthread_key_create(&pthreadKey_, &onThreadExit);
    checkPosixError(ret, "pthread_key_create failed");

    ret = pthread_atfork(/*prepare*/ &StaticMeta::preFork,
                         /*parent*/ &StaticMeta::onForkParent,
                         /*child*/ &StaticMeta::onForkChild);
    checkPosixError(ret, "pthread_atfork failed");
  }

  ~StaticMeta() {
    LOG(FATAL) << "StaticMeta lives forever!";
  }

  static ThreadEntry* getThreadEntry() {
#if !__APPLE__
    return &threadEntry_;
#else
    // OS X has no __thread support, so fall back to pthread_getspecific.
    ThreadEntry* threadEntry =
      static_cast<ThreadEntry*>(pthread_getspecific(inst_->pthreadKey_));
    if (!threadEntry) {
      threadEntry = new ThreadEntry();
      int ret = pthread_setspecific(inst_->pthreadKey_, threadEntry);
      checkPosixError(ret, "pthread_setspecific failed");
    }
    return threadEntry;
#endif
  }

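  // fork() handlers, registered via pthread_atfork() in the constructor:
  // preFork() acquires lock_ so no other thread holds it across the fork;
  // the parent simply releases it, and the child (in which only the forking
  // thread survives) resets the thread list before releasing it.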
  static void preFork(void) {
    instance().lock_.lock();  // Make sure it's created
  }

  static void onForkParent(void) {
    inst_->lock_.unlock();
  }

  static void onForkChild(void) {
    // only the current thread survives
    inst_->head_.next = inst_->head_.prev = &inst_->head_;
    ThreadEntry* threadEntry = getThreadEntry();
    // If this thread was in the list before the fork, add it back.
    if (threadEntry->elementsCapacity != 0) {
      inst_->push_back(threadEntry);
    }
    inst_->lock_.unlock();
  }

  static void onThreadExit(void* ptr) {
    auto & meta = instance();
#if !__APPLE__
    ThreadEntry* threadEntry = getThreadEntry();
    DCHECK_EQ(ptr, &meta);
    DCHECK_GT(threadEntry->elementsCapacity, 0);
#else
    ThreadEntry* threadEntry = static_cast<ThreadEntry*>(ptr);
#endif
    {
      std::lock_guard<std::mutex> g(meta.lock_);
      meta.erase(threadEntry);
      // No need to hold the lock any longer; the ThreadEntry is private to
      // this thread now that it's been removed from meta.
    }
    FOR_EACH_RANGE(i, 0, threadEntry->elementsCapacity) {
      threadEntry->elements[i].dispose(TLPDestructionMode::THIS_THREAD);
    }
    free(threadEntry->elements);
    threadEntry->elements = nullptr;
    pthread_setspecific(meta.pthreadKey_, nullptr);
#if __APPLE__
    // Allocated in getThreadEntry(); free it
    delete threadEntry;
#endif
  }

  static int create() {
    int id;
    auto & meta = instance();
    std::lock_guard<std::mutex> g(meta.lock_);
    if (!meta.freeIds_.empty()) {
      id = meta.freeIds_.back();
      meta.freeIds_.pop_back();
    } else {
      id = meta.nextId_++;
    }
    return id;
  }

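  // Rough lifecycle of a slot id (a sketch, not a contract spelled out in
  // this file): a ThreadLocalPtr obtains an id from create(), every thread
  // that touches it goes through get(id) (which calls reserve(id) the first
  // time to grow that thread's elements array), and the ThreadLocalPtr
  // destructor returns the id via destroy(id) so it can be reused.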
  static void destroy(size_t id) {
    try {
      auto & meta = instance();
      // Elements in other threads that use this id.
      std::vector<ElementWrapper> elements;
      {
        std::lock_guard<std::mutex> g(meta.lock_);
        for (ThreadEntry* e = meta.head_.next; e != &meta.head_; e = e->next) {
          if (id < e->elementsCapacity && e->elements[id].ptr) {
            elements.push_back(e->elements[id]);

            /*
             * Writing another thread's ThreadEntry from here is fine;
             * the only other potential reader is the owning thread --
             * from onThreadExit (which grabs the lock, so is properly
             * synchronized with us) or from get(), which also grabs
             * the lock if it needs to resize the elements vector.
             *
             * We can't conflict with reads for a get(id), because
             * it's illegal to call get on a thread local that's
             * destructing.
             */
            e->elements[id].ptr = nullptr;
            e->elements[id].deleter = nullptr;
            e->elements[id].ownsDeleter = false;
          }
        }
        meta.freeIds_.push_back(id);
      }
      // Delete elements outside the lock
      FOR_EACH(it, elements) {
        it->dispose(TLPDestructionMode::ALL_THREADS);
      }
    } catch (...) { // Just in case we get a lock error or something anyway...
      LOG(WARNING) << "Destructor discarding an exception that was thrown.";
    }
  }

  /**
   * Reserve enough space in the ThreadEntry::elements for the item
   * @id to fit in.
   */
  static void reserve(int id) {
    auto& meta = instance();
    ThreadEntry* threadEntry = getThreadEntry();
    size_t prevCapacity = threadEntry->elementsCapacity;
    // Growth factor < 2, see folly/docs/FBVector.md; + 5 to prevent
    // very slow start.
    size_t newCapacity = static_cast<size_t>((id + 5) * 1.7);
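    // e.g. id 0 yields a capacity of 8 slots; id 10 yields 25.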
    assert(newCapacity > prevCapacity);
    ElementWrapper* reallocated = nullptr;

    // Need to grow. Note that we can't call realloc, as elements is
    // still linked in meta, so another thread might access invalid memory
    // after realloc succeeds. We'll copy by hand and update our ThreadEntry
    // under the lock.
    if (usingJEMalloc()) {
      bool success = false;
      size_t newByteSize = newCapacity * sizeof(ElementWrapper);
      size_t realByteSize = 0;

      // Try to grow in place.
      //
      // Note that rallocm(ALLOCM_ZERO) will only zero newly allocated memory,
      // even if a previous allocation allocated more than we requested.
      // This is fine; we always use ALLOCM_ZERO with jemalloc and we
      // always expand our allocation to the real size.
      if (prevCapacity * sizeof(ElementWrapper) >=
          jemallocMinInPlaceExpandable) {
        success = (rallocm(reinterpret_cast<void**>(&threadEntry->elements),
                           &realByteSize,
                           newByteSize,
                           0,
                           ALLOCM_NO_MOVE | ALLOCM_ZERO) == ALLOCM_SUCCESS);
      }

      if (!success) {
        // In-place growth failed; allocate a new array.
        //
        // Note that, unlike calloc, allocm(... ALLOCM_ZERO) zeros all
        // allocated bytes (*realByteSize) and not just the requested
        // bytes (newByteSize).
        success = (allocm(reinterpret_cast<void**>(&reallocated),
                          &realByteSize,
                          newByteSize,
                          ALLOCM_ZERO) == ALLOCM_SUCCESS);
      }

      if (success) {
        // Expand to real size
        assert(realByteSize / sizeof(ElementWrapper) >= newCapacity);
        newCapacity = realByteSize / sizeof(ElementWrapper);
      } else {
        throw std::bad_alloc();
      }
    } else {  // no jemalloc
      // calloc() is simpler than malloc() followed by memset(), and
      // potentially faster when dealing with a lot of memory, as it can get
      // already-zeroed pages from the kernel.
      reallocated = static_cast<ElementWrapper*>(
        calloc(newCapacity, sizeof(ElementWrapper)));
      if (!reallocated) {
        throw std::bad_alloc();
      }
    }

    // Success, update the entry
    {
      std::lock_guard<std::mutex> g(meta.lock_);

      if (prevCapacity == 0) {
        meta.push_back(threadEntry);
      }

      if (reallocated) {
        /*
         * Note: we need to hold the meta lock when copying data out of
         * the old vector, because some other thread might be
         * destructing a ThreadLocal and writing to the elements vector
         * of this thread.
         */
        memcpy(reallocated, threadEntry->elements,
               sizeof(ElementWrapper) * prevCapacity);
        using std::swap;
        swap(reallocated, threadEntry->elements);
      }
      threadEntry->elementsCapacity = newCapacity;
    }

    // reallocated now holds the old array (or nullptr if we grew in place);
    // free it outside the lock.
    free(reallocated);

#if !__APPLE__
    if (prevCapacity == 0) {
      // Store something non-null so that pthread invokes onThreadExit for
      // this thread; the __thread ThreadEntry is used for actual storage.
      pthread_setspecific(meta.pthreadKey_, &meta);
    }
#endif
  }

  static ElementWrapper& get(size_t id) {
    ThreadEntry* threadEntry = getThreadEntry();
    if (UNLIKELY(threadEntry->elementsCapacity <= id)) {
      reserve(id);
      assert(threadEntry->elementsCapacity > id);
    }
    return threadEntry->elements[id];
  }
};

#if !__APPLE__
template <class Tag> __thread ThreadEntry StaticMeta<Tag>::threadEntry_ = {0};
#endif
template <class Tag> StaticMeta<Tag>* StaticMeta<Tag>::inst_ = nullptr;

}  // namespace threadlocal_detail
}  // namespace folly

#endif /* FOLLY_DETAIL_THREADLOCALDETAIL_H_ */