/*
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <limits.h>

#include <atomic>
#include <functional>
#include <limits>
#include <mutex>
#include <stdexcept>
#include <string>
#include <vector>

#include <glog/logging.h>

#include <folly/Exception.h>
#include <folly/Function.h>
#include <folly/Likely.h>
#include <folly/MicroSpinLock.h>
#include <folly/Portability.h>
#include <folly/ScopeGuard.h>
#include <folly/SharedMutex.h>
#include <folly/container/Foreach.h>
#include <folly/detail/AtFork.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/PThread.h>

#include <folly/detail/StaticSingletonManager.h>

// In general, emutls cleanup is not guaranteed to play nice with the way
// StaticMeta mixes direct pthread calls and the use of __thread. This has
// caused problems on multiple platforms so don't use __thread there.
//
// XXX: Ideally we would instead determine if emutls is in use at runtime as it
// is possible to configure glibc on Linux to use emutls regardless.
#if !FOLLY_MOBILE && !defined(__APPLE__) && !defined(_MSC_VER)
#define FOLLY_TLD_USE_FOLLY_TLS 1
#else
#undef FOLLY_TLD_USE_FOLLY_TLS
#endif

namespace folly {

enum class TLPDestructionMode { THIS_THREAD, ALL_THREADS };
struct AccessModeStrict {};

namespace threadlocal_detail {

/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  using DeleterFunType = void(void*, TLPDestructionMode);

  bool dispose(TLPDestructionMode mode) {
    if (ptr == nullptr) {
      return false;
    }

    DCHECK(deleter1 != nullptr);
    ownsDeleter ? (*deleter2)(ptr, mode) : (*deleter1)(ptr, mode);
    cleanup();
    return true;
  }

  template <class Ptr>
  void set(Ptr p) {
    auto guard = makeGuard([&] { delete p; });
    DCHECK(ptr == nullptr);
    DCHECK(deleter1 == nullptr);

    if (p) {
      ptr = p;
      deleter1 = [](void* pt, TLPDestructionMode) {
        delete static_cast<Ptr>(pt);
      };
      ownsDeleter = false;
      guard.dismiss();
    }
  }

  template <class Ptr, class Deleter>
  void set(Ptr p, const Deleter& d) {
    auto guard = makeGuard([&] {
      if (p) {
        d(p, TLPDestructionMode::THIS_THREAD);
      }
    });
    DCHECK(ptr == nullptr);
    DCHECK(deleter2 == nullptr);
    if (p) {
      ptr = p;
      auto d2 = d; // gcc-4.8 doesn't decay types correctly in lambda captures
      deleter2 = new std::function<DeleterFunType>(
          [d2](void* pt, TLPDestructionMode mode) {
            d2(static_cast<Ptr>(pt), mode);
          });
      ownsDeleter = true;
      guard.dismiss();
    }
  }

  void cleanup() {
    if (ownsDeleter) {
      delete deleter2;
    }
    ptr = nullptr;
    deleter1 = nullptr;
    ownsDeleter = false;
  }

  void* ptr;
  union {
    DeleterFunType* deleter1;
    std::function<DeleterFunType>* deleter2;
  };
  bool ownsDeleter;
};

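// Illustrative sketch only (not part of this header): how a caller such as
// ThreadLocalPtr typically drives ElementWrapper. The one-argument set()
// stores a plain `delete`-based function pointer in deleter1; the
// two-argument overload heap-allocates a std::function in deleter2 and sets
// ownsDeleter:
//
//   ElementWrapper w{};                         // zero-initialized slot
//   w.set(new std::string("hi"));               // deleter1 path: plain delete
//   w.dispose(TLPDestructionMode::THIS_THREAD); // runs deleter, clears slot
//   w.set(new std::string("ho"),                // deleter2 path: custom
//         [](std::string* s, TLPDestructionMode) { delete s; });
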
struct StaticMetaBase;

/**
 * Per-thread entry. Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements{nullptr};
  size_t elementsCapacity{0};
  ThreadEntry* next{nullptr};
  ThreadEntry* prev{nullptr};
  StaticMetaBase* meta{nullptr};
};

constexpr uint32_t kEntryIDInvalid = std::numeric_limits<uint32_t>::max();

struct PthreadKeyUnregisterTester;

/**
 * We want to disable the onThreadExit call at the end of shutdown; we don't
 * care about leaking memory at that point.
 *
 * Otherwise, if ThreadLocal is used in a shared library, onThreadExit may be
 * called after dlclose().
 *
 * This class has a single static instance; however, since it's so widely
 * used, directly or indirectly, by so many classes, we need to take care to
 * avoid problems stemming from the Static Initialization/Destruction Order
 * Fiascos. Therefore this class needs to be constexpr-constructible, so as
 * to avoid the need for this to participate in init/destruction order.
 */
class PthreadKeyUnregister {
 public:
  static constexpr size_t kMaxKeys = 1UL << 16;

  ~PthreadKeyUnregister() {
    // If static constructor priorities are not supported then
    // ~PthreadKeyUnregister logic is not safe.
#if !defined(__APPLE__) && !defined(_MSC_VER)
    MSLGuard lg(lock_);
    while (size_) {
      pthread_key_delete(keys_[--size_]);
    }
#endif
  }

  static void registerKey(pthread_key_t key) {
    instance_.registerKeyImpl(key);
  }

 private:
  /**
   * Only one global instance should exist, hence this is private.
   * See also the important note at the top of this class about `constexpr`
   * usage.
   */
  constexpr PthreadKeyUnregister() : lock_(), size_(0), keys_() {}
  friend struct folly::threadlocal_detail::PthreadKeyUnregisterTester;

  void registerKeyImpl(pthread_key_t key) {
    MSLGuard lg(lock_);
    if (size_ == kMaxKeys) {
      throw std::logic_error("pthread_key limit has already been reached");
    }
    keys_[size_++] = key;
  }

  MicroSpinLock lock_;
  size_t size_;
  pthread_key_t keys_[kMaxKeys];

  static PthreadKeyUnregister instance_;
};

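// Illustrative sketch only (the real call site is assumed to live in the
// corresponding .cpp): a pthread key is handed to registerKey() right after
// creation, so the single static instance_ can delete every live key at
// shutdown:
//
//   pthread_key_t key;
//   int ret = pthread_key_create(&key, &StaticMetaBase::onThreadExit);
//   checkPosixError(ret, "pthread_key_create failed");
//   PthreadKeyUnregister::registerKey(key);
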
struct StaticMetaBase {
  // Represents an ID of a thread local object. Initially set to the maximum
  // uint. This representation allows us to avoid a branch in accessing TLS
  // data (because if we test capacity > id and id == maxuint, the test always
  // fails). It also lets us keep a constexpr constructor and avoid SIOF.
  class EntryID {
   public:
    std::atomic<uint32_t> value;

    constexpr EntryID() : value(kEntryIDInvalid) {}

    EntryID(EntryID&& other) noexcept : value(other.value.load()) {
      other.value = kEntryIDInvalid;
    }

    EntryID& operator=(EntryID&& other) {
      assert(this != &other);
      value = other.value.load();
      other.value = kEntryIDInvalid;
      return *this;
    }

    EntryID(const EntryID& other) = delete;
    EntryID& operator=(const EntryID& other) = delete;

    uint32_t getOrInvalid() {
      // It's OK for this to be relaxed, even though we're effectively doing
      // double checked locking in using this value. We only care about the
      // uniqueness of IDs, getOrAllocate does not modify any other memory
      // this thread will use.
      return value.load(std::memory_order_relaxed);
    }

    uint32_t getOrAllocate(StaticMetaBase& meta) {
      uint32_t id = getOrInvalid();
      if (id != kEntryIDInvalid) {
        return id;
      }
      // The lock inside allocate ensures that a single value is allocated
      return meta.allocate(this);
    }
  };

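  // Worked example of the kEntryIDInvalid trick (mirrors StaticMeta::get()
  // below): an unallocated EntryID loads as 0xFFFFFFFF, so the single
  // fast-path test `capacity <= id` is necessarily true (no thread can hold
  // that many elements), and the slow path then allocates the real ID. No
  // separate "is this ID allocated yet?" branch is needed:
  //
  //   uint32_t id = ent->getOrInvalid();  // kEntryIDInvalid if unallocated
  //   if (UNLIKELY(capacity <= id)) {     // always taken for invalid IDs
  //     getSlowReserveAndCache(ent, id, threadEntry, capacity);
  //   }
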
  StaticMetaBase(ThreadEntry* (*threadEntry)(), bool strict);

  [[noreturn]] ~StaticMetaBase() {
    folly::assume_unreachable();
  }

  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }

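  // Worked example of the intrusive circular list maintained by push_back()
  // and erase() above, assuming head_ starts out self-linked
  // (head_.next == head_.prev == &head_):
  //
  //   push_back(a); push_back(b);  // head_ <-> a <-> b <-> head_ (circular)
  //   erase(a);                    // head_ <-> b <-> head_; a is re-linked
  //                                // to itself, so erasing it a second time
  //                                // is a harmless no-op.
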
  static void onThreadExit(void* ptr);

  uint32_t allocate(EntryID* ent);

  void destroy(EntryID* ent);

  /**
   * Reserve enough space in the ThreadEntry::elements for the item
   * @id to fit in.
   */
  void reserve(EntryID* id);

  ElementWrapper& getElement(EntryID* ent);

  uint32_t nextId_;
  std::vector<uint32_t> freeIds_;
  std::mutex lock_;
  SharedMutex accessAllThreadsLock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;
  ThreadEntry* (*threadEntry_)();
  bool strict_;
};

// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system.
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects, collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
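//
// Illustrative sketch only (tag and value types are hypothetical): each
// distinct Tag instantiates its own StaticMeta, so thread-locals declared
// with different tags never contend on the same lock:
//
//   struct MyLibraryTag {};
//   folly::ThreadLocalPtr<Widget> a;                // default tag/StaticMeta
//   folly::ThreadLocalPtr<Widget, MyLibraryTag> b;  // separate StaticMeta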
template <class Tag, class AccessMode>
struct StaticMeta : StaticMetaBase {
  StaticMeta()
      : StaticMetaBase(
            &StaticMeta::getThreadEntrySlow,
            std::is_same<AccessMode, AccessModeStrict>::value) {
    detail::AtFork::registerHandler(
        this,
        /*prepare*/ &StaticMeta::preFork,
        /*parent*/ &StaticMeta::onForkParent,
        /*child*/ &StaticMeta::onForkChild);
  }

  static StaticMeta<Tag, AccessMode>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    /* library-local */ static auto instance =
        detail::createGlobal<StaticMeta<Tag, AccessMode>, void>();
    return *instance;
  }

  FOLLY_ALWAYS_INLINE static ElementWrapper& get(EntryID* ent) {
    uint32_t id = ent->getOrInvalid();
#ifdef FOLLY_TLD_USE_FOLLY_TLS
    static FOLLY_TLS ThreadEntry* threadEntry{};
    static FOLLY_TLS size_t capacity{};
    // Eliminate as many branches and as much extra code as possible in the
    // cached fast path, leaving only one branch here and one indirection below.
    if (UNLIKELY(capacity <= id)) {
      getSlowReserveAndCache(ent, id, threadEntry, capacity);
    }
#else
    ThreadEntry* threadEntry{};
    size_t capacity{};
    getSlowReserveAndCache(ent, id, threadEntry, capacity);
#endif
    return threadEntry->elements[id];
  }

  static void getSlowReserveAndCache(
      EntryID* ent,
      uint32_t& id,
      ThreadEntry*& threadEntry,
      size_t& capacity) {
    auto& inst = instance();
    threadEntry = inst.threadEntry_();
    if (UNLIKELY(threadEntry->elementsCapacity <= id)) {
      inst.reserve(ent);
      id = ent->getOrInvalid();
    }
    capacity = threadEntry->elementsCapacity;
    assert(capacity > id);
  }

  static ThreadEntry* getThreadEntrySlow() {
    auto& meta = instance();
    auto key = meta.pthreadKey_;
    ThreadEntry* threadEntry =
        static_cast<ThreadEntry*>(pthread_getspecific(key));
    if (!threadEntry) {
#ifdef FOLLY_TLD_USE_FOLLY_TLS
      static FOLLY_TLS ThreadEntry threadEntrySingleton;
      threadEntry = &threadEntrySingleton;
#else
      threadEntry = new ThreadEntry();
#endif
      threadEntry->meta = &meta;
      int ret = pthread_setspecific(key, threadEntry);
      checkPosixError(ret, "pthread_setspecific failed");
    }
    return threadEntry;
  }

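  // Note on the two branches above: with FOLLY_TLD_USE_FOLLY_TLS the
  // ThreadEntry lives in native TLS, so no heap allocation is needed and the
  // storage is reclaimed by the runtime; otherwise it is heap-allocated and
  // is expected to be reclaimed via the pthread key's destructor
  // (onThreadExit) when the thread exits.
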
  static void preFork() {
    instance().lock_.lock(); // Make sure it's created
  }

  static void onForkParent() {
    instance().lock_.unlock();
  }

  static void onForkChild() {
    // only the current thread survives
    instance().head_.next = instance().head_.prev = &instance().head_;
    ThreadEntry* threadEntry = instance().threadEntry_();
    // If this thread was in the list before the fork, add it back.
    if (threadEntry->elementsCapacity != 0) {
      instance().push_back(threadEntry);
    }
    instance().lock_.unlock();
  }
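
  // These three hooks follow pthread_atfork() ordering: preFork() acquires
  // lock_ so no other thread can hold it at the moment of fork(); the parent
  // and the (now single-threaded) child then both release it. Without this,
  // the child could inherit lock_ held by a thread that no longer exists in
  // the child, deadlocking its next ThreadLocal operation.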
};

} // namespace threadlocal_detail
} // namespace folly