/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_DETAIL_THREADLOCALDETAIL_H_
#define FOLLY_DETAIL_THREADLOCALDETAIL_H_

#include <limits.h>
#include <pthread.h>

#include <string>
#include <vector>

#include <boost/thread/locks.hpp>
#include <boost/thread/mutex.hpp>

#include <glog/logging.h>

#include "folly/Foreach.h"
#include "folly/Likely.h"
#include "folly/Malloc.h"

namespace folly {
namespace threadlocal_detail {

/**
 * Base class for deleters.
 */
class DeleterBase {
 public:
  virtual ~DeleterBase() { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const = 0;
};

/**
 * Simple deleter class that calls delete on the passed-in pointer.
 */
template <class Ptr>
class SimpleDeleter : public DeleterBase {
 public:
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    delete static_cast<Ptr>(ptr);
  }
};

/**
 * Custom deleter that calls a given callable.
 */
template <class Ptr, class Deleter>
class CustomDeleter : public DeleterBase {
 public:
  explicit CustomDeleter(Deleter d) : deleter_(d) { }
  virtual void dispose(void* ptr, TLPDestructionMode mode) const {
    deleter_(static_cast<Ptr>(ptr), mode);
  }
 private:
  Deleter deleter_;
};
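
// Usage sketch (Widget is a hypothetical type): resetting a ThreadLocalPtr
// with a deleter wraps the callable in a CustomDeleter, so it is invoked
// as d(ptr, mode) at thread exit or when the ThreadLocalPtr is destroyed:
//
//   folly::ThreadLocalPtr<Widget> w;
//   w.reset(new Widget, [](Widget* p, TLPDestructionMode) { delete p; });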

/**
 * POD wrapper around an element (a void*) and an associated deleter.
 * This must be POD, as we memset() it to 0 and memcpy() it around.
 */
struct ElementWrapper {
  void dispose(TLPDestructionMode mode) {
    if (ptr != NULL) {
      DCHECK(deleter != NULL);
      deleter->dispose(ptr, mode);
      if (ownsDeleter) {
        delete deleter;
      }
      ptr = NULL;
      deleter = NULL;
      ownsDeleter = false;
    }
  }

  template <class Ptr>
  void set(Ptr p) {
    DCHECK(ptr == NULL);
    DCHECK(deleter == NULL);

    if (p) {
      // We leak a single object here but that is ok.  If we used an
      // object directly, there is a chance that the destructor will be
      // called on that static object before any of the ElementWrappers
      // are disposed and that isn't so nice.
      static auto d = new SimpleDeleter<Ptr>();
      ptr = p;
      deleter = d;
      ownsDeleter = false;
    }
  }

  template <class Ptr, class Deleter>
  void set(Ptr p, Deleter d) {
    DCHECK(ptr == NULL);
    DCHECK(deleter == NULL);
    if (p) {
      ptr = p;
      deleter = new CustomDeleter<Ptr,Deleter>(d);
      ownsDeleter = true;
    }
  }

  void* ptr;
  DeleterBase* deleter;
  bool ownsDeleter;
};

/**
 * Per-thread entry.  Each thread using a StaticMeta object has one.
 * This is written from the owning thread only (under the lock), read
 * from the owning thread (no lock necessary), and read from other threads
 * (under the lock).
 */
struct ThreadEntry {
  ElementWrapper* elements;
  size_t elementsCapacity;
  ThreadEntry* next;
  ThreadEntry* prev;
};

// Held in a singleton to track our global instances.
// We have one of these per "Tag", by default one for the whole system
// (Tag=void).
//
// Creating and destroying ThreadLocalPtr objects, as well as thread exit
// for threads that use ThreadLocalPtr objects collide on a lock inside
// StaticMeta; you can specify multiple Tag types to break that lock.
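//
// Example sketch (IOTag and the element types are hypothetical; any empty
// struct works as a Tag):
//
//   struct IOTag {};
//   folly::ThreadLocalPtr<Connection, IOTag> conn;  // uses IOTag's lock
//   folly::ThreadLocalPtr<Stats> stats;             // uses the default lock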
template <class Tag>
struct StaticMeta {
  static StaticMeta<Tag>& instance() {
    // Leak it on exit, there's only one per process and we don't have to
    // worry about synchronization with exiting threads.
    static bool constructed = (inst = new StaticMeta<Tag>());
    (void)constructed; // suppress unused warning
    return *inst;
  }

  int nextId_;
  std::vector<int> freeIds_;
  boost::mutex lock_;
  pthread_key_t pthreadKey_;
  ThreadEntry head_;
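
  // head_ is the sentinel of a circular doubly-linked list of the
  // ThreadEntry objects of all threads using this StaticMeta; the two
  // helpers below must be called with lock_ held.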
  void push_back(ThreadEntry* t) {
    t->next = &head_;
    t->prev = head_.prev;
    head_.prev->next = t;
    head_.prev = t;
  }

  void erase(ThreadEntry* t) {
    t->next->prev = t->prev;
    t->prev->next = t->next;
    t->next = t->prev = t;
  }

  static __thread ThreadEntry threadEntry_;
  static StaticMeta<Tag>* inst;

  StaticMeta() : nextId_(1) {
    head_.next = head_.prev = &head_;
    int ret = pthread_key_create(&pthreadKey_, &onThreadExit);
    if (ret != 0) {
      std::string msg;
      switch (ret) {
        case EAGAIN:
          char buf[100];
          snprintf(buf, sizeof(buf), "PTHREAD_KEYS_MAX (%d) is exceeded",
                   PTHREAD_KEYS_MAX);
          msg = buf;
          break;
        case ENOMEM:
          msg = "Out-of-memory";
          break;
        default:
          msg = "(unknown error)";
      }
      throw std::runtime_error("pthread_key_create failed: " + msg);
    }
  }
  ~StaticMeta() {
    LOG(FATAL) << "StaticMeta lives forever!";
  }

  static void onThreadExit(void* ptr) {
    auto & meta = instance();
    DCHECK_EQ(ptr, &meta);
    // We wouldn't call pthread_setspecific unless we actually called get()
    DCHECK_NE(threadEntry_.elementsCapacity, 0);
    {
      boost::lock_guard<boost::mutex> g(meta.lock_);
      meta.erase(&threadEntry_);
      // No need to hold the lock any longer; threadEntry_ is private to this
      // thread now that it's been removed from meta.
    }
    FOR_EACH_RANGE(i, 0, threadEntry_.elementsCapacity) {
      threadEntry_.elements[i].dispose(TLPDestructionMode::THIS_THREAD);
    }
    free(threadEntry_.elements);
    threadEntry_.elements = NULL;
    pthread_setspecific(meta.pthreadKey_, NULL);
  }

  static int create() {
    int id;
    auto & meta = instance();
    boost::lock_guard<boost::mutex> g(meta.lock_);
    if (!meta.freeIds_.empty()) {
      id = meta.freeIds_.back();
      meta.freeIds_.pop_back();
    } else {
      id = meta.nextId_++;
    }
    return id;
  }
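
  // Recycles a slot id handed out by create(), disposing the slot's element
  // in every thread that has set it.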
  static void destroy(int id) {
    try {
      auto & meta = instance();
      // Elements in other threads that use this id.
      std::vector<ElementWrapper> elements;
      {
        boost::lock_guard<boost::mutex> g(meta.lock_);
        for (ThreadEntry* e = meta.head_.next; e != &meta.head_; e = e->next) {
          if (id < e->elementsCapacity && e->elements[id].ptr) {
            elements.push_back(e->elements[id]);

            // Writing another thread's ThreadEntry from here is fine;
            // the only other potential reader is the owning thread --
            // from onThreadExit (which grabs the lock, so is properly
            // synchronized with us) or from get() -- but using get() on a
            // ThreadLocalPtr object that's being destroyed is a bug, so
            // undefined behavior is fair game.
            e->elements[id].ptr = NULL;
            e->elements[id].deleter = NULL;
          }
        }
        meta.freeIds_.push_back(id);
      }
      // Delete elements outside the lock
      FOR_EACH(it, elements) {
        it->dispose(TLPDestructionMode::ALL_THREADS);
      }
    } catch (...) { // Just in case we get a lock error or something anyway...
      LOG(WARNING) << "Destructor discarding an exception that was thrown.";
    }
  }

  /**
   * Reserve enough space in the threadEntry_.elements for the item
   * @id to fit in.
   */
  static void reserve(int id) {
    size_t prevSize = threadEntry_.elementsCapacity;
    size_t newSize = static_cast<size_t>((id + 5) * 1.7);
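    // The growth factor trades memory for fewer reallocations; e.g. the
    // first call (id 0) reserves (0 + 5) * 1.7 = 8 slots, and a later call
    // with id 8 grows that to (8 + 5) * 1.7 = 22 slots.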
    auto& meta = instance();
    ElementWrapper* ptr = nullptr;
    // Rely on jemalloc to zero the memory if possible -- maybe it knows
    // it's already zeroed and saves us some work.
    if (!usingJEMalloc() ||
        prevSize < jemallocMinInPlaceExpandable ||
        (rallocm(
           static_cast<void**>(static_cast<void*>(&threadEntry_.elements)),
           NULL, newSize * sizeof(ElementWrapper), 0,
           ALLOCM_NO_MOVE | ALLOCM_ZERO) != ALLOCM_SUCCESS)) {
      // Sigh, must realloc, but we can't call realloc here, as elements is
      // still linked in meta, so another thread might access invalid memory
      // after realloc succeeds.  We'll copy by hand and update threadEntry_
      // under the lock.
      //
      // Note that we're using calloc instead of malloc in order to zero
      // the entire region.  rallocm (ALLOCM_ZERO) will only zero newly
      // allocated memory, so if a previous allocation allocated more than
      // we requested, it's our responsibility to guarantee that the tail
      // is zeroed.  calloc() is simpler than malloc() followed by memset(),
      // and potentially faster when dealing with a lot of memory, as
      // it can get already-zeroed pages from the kernel.
      if ((ptr = static_cast<ElementWrapper*>(
             calloc(newSize, sizeof(ElementWrapper)))) != nullptr) {
        memcpy(ptr, threadEntry_.elements, sizeof(ElementWrapper) * prevSize);
      } else {
        throw std::bad_alloc();
      }
    }

    // Success, update the entry
    {
      boost::lock_guard<boost::mutex> g(meta.lock_);
      if (prevSize == 0) {
        meta.push_back(&threadEntry_);
      }
      if (ptr) {
        using std::swap;
        swap(ptr, threadEntry_.elements);
      }
      threadEntry_.elementsCapacity = newSize;
    }

    free(ptr);

    if (prevSize == 0) {
      // First time this thread touches this StaticMeta: register with the
      // pthread key so onThreadExit runs for this thread.
      pthread_setspecific(meta.pthreadKey_, &meta);
    }
  }

  static ElementWrapper& get(int id) {
    if (UNLIKELY(threadEntry_.elementsCapacity <= id)) {
      reserve(id);
    }
    return threadEntry_.elements[id];
  }
};

template <class Tag> __thread ThreadEntry StaticMeta<Tag>::threadEntry_ = {0};
template <class Tag> StaticMeta<Tag>* StaticMeta<Tag>::inst = nullptr;

}  // namespace threadlocal_detail
}  // namespace folly

#endif /* FOLLY_DETAIL_THREADLOCALDETAIL_H_ */