/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/ThreadLocal.h>

#include <dlfcn.h>
#include <sys/types.h>
#include <sys/wait.h>

#include <array>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <set>
#include <thread>
#include <unordered_map>
#include <vector>

#include <glog/logging.h>

#include <folly/Baton.h>
#include <folly/Foreach.h>
#include <folly/Memory.h>
#include <folly/experimental/io/FsUtil.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Unistd.h>

using namespace folly;
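
// Test fixture: Widget tallies destructions into totalVal_. Its destructor
// adds val_, and customDeleter additionally adds 1 for a THIS_THREAD
// destruction or 1000 for an ALL_THREADS destruction, so each test below can
// assert exactly which destruction path ran.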

struct Widget {
  static int totalVal_;
  int val_;
  ~Widget() {
    totalVal_ += val_;
  }

  static void customDeleter(Widget* w, TLPDestructionMode mode) {
    totalVal_ += (mode == TLPDestructionMode::ALL_THREADS) ? 1000 : 1;
    delete w;
  }
};
int Widget::totalVal_ = 0;

TEST(ThreadLocalPtr, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocalPtr<Widget> w;
  std::thread([&w]() {
    w.reset(new Widget());
    w.get()->val_ += 10;
  }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, CustomDeleter1) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    std::thread([&w]() {
      w.reset(new Widget(), Widget::customDeleter);
      w.get()->val_ += 10;
    }).join();
    EXPECT_EQ(11, Widget::totalVal_);
  }
  EXPECT_EQ(11, Widget::totalVal_);
}
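
// reset() also accepts a std::unique_ptr (optionally with a custom deleter):
// ownership transfers into the ThreadLocalPtr, as the next two tests verify.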
TEST(ThreadLocalPtr, CustomDeleterOwnershipTransfer) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    auto deleter = [](Widget* ptr) {
      Widget::customDeleter(ptr, TLPDestructionMode::THIS_THREAD);
    };
    std::unique_ptr<Widget, decltype(deleter)> source(new Widget(), deleter);
    std::thread([&w, &source]() {
      w.reset(std::move(source));
      w.get()->val_ += 10;
    }).join();
    EXPECT_EQ(11, Widget::totalVal_);
  }
  EXPECT_EQ(11, Widget::totalVal_);
}

TEST(ThreadLocalPtr, DefaultDeleterOwnershipTransfer) {
  Widget::totalVal_ = 0;
  {
    ThreadLocalPtr<Widget> w;
    auto source = folly::make_unique<Widget>();
    std::thread([&w, &source]() {
      w.reset(std::move(source));
      w.get()->val_ += 10;
    }).join();
    EXPECT_EQ(10, Widget::totalVal_);
  }
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocalPtr, resetNull) {
  ThreadLocalPtr<int> tl;
  EXPECT_FALSE(tl);
  tl.reset(new int(4));
  EXPECT_TRUE(static_cast<bool>(tl));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_FALSE(tl);
}
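
// release() returns the raw pointer and relinquishes ownership, so the value
// must survive thread exit and only be destroyed by its new owner.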
TEST(ThreadLocalPtr, TestRelease) {
  Widget::totalVal_ = 0;
  ThreadLocalPtr<Widget> w;
  std::unique_ptr<Widget> wPtr;
  std::thread([&w, &wPtr]() {
    w.reset(new Widget());
    w.get()->val_ += 10;

    wPtr.reset(w.release());
  }).join();
  EXPECT_EQ(0, Widget::totalVal_);
  wPtr.reset();
  EXPECT_EQ(10, Widget::totalVal_);
}
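
// Thread-local values created from within another thread-local's destructor
// (here, a ThreadLocal<Widget> touched inside tl's deleter) must still be
// destroyed on thread exit rather than leaked.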
TEST(ThreadLocalPtr, CreateOnThreadExit) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget> w;
  ThreadLocalPtr<int> tl;

  std::thread([&] {
    tl.reset(new int(1), [&](int* ptr, TLPDestructionMode /* mode */) {
      delete ptr;
      // This test ensures Widgets allocated here are not leaked.
      ++w.get()->val_;
      ThreadLocal<Widget> wl;
      ++wl.get()->val_;
    });
  }).join();
  EXPECT_EQ(2, Widget::totalVal_);
}

// Test deleting the ThreadLocalPtr object
TEST(ThreadLocalPtr, CustomDeleter2) {
  Widget::totalVal_ = 0;
  std::thread t;
  std::mutex mutex;
  std::condition_variable cv;
  enum class State { START, DONE, EXIT };
  State state = State::START;
  {
    ThreadLocalPtr<Widget> w;
    t = std::thread([&]() {
      w.reset(new Widget(), Widget::customDeleter);
      w.get()->val_ += 10;

      // Notify main thread that we're done
      {
        std::unique_lock<std::mutex> lock(mutex);
        state = State::DONE;
        cv.notify_all();
      }
      // Wait for main thread to allow us to exit
      {
        std::unique_lock<std::mutex> lock(mutex);
        while (state != State::EXIT) {
          cv.wait(lock);
        }
      }
    });

    // Wait for the worker thread to start (and set w.get()->val_)
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (state != State::DONE) {
        cv.wait(lock);
      }
    }

    // Thread started but hasn't exited yet
    EXPECT_EQ(0, Widget::totalVal_);

    // Destroy ThreadLocalPtr<Widget> (by letting it go out of scope)
  }

  EXPECT_EQ(1010, Widget::totalVal_);

  // Allow thread to exit
  {
    std::unique_lock<std::mutex> lock(mutex);
    state = State::EXIT;
    cv.notify_all();
  }
  t.join();

  EXPECT_EQ(1010, Widget::totalVal_);
}

TEST(ThreadLocal, BasicDestructor) {
  Widget::totalVal_ = 0;
  ThreadLocal<Widget> w;
  std::thread([&w]() { w->val_ += 10; }).join();
  EXPECT_EQ(10, Widget::totalVal_);
}

TEST(ThreadLocal, SimpleRepeatDestructor) {
  Widget::totalVal_ = 0;
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  {
    ThreadLocal<Widget> w;
    w->val_ += 10;
  }
  EXPECT_EQ(20, Widget::totalVal_);
}

TEST(ThreadLocal, InterleavedDestructors) {
  Widget::totalVal_ = 0;
  std::unique_ptr<ThreadLocal<Widget>> w;
  int wVersion = 0;
  const int wVersionMax = 2;
  int thIter = 0;
  std::mutex lock;
  auto th = std::thread([&]() {
    int wVersionPrev = 0;
    while (true) {
      while (true) {
        std::lock_guard<std::mutex> g(lock);
        if (wVersion > wVersionMax) {
          return;
        }
        if (wVersion > wVersionPrev) {
          // We have a new version of w, so it should be initialized to zero
          EXPECT_EQ((*w)->val_, 0);
          break;
        }
      }
      {
        std::lock_guard<std::mutex> g(lock);
        wVersionPrev = wVersion;
        (*w)->val_ += 10;
        ++thIter;
      }
    }
  });
  FOR_EACH_RANGE(i, 0, wVersionMax) {
    int thIterPrev = 0;
    {
      std::lock_guard<std::mutex> g(lock);
      thIterPrev = thIter;
      w.reset(new ThreadLocal<Widget>());
      ++wVersion;
    }
    while (true) {
      std::lock_guard<std::mutex> g(lock);
      if (thIter > thIterPrev) {
        break;
      }
    }
  }
  {
    std::lock_guard<std::mutex> g(lock);
    wVersion = wVersionMax + 1;
  }
  th.join();
  EXPECT_EQ(wVersionMax * 10, Widget::totalVal_);
}
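
// A minimal thread-cached counter: each thread bumps its own ThreadLocal<int>
// slot, and read() sums every thread's slot via accessAllThreads().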
class SimpleThreadCachedInt {
  class NewTag;
  ThreadLocal<int, NewTag> val_;

 public:
  void add(int val) {
    *val_ += val;
  }

  int read() {
    int ret = 0;
    for (const auto& i : val_.accessAllThreads()) {
      ret += i;
    }
    return ret;
  }
};

TEST(ThreadLocalPtr, AccessAllThreadsCounter) {
  const int kNumThreads = 10;
  SimpleThreadCachedInt stci;
  std::atomic<bool> run(true);
  std::atomic<int> totalAtomic(0);
  std::vector<std::thread> threads;
  for (int i = 0; i < kNumThreads; ++i) {
    threads.push_back(std::thread([&,i]() {
      stci.add(1);
      totalAtomic.fetch_add(1);
      while (run.load()) { usleep(100); }
    }));
  }
  while (totalAtomic.load() != kNumThreads) { usleep(100); }
  EXPECT_EQ(kNumThreads, stci.read());
  run.store(false);
  for (auto& t : threads) {
    t.join();
  }
}

TEST(ThreadLocal, resetNull) {
  ThreadLocal<int> tl;
  tl.reset(new int(4));
  EXPECT_EQ(*tl.get(), 4);
  tl.reset();
  EXPECT_EQ(*tl.get(), 0);
  tl.reset(new int(5));
  EXPECT_EQ(*tl.get(), 5);
}
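
// Movability: Foo wraps a tagged ThreadLocal; distinct Foo instances,
// including ones created in place inside a std::map, must each end up with
// their own thread-local slot.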
namespace {
struct Tag {};

struct Foo {
  folly::ThreadLocal<int, Tag> tl;
};
}  // namespace

TEST(ThreadLocal, Movable1) {
  Foo a;
  Foo b;
  EXPECT_TRUE(a.tl.get() != b.tl.get());

  a = Foo();
  b = Foo();
  EXPECT_TRUE(a.tl.get() != b.tl.get());
}

TEST(ThreadLocal, Movable2) {
  std::map<int, Foo> map;

  map[42];
  map[10];
  map[23];
  map[100];

  std::set<void*> tls;
  for (auto& m : map) {
    tls.insert(m.second.tl.get());
  }

  // Make sure that we have 4 different instances of *tl
  EXPECT_EQ(4, tls.size());
}

constexpr size_t kFillObjectSize = 300;

std::atomic<uint64_t> gDestroyed;

/**
 * Fill a chunk of memory with a unique-ish pattern that includes the thread id
 * (so deleting one of these from another thread would cause a failure)
 *
 * Verify it explicitly and on destruction.
 */
class FillObject {
 public:
  explicit FillObject(uint64_t idx) : idx_(idx) {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      data_[i] = v;
    }
  }

  ~FillObject() {
    check();
    ++gDestroyed;
  }

  void check() {
    uint64_t v = val();
    for (size_t i = 0; i < kFillObjectSize; ++i) {
      CHECK_EQ(v, data_[i]);
    }
  }

 private:
  uint64_t val() const {
    return (idx_ << 40) | uint64_t(pthread_self());
  }

  uint64_t idx_;
  uint64_t data_[kFillObjectSize];
};

#if FOLLY_HAVE_STD_THIS_THREAD_SLEEP_FOR
TEST(ThreadLocal, Stress) {
  constexpr size_t numFillObjects = 250;
  std::array<ThreadLocalPtr<FillObject>, numFillObjects> objects;

  constexpr size_t numThreads = 32;
  constexpr size_t numReps = 20;

  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  for (size_t k = 0; k < numThreads; ++k) {
    threads.emplace_back([&objects] {
      for (size_t rep = 0; rep < numReps; ++rep) {
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i].reset(new FillObject(rep * objects.size() + i));
          std::this_thread::sleep_for(std::chrono::microseconds(100));
        }
        for (size_t i = 0; i < objects.size(); ++i) {
          objects[i]->check();
        }
      }
    });
  }

  for (auto& t : threads) {
    t.join();
  }

  EXPECT_EQ(numFillObjects * numThreads * numReps, gDestroyed);
}
#endif

// Yes, threads and fork don't mix
// (http://cppwisdom.quora.com/Why-threads-and-fork-dont-mix) but if you're
// stupid or desperate enough to try, we shouldn't stand in your way.
namespace {

class HoldsOne {
 public:
  HoldsOne() : value_(1) { }
  // Do an actual access to catch the buggy case where this == nullptr
  int value() const { return value_; }
 private:
  int value_;
};

struct HoldsOneTag {};

ThreadLocal<HoldsOne, HoldsOneTag> ptr;

int totalValue() {
  int value = 0;
  for (auto& p : ptr.accessAllThreads()) {
    value += p.value();
  }
  return value;
}

}  // namespace
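
// After fork() only the forking thread exists in the child, so the child's
// accessAllThreads() view should see exactly one HoldsOne instance.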
#ifdef FOLLY_HAVE_PTHREAD_ATFORK
TEST(ThreadLocal, Fork) {
  EXPECT_EQ(1, ptr->value());  // ensure created
  EXPECT_EQ(1, totalValue());
  // Spawn a new thread
  std::mutex mutex;
  bool started = false;
  std::condition_variable startedCond;
  bool stopped = false;
  std::condition_variable stoppedCond;

  std::thread t([&] () {
    EXPECT_EQ(1, ptr->value());  // ensure created
    {
      std::unique_lock<std::mutex> lock(mutex);
      started = true;
      startedCond.notify_all();
    }
    {
      std::unique_lock<std::mutex> lock(mutex);
      while (!stopped) {
        stoppedCond.wait(lock);
      }
    }
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    while (!started) {
      startedCond.wait(lock);
    }
  }

  EXPECT_EQ(2, totalValue());

  pid_t pid = fork();
  if (pid == 0) {
    // in child
    int v = totalValue();

    // exit successfully if v == 1 (one thread)
    // diagnostic error code otherwise :)
    switch (v) {
    case 1: _exit(0);
    case 0: _exit(1);
    }
    _exit(2);
  } else if (pid > 0) {
    // in parent
    int status;
    EXPECT_EQ(pid, waitpid(pid, &status, 0));
    EXPECT_TRUE(WIFEXITED(status));
    EXPECT_EQ(0, WEXITSTATUS(status));
  } else {
    EXPECT_TRUE(false) << "fork failed";
  }

  EXPECT_EQ(2, totalValue());

  {
    std::unique_lock<std::mutex> lock(mutex);
    stopped = true;
    stoppedCond.notify_all();
  }
  t.join();

  EXPECT_EQ(1, totalValue());
}
#endif

struct HoldsOneTag2 {};

TEST(ThreadLocal, Fork2) {
  // A thread-local tag that was used in the parent from a *different* thread
  // (but not the forking thread) would cause the child to hang in a
  // ThreadLocalPtr's object destructor. Yeah.
  ThreadLocal<HoldsOne, HoldsOneTag2> p;
  {
    // use tag in different thread
    std::thread t([&p] { p.get(); });
    t.join();
  }
  pid_t pid = fork();
  if (pid == 0) {
    {
      ThreadLocal<HoldsOne, HoldsOneTag2> q;
      q.get();
    }
    _exit(0);
  } else if (pid > 0) {
    int status;
    EXPECT_EQ(pid, waitpid(pid, &status, 0));
    EXPECT_TRUE(WIFEXITED(status));
    EXPECT_EQ(0, WEXITSTATUS(status));
  } else {
    EXPECT_TRUE(false) << "fork failed";
  }
}

// Elide this test when using any sanitizer. Otherwise, the dlopen'ed code
// would end up running without e.g., ASAN-initialized data structures and
// failing right away.
#if !defined FOLLY_SANITIZE_ADDRESS && !defined UNDEFINED_SANITIZER && \
    !defined FOLLY_SANITIZE_THREAD

TEST(ThreadLocal, SharedLibrary) {
  auto exe = fs::executable_path();
  auto lib = exe.parent_path() / "lib_thread_local_test.so";
  auto handle = dlopen(lib.string().c_str(), RTLD_LAZY);
  EXPECT_NE(nullptr, handle);

  typedef void (*useA_t)();
  dlerror();
  useA_t useA = (useA_t) dlsym(handle, "useA");

  const char *dlsym_error = dlerror();
  EXPECT_EQ(nullptr, dlsym_error);

  useA();

  folly::Baton<> b11, b12, b21, b22;

  std::thread t1([&]() {
    useA();
    b11.post();
    b12.wait();
  });

  std::thread t2([&]() {
    useA();
    b21.post();
    b22.wait();
  });

  b11.wait();
  b21.wait();

  dlclose(handle);

  b12.post();
  b22.post();

  t1.join();
  t2.join();
}

#endif
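
// PthreadKeyUnregister is expected to be constant-initializable; wrapping it
// in a struct whose defaulted default constructor is declared constexpr only
// compiles if PthreadKeyUnregister's own constructor is constexpr.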
namespace folly { namespace threadlocal_detail {
struct PthreadKeyUnregisterTester {
  PthreadKeyUnregister p;
  constexpr PthreadKeyUnregisterTester() = default;
};
}}

TEST(ThreadLocal, UnregisterClassHasConstExprCtor) {
  folly::threadlocal_detail::PthreadKeyUnregisterTester x;
  // yep!
  SUCCEED();
}