/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* -*- Mode: C++; tab-width: 2; c-basic-offset: 2; indent-tabs-mode: nil -*- */
#include <pthread.h>
#include <unistd.h>

#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <random>
#include <string>
#include <thread>
#include <vector>

#include <gtest/gtest.h>

#include <folly/Conv.h>
#include <folly/Memory.h>
#include <folly/ReadMostlySharedPtr.h>
27 using folly::ReadMostlySharedPtr;
29 // send SIGALRM to test process after this many seconds
30 const unsigned int TEST_TIMEOUT = 10;
32 class ReadMostlySharedPtrTest : public ::testing::Test {
34 ReadMostlySharedPtrTest() {
// Counting test payload: the constructor increments and the destructor
// decrements an external atomic counter, so tests can assert exactly how
// many instances are alive at any point (counter == 0 means "destroyed").
struct TestObject {
  int value;
  std::atomic<int>& counter;

  TestObject(int value, std::atomic<int>& counter)
      : value(value), counter(counter) {
    ++counter;
  }

  ~TestObject() {
    // A non-positive count here would mean a double-destruction.
    assert(counter.load() > 0);
    --counter;
  }
};
// One side calls requestAndWait(), the other side calls waitForRequest(),
// does something and calls completed().
//
// Single-use, two-party rendezvous built on a mutex + condition variable.
// The flags only ever transition false -> true, so each Coordinator
// instance coordinates exactly one request/completion handshake (the
// asserts below catch accidental reuse).
class Coordinator {
 public:
  // Signal the request and block until the other side calls completed().
  void requestAndWait() {
    {
      std::lock_guard<std::mutex> lock(mutex);
      assert(!is_requested);
      assert(!is_completed);
      is_requested = true;
    }
    // Notify outside the lock so the woken thread can acquire it at once.
    cv.notify_all();
    {
      std::unique_lock<std::mutex> lock(mutex);
      cv.wait(lock, [&] { return is_completed; });
    }
  }

  // Block until the other side calls requestAndWait().
  void waitForRequest() {
    std::unique_lock<std::mutex> lock(mutex);
    assert(!is_completed);
    cv.wait(lock, [&] { return is_requested; });
  }

  // Mark the request as served, releasing the thread in requestAndWait().
  void completed() {
    {
      std::lock_guard<std::mutex> lock(mutex);
      assert(is_requested);
      is_completed = true;
    }
    cv.notify_all();
  }

 private:
  bool is_requested = false;
  bool is_completed = false;
  std::condition_variable cv;
  std::mutex mutex;
};
94 TEST_F(ReadMostlySharedPtrTest, BasicStores) {
95 ReadMostlySharedPtr<TestObject> ptr;
98 std::atomic<int> cnt1{0};
99 ptr.store(folly::make_unique<TestObject>(1, cnt1));
100 EXPECT_EQ(1, cnt1.load());
102 // Store 2, check that 1 is destroyed.
103 std::atomic<int> cnt2{0};
104 ptr.store(folly::make_unique<TestObject>(2, cnt2));
105 EXPECT_EQ(1, cnt2.load());
106 EXPECT_EQ(0, cnt1.load());
108 // Store nullptr, check that 2 is destroyed.
110 EXPECT_EQ(0, cnt2.load());
113 TEST_F(ReadMostlySharedPtrTest, BasicLoads) {
114 std::atomic<int> cnt2{0};
115 ReadMostlySharedPtr<TestObject>::ReadPtr x;
118 ReadMostlySharedPtr<TestObject> ptr;
120 // Check that ptr is initially nullptr.
121 EXPECT_EQ(ptr.load(), nullptr);
123 std::atomic<int> cnt1{0};
124 ptr.store(folly::make_unique<TestObject>(1, cnt1));
125 EXPECT_EQ(1, cnt1.load());
128 EXPECT_EQ(1, x->value);
130 ptr.store(folly::make_unique<TestObject>(2, cnt2));
131 EXPECT_EQ(1, cnt2.load());
132 EXPECT_EQ(1, cnt1.load());
135 EXPECT_EQ(2, x->value);
136 EXPECT_EQ(0, cnt1.load());
139 EXPECT_EQ(1, cnt2.load());
142 EXPECT_EQ(1, cnt2.load());
145 EXPECT_EQ(0, cnt2.load());
148 TEST_F(ReadMostlySharedPtrTest, LoadsFromThreads) {
149 std::atomic<int> cnt{0};
152 ReadMostlySharedPtr<TestObject> ptr;
153 Coordinator loads[7];
156 loads[0].waitForRequest();
157 EXPECT_EQ(ptr.load(), nullptr);
158 loads[0].completed();
160 loads[3].waitForRequest();
161 EXPECT_EQ(2, ptr.load()->value);
162 loads[3].completed();
164 loads[4].waitForRequest();
165 EXPECT_EQ(4, ptr.load()->value);
166 loads[4].completed();
168 loads[5].waitForRequest();
169 EXPECT_EQ(5, ptr.load()->value);
170 loads[5].completed();
174 loads[1].waitForRequest();
175 EXPECT_EQ(1, ptr.load()->value);
176 loads[1].completed();
178 loads[2].waitForRequest();
179 EXPECT_EQ(2, ptr.load()->value);
180 loads[2].completed();
182 loads[6].waitForRequest();
183 EXPECT_EQ(5, ptr.load()->value);
184 loads[6].completed();
187 loads[0].requestAndWait();
189 ptr.store(folly::make_unique<TestObject>(1, cnt));
190 loads[1].requestAndWait();
192 ptr.store(folly::make_unique<TestObject>(2, cnt));
193 loads[2].requestAndWait();
194 loads[3].requestAndWait();
196 ptr.store(folly::make_unique<TestObject>(3, cnt));
197 ptr.store(folly::make_unique<TestObject>(4, cnt));
198 loads[4].requestAndWait();
200 ptr.store(folly::make_unique<TestObject>(5, cnt));
201 loads[5].requestAndWait();
202 loads[6].requestAndWait();
204 EXPECT_EQ(1, cnt.load());
210 EXPECT_EQ(0, cnt.load());
213 TEST_F(ReadMostlySharedPtrTest, Ctor) {
214 std::atomic<int> cnt1{0};
216 ReadMostlySharedPtr<TestObject> ptr(
217 folly::make_unique<TestObject>(1, cnt1));
219 EXPECT_EQ(1, ptr.load()->value);
222 EXPECT_EQ(0, cnt1.load());
225 TEST_F(ReadMostlySharedPtrTest, ClearingCache) {
226 ReadMostlySharedPtr<TestObject> ptr;
229 std::atomic<int> cnt1{0};
230 ptr.store(folly::make_unique<TestObject>(1, cnt1));
235 // Cache the pointer for this thread.
240 // Wait for the thread to cache pointer.
242 EXPECT_EQ(1, cnt1.load());
244 // Store 2 and check that 1 is destroyed.
245 std::atomic<int> cnt2{0};
246 ptr.store(folly::make_unique<TestObject>(2, cnt2));
247 EXPECT_EQ(0, cnt1.load());
254 TEST_F(ReadMostlySharedPtrTest, SlowDestructor) {
258 Thingy(Coordinator* dtor = nullptr) : dtor(dtor) {}
262 dtor->requestAndWait();
269 ReadMostlySharedPtr<Thingy> ptr;
270 ptr.store(folly::make_unique<Thingy>(&dtor));
273 // This will block in ~Thingy().
274 ptr.store(folly::make_unique<Thingy>());
277 // Wait until store() in thread calls ~T().
278 dtor.waitForRequest();
279 // Do a store while another store() is stuck in ~T().
280 ptr.store(folly::make_unique<Thingy>());
281 // Let the other store() go.
287 TEST_F(ReadMostlySharedPtrTest, StressTest) {
288 const int ptr_count = 2;
289 const int thread_count = 5;
290 const std::chrono::milliseconds duration(100);
291 const std::chrono::milliseconds upd_delay(1);
292 const std::chrono::milliseconds respawn_delay(1);
295 std::atomic<int> value{0};
296 std::atomic<int> prev_value{0};
297 ReadMostlySharedPtr<TestObject> ptr;
302 std::atomic<bool> shutdown{false};
305 std::atomic<int> counter(0);
306 std::vector<Instance> instances(ptr_count);
307 std::vector<Thread> threads(thread_count);
308 std::atomic<int> seed(0);
310 // Threads that call load() and checking value.
311 auto thread_func = [&](int t) {
312 pthread_setname_np(pthread_self(),
313 ("load" + folly::to<std::string>(t)).c_str());
314 std::mt19937 rnd(++seed);
315 while (!threads[t].shutdown.load()) {
316 Instance& instance = instances[rnd() % instances.size()];
317 int val1 = instance.prev_value.load();
318 auto p = instance.ptr.load();
319 int val = p ? p->value : 0;
320 int val2 = instance.value.load();
321 EXPECT_LE(val1, val);
322 EXPECT_LE(val, val2);
326 for (size_t t = 0; t < threads.size(); ++t) {
327 threads[t].t = std::thread(thread_func, t);
330 std::atomic<bool> shutdown(false);
332 // Thread that calls store() occasionally.
333 std::thread update_thread([&] {
334 pthread_setname_np(pthread_self(), "store");
335 std::mt19937 rnd(++seed);
336 while (!shutdown.load()) {
337 Instance& instance = instances[rnd() % instances.size()];
338 int val = ++instance.value;
339 instance.ptr.store(folly::make_unique<TestObject>(val, counter));
340 ++instance.prev_value;
342 std::this_thread::sleep_for(upd_delay);
346 // Thread that joins and spawns load() threads occasionally.
347 std::thread respawn_thread([&] {
348 pthread_setname_np(pthread_self(), "respawn");
349 std::mt19937 rnd(++seed);
350 while (!shutdown.load()) {
351 int t = rnd() % threads.size();
352 threads[t].shutdown.store(true);
354 threads[t].shutdown.store(false);
355 threads[t].t = std::thread(thread_func, t);
358 std::this_thread::sleep_for(respawn_delay);
362 // Let all of this run for some time.
364 std::this_thread::sleep_for(duration);
366 // Shut all of this down.
367 shutdown.store(true);
369 update_thread.join();
370 respawn_thread.join();
371 for (auto& t: threads) {
372 t.shutdown.store(true);
376 for (auto& instance: instances) {
377 instance.ptr.store(nullptr);
378 EXPECT_EQ(instance.value.load(), instance.prev_value.load());
381 EXPECT_EQ(0, counter.load());