/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/IndexedMemPool.h>
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Semaphore.h>
#include <folly/portability/Unistd.h>
#include <folly/test/DeterministicSchedule.h>

#include <atomic>
#include <cstdint>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
27 using namespace folly;
28 using namespace folly::test;
29 using namespace testing;
31 TEST(IndexedMemPool, unique_ptr) {
32 typedef IndexedMemPool<size_t> Pool;
35 for (size_t i = 0; i < 100000; ++i) {
36 auto ptr = pool.allocElem();
41 std::vector<Pool::UniquePtr> leak;
43 auto ptr = pool.allocElem();
45 // good, we finally ran out
48 leak.emplace_back(std::move(ptr));
49 EXPECT_LT(leak.size(), 10000u);
53 TEST(IndexedMemPool, no_starvation) {
54 const int count = 1000;
55 const uint32_t poolSize = 100;
57 typedef DeterministicSchedule Sched;
58 Sched sched(Sched::uniform(0));
60 typedef IndexedMemPool<int,8,8,DeterministicAtomic> Pool;
63 for (auto pass = 0; pass < 10; ++pass) {
65 EXPECT_EQ(pipe(fd), 0);
67 // makes sure we wait for available nodes, rather than fail allocIndex
69 sem_init(&allocSem, 0, poolSize);
71 // this semaphore is only needed for deterministic replay, so that we
72 // always block in an Sched:: operation rather than in a read() syscall
74 sem_init(&readSem, 0, 0);
76 std::thread produce = Sched::thread([&]() {
77 for (auto i = 0; i < count; ++i) {
78 Sched::wait(&allocSem);
79 uint32_t idx = pool.allocIndex();
82 poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
84 EXPECT_EQ(write(fd[1], &idx, sizeof(idx)), sizeof(idx));
85 Sched::post(&readSem);
89 std::thread consume = Sched::thread([&]() {
90 for (auto i = 0; i < count; ++i) {
92 Sched::wait(&readSem);
93 EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
97 poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
98 EXPECT_EQ(pool[idx], i);
99 pool.recycleIndex(idx);
100 Sched::post(&allocSem);
104 Sched::join(produce);
105 Sched::join(consume);
111 TEST(IndexedMemPool, st_capacity) {
112 // only one local list => capacity is exact
113 typedef IndexedMemPool<int,1,32> Pool;
116 EXPECT_EQ(pool.capacity(), 10u);
117 EXPECT_EQ(Pool::maxIndexForCapacity(10), 10u);
118 for (auto i = 0; i < 10; ++i) {
119 EXPECT_NE(pool.allocIndex(), 0u);
121 EXPECT_EQ(pool.allocIndex(), 0u);
124 TEST(IndexedMemPool, mt_capacity) {
125 typedef IndexedMemPool<int,16,32> Pool;
128 std::thread threads[10];
129 for (auto i = 0; i < 10; ++i) {
130 threads[i] = std::thread([&]() {
131 for (auto j = 0; j < 100; ++j) {
132 uint32_t idx = pool.allocIndex();
138 for (auto i = 0; i < 10; ++i) {
142 for (auto i = 0; i < 16 * 32; ++i) {
145 EXPECT_EQ(pool.allocIndex(), 0u);
148 TEST(IndexedMemPool, locate_elem) {
149 IndexedMemPool<int> pool(1000);
151 for (auto i = 0; i < 1000; ++i) {
152 auto idx = pool.allocIndex();
153 EXPECT_FALSE(idx == 0);
154 int* elem = &pool[idx];
155 EXPECT_TRUE(idx == pool.locateElem(elem));
158 EXPECT_EQ(pool.locateElem(nullptr), 0);
161 struct NonTrivialStruct {
162 static FOLLY_TLS size_t count;
171 NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
172 elem_ = arg1->length() + arg2;
176 ~NonTrivialStruct() {
181 FOLLY_TLS size_t NonTrivialStruct::count;
183 TEST(IndexedMemPool, eager_recycle) {
184 typedef IndexedMemPool<NonTrivialStruct> Pool;
187 EXPECT_EQ(NonTrivialStruct::count, 0);
189 for (size_t i = 0; i < 10; ++i) {
191 std::unique_ptr<std::string> arg{ new std::string{ "abc" } };
192 auto ptr = pool.allocElem(std::move(arg), 100);
193 EXPECT_EQ(NonTrivialStruct::count, 1);
194 EXPECT_EQ(ptr->elem_, 103);
197 EXPECT_EQ(NonTrivialStruct::count, 0);
201 TEST(IndexedMemPool, late_recycle) {
203 using Pool = IndexedMemPool<
208 IndexedMemPoolTraitsLazyRecycle<NonTrivialStruct>>;
211 EXPECT_EQ(NonTrivialStruct::count, 0);
213 for (size_t i = 0; i < 10; ++i) {
215 auto ptr = pool.allocElem();
216 EXPECT_TRUE(NonTrivialStruct::count > 0);
220 EXPECT_TRUE(NonTrivialStruct::count > 0);
223 EXPECT_EQ(NonTrivialStruct::count, 0);
226 TEST(IndexedMemPool, no_data_races) {
227 const int count = 1000;
228 const uint32_t poolSize = 100;
229 const int nthreads = 10;
231 using Pool = IndexedMemPool<int, 8, 8>;
234 std::vector<std::thread> thr(nthreads);
235 for (auto i = 0; i < nthreads; ++i) {
236 thr[i] = std::thread([&]() {
237 for (auto j = 0; j < count; ++j) {
238 uint32_t idx = pool.allocIndex();
241 idx, poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
243 pool.recycleIndex(idx);
247 for (auto& t : thr) {
// Construction/destruction counters for the construction_destruction test.
// They are file-scope globals (not locals) so that the pool's element type
// can touch them from its default ctor/dtor without capturing any state.
std::atomic<int> cnum{0};
std::atomic<int> dnum{0};
255 TEST(IndexedMemPool, construction_destruction) {
265 std::atomic<bool> start{false};
266 std::atomic<int> started{0};
268 using Pool = IndexedMemPool<
273 IndexedMemPoolTraitsLazyRecycle<Foo>>;
279 std::vector<std::thread> thr(nthreads);
280 for (auto i = 0; i < nthreads; ++i) {
281 thr[i] = std::thread([&]() {
282 started.fetch_add(1);
283 while (!start.load())
285 for (auto j = 0; j < count; ++j) {
286 uint32_t idx = pool.allocIndex();
288 pool.recycleIndex(idx);
294 while (started.load() < nthreads)
298 for (auto& t : thr) {
303 CHECK_EQ(cnum.load(), dnum.load());
306 /// Global Traits mock. It can't be a regular (non-global) mock because we
307 /// don't have access to the instance.
309 static MockTraits* instance;
319 MOCK_METHOD2(onAllocate, void(std::string*, std::string));
320 MOCK_METHOD1(onRecycle, void(std::string*));
323 static void initialize(std::string* ptr) {
324 new (ptr) std::string();
327 static void cleanup(std::string* ptr) {
332 static void onAllocate(std::string* ptr, std::string s) {
333 instance->onAllocate(ptr, s);
336 static void onRecycle(std::string* ptr) {
337 instance->onRecycle(ptr);
342 MockTraits* MockTraits::instance;
344 using TraitsTestPool =
345 IndexedMemPool<std::string, 1, 1, std::atomic, MockTraits::Forwarder>;
347 void testTraits(TraitsTestPool& pool) {
349 const std::string* elem = nullptr;
350 EXPECT_CALL(traits, onAllocate(_, _))
351 .WillOnce(Invoke([&](std::string* s, auto) {
352 EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
355 std::string* ptr = pool.allocElem("foo").release();
356 EXPECT_EQ(ptr, elem);
359 EXPECT_CALL(traits, onRecycle(_)).WillOnce(Invoke([&](std::string* s) {
360 EXPECT_FALSE(pool.isAllocated(pool.locateElem(s)));
363 pool.recycleIndex(pool.locateElem(ptr));
364 EXPECT_EQ(ptr, elem);
367 // Test that Traits is used when both local and global lists are empty.
368 TEST(IndexedMemPool, use_traits_empty) {
369 TraitsTestPool pool(10);
373 // Test that Traits is used when allocating from a local list.
374 TEST(IndexedMemPool, use_traits_local_list) {
375 TraitsTestPool pool(10);
377 EXPECT_CALL(traits, onAllocate(_, _));
378 // Allocate and immediately recycle an element to populate the local list.
383 // Test that Traits is used when allocating from a global list.
384 TEST(IndexedMemPool, use_traits_global_list) {
385 TraitsTestPool pool(10);
387 EXPECT_CALL(traits, onAllocate(_, _)).Times(2);
388 auto global = pool.allocElem("");
389 // Allocate and immediately recycle an element to fill the local list.
391 // Recycle an element to populate the global list.
396 // Test that IndexedMemPool works with incomplete element types.
397 struct IncompleteTestElement;
398 using IncompleteTestPool = IndexedMemPool<IncompleteTestElement>;