/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/IndexedMemPool.h>
#include <folly/test/DeterministicSchedule.h>
#include <folly/portability/GTest.h>
#include <folly/portability/Unistd.h>

#include <memory>
#include <string>
#include <thread>
#include <vector>

#include <semaphore.h>
26 using namespace folly;
27 using namespace folly::test;
29 TEST(IndexedMemPool, unique_ptr) {
30 typedef IndexedMemPool<size_t> Pool;
33 for (size_t i = 0; i < 100000; ++i) {
34 auto ptr = pool.allocElem();
39 std::vector<Pool::UniquePtr> leak;
41 auto ptr = pool.allocElem();
43 // good, we finally ran out
46 leak.emplace_back(std::move(ptr));
47 EXPECT_LT(leak.size(), 10000);
51 TEST(IndexedMemPool, no_starvation) {
52 const int count = 1000;
53 const int poolSize = 100;
55 typedef DeterministicSchedule Sched;
56 Sched sched(Sched::uniform(0));
58 typedef IndexedMemPool<int,8,8,DeterministicAtomic> Pool;
61 for (auto pass = 0; pass < 10; ++pass) {
63 EXPECT_EQ(pipe(fd), 0);
65 // makes sure we wait for available nodes, rather than fail allocIndex
67 sem_init(&allocSem, 0, poolSize);
69 // this semaphore is only needed for deterministic replay, so that we
70 // always block in an Sched:: operation rather than in a read() syscall
72 sem_init(&readSem, 0, 0);
74 std::thread produce = Sched::thread([&]() {
75 for (auto i = 0; i < count; ++i) {
76 Sched::wait(&allocSem);
77 uint32_t idx = pool.allocIndex();
80 poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
82 EXPECT_EQ(write(fd[1], &idx, sizeof(idx)), sizeof(idx));
83 Sched::post(&readSem);
87 std::thread consume = Sched::thread([&]() {
88 for (auto i = 0; i < count; ++i) {
90 Sched::wait(&readSem);
91 EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
95 poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
96 EXPECT_EQ(pool[idx], i);
97 pool.recycleIndex(idx);
98 Sched::post(&allocSem);
102 Sched::join(produce);
103 Sched::join(consume);
109 TEST(IndexedMemPool, st_capacity) {
110 // only one local list => capacity is exact
111 typedef IndexedMemPool<int,1,32> Pool;
114 EXPECT_EQ(pool.capacity(), 10);
115 EXPECT_EQ(Pool::maxIndexForCapacity(10), 10);
116 for (auto i = 0; i < 10; ++i) {
117 EXPECT_NE(pool.allocIndex(), 0);
119 EXPECT_EQ(pool.allocIndex(), 0);
122 TEST(IndexedMemPool, mt_capacity) {
123 typedef IndexedMemPool<int,16,32> Pool;
126 std::thread threads[10];
127 for (auto i = 0; i < 10; ++i) {
128 threads[i] = std::thread([&]() {
129 for (auto j = 0; j < 100; ++j) {
130 uint32_t idx = pool.allocIndex();
136 for (auto i = 0; i < 10; ++i) {
140 for (auto i = 0; i < 16 * 32; ++i) {
143 EXPECT_EQ(pool.allocIndex(), 0);
146 TEST(IndexedMemPool, locate_elem) {
147 IndexedMemPool<int> pool(1000);
149 for (auto i = 0; i < 1000; ++i) {
150 auto idx = pool.allocIndex();
151 EXPECT_FALSE(idx == 0);
152 int* elem = &pool[idx];
153 EXPECT_TRUE(idx == pool.locateElem(elem));
156 EXPECT_EQ(pool.locateElem(nullptr), 0);
159 struct NonTrivialStruct {
160 static FOLLY_TLS int count;
169 NonTrivialStruct(std::unique_ptr<std::string>&& arg1, int arg2) {
170 elem_ = arg1->length() + arg2;
174 ~NonTrivialStruct() {
179 FOLLY_TLS int NonTrivialStruct::count;
181 TEST(IndexedMemPool, eager_recycle) {
182 typedef IndexedMemPool<NonTrivialStruct> Pool;
185 EXPECT_EQ(NonTrivialStruct::count, 0);
187 for (size_t i = 0; i < 10; ++i) {
189 std::unique_ptr<std::string> arg{ new std::string{ "abc" } };
190 auto ptr = pool.allocElem(std::move(arg), 100);
191 EXPECT_EQ(NonTrivialStruct::count, 1);
192 EXPECT_EQ(ptr->elem_, 103);
195 EXPECT_EQ(NonTrivialStruct::count, 0);
199 TEST(IndexedMemPool, late_recycle) {
201 typedef IndexedMemPool<NonTrivialStruct, 8, 8, std::atomic, false, false>
205 EXPECT_EQ(NonTrivialStruct::count, 0);
207 for (size_t i = 0; i < 10; ++i) {
209 auto ptr = pool.allocElem();
210 EXPECT_TRUE(NonTrivialStruct::count > 0);
214 EXPECT_TRUE(NonTrivialStruct::count > 0);
217 EXPECT_EQ(NonTrivialStruct::count, 0);