2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
#include <folly/ThreadCachedArena.h>

#include <folly/Memory.h>

#include <algorithm>
#include <iterator>
#include <map>
#include <mutex>
#include <random>
#include <thread>
#include <unordered_map>
#include <vector>

#include <glog/logging.h>

#include <folly/Benchmark.h>
#include <folly/Portability.h>
#include <folly/Range.h>
#include <folly/portability/GTest.h>

using namespace folly;
41 explicit ArenaTester(ThreadCachedArena& arena) : arena_(&arena) { }
43 void allocate(size_t count, size_t maxSize);
45 void merge(ArenaTester&& other);
48 std::mutex mergeMutex_;
49 std::vector<std::pair<uint8_t, Range<uint8_t*>>> areas_;
50 ThreadCachedArena* arena_;
53 void ArenaTester::allocate(size_t count, size_t maxSize) {
54 // Allocate chunks of memory of random sizes
56 std::uniform_int_distribution<uint32_t> sizeDist(1, maxSize - 1);
58 areas_.reserve(count);
59 for (size_t i = 0; i < count; i++) {
60 size_t size = sizeDist(rnd);
61 uint8_t* p = static_cast<uint8_t*>(arena_->allocate(size));
62 areas_.emplace_back(rnd() & 0xff, Range<uint8_t*>(p, size));
65 // Fill each area with a different value, to prove that they don't overlap
66 // Fill in random order.
68 areas_.begin(), areas_.end(),
69 [&rnd] (int n) -> int {
70 return std::uniform_int_distribution<uint32_t>(0, n-1)(rnd);
73 for (auto& p : areas_) {
74 std::fill(p.second.begin(), p.second.end(), p.first);
78 void ArenaTester::verify() {
79 for (auto& p : areas_) {
80 for (auto v : p.second) {
81 EXPECT_EQ(p.first, v);
86 void ArenaTester::merge(ArenaTester&& other) {
88 std::lock_guard<std::mutex> lock(mergeMutex_);
89 std::move(other.areas_.begin(), other.areas_.end(),
90 std::back_inserter(areas_));
97 TEST(ThreadCachedArena, BlockSize) {
98 static const size_t alignment = alignof(std::max_align_t);
99 static const size_t requestedBlockSize = 64;
101 ThreadCachedArena arena(requestedBlockSize);
102 size_t blockSize = alignment;
103 uint8_t* prev = static_cast<uint8_t*>(arena.allocate(1));
105 // Keep allocating until we're no longer one single alignment away from the
106 // previous allocation -- that's when we've gotten to the next block.
108 while ((p = static_cast<uint8_t*>(arena.allocate(1))) ==
111 blockSize += alignment;
114 VLOG(1) << "Requested block size: " << requestedBlockSize << ", actual: "
116 EXPECT_LE(requestedBlockSize, blockSize);
119 TEST(ThreadCachedArena, SingleThreaded) {
120 static const size_t requestedBlockSize = 64;
121 ThreadCachedArena arena(requestedBlockSize);
122 EXPECT_EQ(arena.totalSize(), sizeof(ThreadCachedArena));
124 ArenaTester tester(arena);
125 tester.allocate(100, 100 << 10);
128 EXPECT_GT(arena.totalSize(), sizeof(ThreadCachedArena));
131 TEST(ThreadCachedArena, MultiThreaded) {
132 static const size_t requestedBlockSize = 64;
133 ThreadCachedArena arena(requestedBlockSize);
134 ArenaTester mainTester(arena);
136 // Do this twice, to catch the possibility that memory from the first
138 static const size_t numThreads = 20;
139 for (size_t i = 0; i < 2; i++) {
140 std::vector<std::thread> threads;
141 threads.reserve(numThreads);
142 for (size_t j = 0; j < numThreads; j++) {
143 threads.emplace_back(
144 [&arena, &mainTester] () {
145 ArenaTester tester(arena);
146 tester.allocate(500, 1 << 10);
148 mainTester.merge(std::move(tester));
151 for (auto& t : threads) {
159 TEST(ThreadCachedArena, StlAllocator) {
160 typedef std::unordered_map<
161 int, int, std::hash<int>, std::equal_to<int>,
162 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
164 static const size_t requestedBlockSize = 64;
165 ThreadCachedArena arena(requestedBlockSize);
167 Map map {0, std::hash<int>(), std::equal_to<int>(),
168 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)};
170 for (int i = 0; i < 1000; i++) {
174 for (int i = 0; i < 1000; i++) {
175 EXPECT_EQ(i, map[i]);
// Number of map insertions performed per iteration in each benchmark below.
static const int kNumValues = 10000;
183 BENCHMARK(bmUMStandard, iters) {
184 typedef std::unordered_map<int, int> Map;
188 for (int i = 0; i < kNumValues; i++) {
194 BENCHMARK(bmUMArena, iters) {
195 typedef std::unordered_map<
196 int, int, std::hash<int>, std::equal_to<int>,
197 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
200 ThreadCachedArena arena;
202 Map map {0, std::hash<int>(), std::equal_to<int>(),
203 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
206 for (int i = 0; i < kNumValues; i++) {
212 BENCHMARK_DRAW_LINE()
214 BENCHMARK(bmMStandard, iters) {
215 typedef std::map<int, int> Map;
219 for (int i = 0; i < kNumValues; i++) {
225 BENCHMARK_DRAW_LINE()
227 BENCHMARK(bmMArena, iters) {
229 int, int, std::less<int>,
230 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
233 ThreadCachedArena arena;
235 Map map {std::less<int>(),
236 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
239 for (int i = 0; i < kNumValues; i++) {
245 BENCHMARK_DRAW_LINE()
250 // Benchmark Iters Total t t/iter iter/sec
251 // ----------------------------------------------------------------------------
252 // Comparing benchmarks: bmUMStandard,bmUMArena
253 // + 143% bmUMStandard 1570 2.005 s 1.277 ms 782.9
254 // * bmUMArena 3817 2.003 s 524.7 us 1.861 k
255 // ----------------------------------------------------------------------------
256 // Comparing benchmarks: bmMStandard,bmMArena
257 // +79.0% bmMStandard 1197 2.009 s 1.678 ms 595.8
258 // * bmMArena 2135 2.002 s 937.6 us 1.042 k
259 // ----------------------------------------------------------------------------
261 int main(int argc, char *argv[]) {
262 testing::InitGoogleTest(&argc, argv);
263 gflags::ParseCommandLineFlags(&argc, &argv, true);
264 auto ret = RUN_ALL_TESTS();
265 if (!ret && FLAGS_benchmark) {
266 folly::runBenchmarks();