/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <folly/ThreadCachedArena.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <mutex>
#include <random>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

#include <glog/logging.h>
#include <gtest/gtest.h>

#include <folly/Benchmark.h>
#include <folly/Memory.h>
#include <folly/Range.h>
34 using namespace folly;
40 explicit ArenaTester(ThreadCachedArena& arena) : arena_(&arena) { }
42 void allocate(size_t count, size_t maxSize);
44 void merge(ArenaTester&& other);
47 std::mutex mergeMutex_;
48 std::vector<std::pair<uint8_t, Range<uint8_t*>>> areas_;
49 ThreadCachedArena* arena_;
52 void ArenaTester::allocate(size_t count, size_t maxSize) {
53 // Allocate chunks of memory of random sizes
55 std::uniform_int_distribution<uint32_t> sizeDist(1, maxSize - 1);
57 areas_.reserve(count);
58 for (size_t i = 0; i < count; i++) {
59 size_t size = sizeDist(rnd);
60 uint8_t* p = static_cast<uint8_t*>(arena_->allocate(size));
61 areas_.emplace_back(rnd() & 0xff, Range<uint8_t*>(p, size));
64 // Fill each area with a different value, to prove that they don't overlap
65 // Fill in random order.
67 areas_.begin(), areas_.end(),
68 [&rnd] (int n) -> int {
69 return std::uniform_int_distribution<uint32_t>(0, n-1)(rnd);
72 for (auto& p : areas_) {
73 std::fill(p.second.begin(), p.second.end(), p.first);
77 void ArenaTester::verify() {
78 for (auto& p : areas_) {
79 for (auto v : p.second) {
80 EXPECT_EQ(p.first, v);
85 void ArenaTester::merge(ArenaTester&& other) {
87 std::lock_guard<std::mutex> lock(mergeMutex_);
88 std::move(other.areas_.begin(), other.areas_.end(),
89 std::back_inserter(areas_));
96 TEST(ThreadCachedArena, BlockSize) {
97 struct Align { char c; } __attribute__((aligned));
98 static const size_t alignment = alignof(Align);
99 static const size_t requestedBlockSize = 64;
101 ThreadCachedArena arena(requestedBlockSize);
102 size_t blockSize = alignment;
103 uint8_t* prev = static_cast<uint8_t*>(arena.allocate(1));
105 // Keep allocating until we're no longer one single alignment away from the
106 // previous allocation -- that's when we've gotten to the next block.
108 while ((p = static_cast<uint8_t*>(arena.allocate(1))) ==
111 blockSize += alignment;
114 VLOG(1) << "Requested block size: " << requestedBlockSize << ", actual: "
116 EXPECT_LE(requestedBlockSize, blockSize);
119 TEST(ThreadCachedArena, SingleThreaded) {
120 static const size_t requestedBlockSize = 64;
121 ThreadCachedArena arena(requestedBlockSize);
122 ArenaTester tester(arena);
123 tester.allocate(100, 100 << 10);
127 TEST(ThreadCachedArena, MultiThreaded) {
128 static const size_t requestedBlockSize = 64;
129 ThreadCachedArena arena(requestedBlockSize);
130 ArenaTester mainTester(arena);
132 // Do this twice, to catch the possibility that memory from the first
134 static const size_t numThreads = 20;
135 for (size_t i = 0; i < 2; i++) {
136 std::vector<std::thread> threads;
137 threads.reserve(numThreads);
138 for (size_t j = 0; j < numThreads; j++) {
139 threads.emplace_back(
140 [&arena, &mainTester] () {
141 ArenaTester tester(arena);
142 tester.allocate(500, 1 << 10);
144 mainTester.merge(std::move(tester));
147 for (auto& t : threads) {
155 TEST(ThreadCachedArena, StlAllocator) {
156 typedef std::unordered_map<
157 int, int, std::hash<int>, std::equal_to<int>,
158 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
160 static const size_t requestedBlockSize = 64;
161 ThreadCachedArena arena(requestedBlockSize);
163 Map map {0, std::hash<int>(), std::equal_to<int>(),
164 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(&arena)};
166 for (int i = 0; i < 1000; i++) {
170 for (int i = 0; i < 1000; i++) {
171 EXPECT_EQ(i, map[i]);
// Number of map insertions performed per benchmark iteration.
constexpr int kNumValues = 10000;
179 BENCHMARK(bmUMStandard, iters) {
180 typedef std::unordered_map<int, int> Map;
184 for (int i = 0; i < kNumValues; i++) {
190 BENCHMARK(bmUMArena, iters) {
191 typedef std::unordered_map<
192 int, int, std::hash<int>, std::equal_to<int>,
193 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
196 ThreadCachedArena arena;
198 Map map {0, std::hash<int>(), std::equal_to<int>(),
199 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
202 for (int i = 0; i < kNumValues; i++) {
208 BENCHMARK_DRAW_LINE()
210 BENCHMARK(bmMStandard, iters) {
211 typedef std::map<int, int> Map;
215 for (int i = 0; i < kNumValues; i++) {
221 BENCHMARK_DRAW_LINE()
223 BENCHMARK(bmMArena, iters) {
225 int, int, std::less<int>,
226 StlAllocator<ThreadCachedArena, std::pair<const int, int>>> Map;
229 ThreadCachedArena arena;
231 Map map {std::less<int>(),
232 StlAllocator<ThreadCachedArena, std::pair<const int, int>>(
235 for (int i = 0; i < kNumValues; i++) {
241 BENCHMARK_DRAW_LINE()
246 // Benchmark Iters Total t t/iter iter/sec
247 // ----------------------------------------------------------------------------
248 // Comparing benchmarks: bmUMStandard,bmUMArena
249 // + 143% bmUMStandard 1570 2.005 s 1.277 ms 782.9
250 // * bmUMArena 3817 2.003 s 524.7 us 1.861 k
251 // ----------------------------------------------------------------------------
252 // Comparing benchmarks: bmMStandard,bmMArena
253 // +79.0% bmMStandard 1197 2.009 s 1.678 ms 595.8
254 // * bmMArena 2135 2.002 s 937.6 us 1.042 k
255 // ----------------------------------------------------------------------------
257 int main(int argc, char *argv[]) {
258 testing::InitGoogleTest(&argc, argv);
259 gflags::ParseCommandLineFlags(&argc, &argv, true);
260 auto ret = RUN_ALL_TESTS();
261 if (!ret && FLAGS_benchmark) {
262 folly::runBenchmarks();