2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/AtomicHashMap.h>
19 #include <glog/logging.h>
20 #include <gtest/gtest.h>
25 #include <folly/Benchmark.h>
26 #include <folly/Conv.h>
// Bring both the growable map and the fixed-size array into scope;
// the tests and benchmarks below exercise both.
30 using folly::AtomicHashMap;
31 using folly::AtomicHashArray;
// Command-line tunables for the concurrency tests and benchmarks.
34 DEFINE_double(targetLoadFactor, 0.75, "Target memory utilization fraction.");
35 DEFINE_double(maxLoadFactor, 0.80, "Max before growth.");
36 DEFINE_int32(numThreads, 8, "Threads to use for concurrency tests.");
37 DEFINE_int64(numBMElements, 12 * 1000 * 1000, "Size of maps for benchmarks.");
// NOTE(review): these two globals are computed during static initialization,
// before main() parses the flags, so they see the flag *defaults* rather
// than any values passed on the command line — confirm this is intended.
39 const double LF = FLAGS_maxLoadFactor / FLAGS_targetLoadFactor;
40 const int maxBMElements = int(FLAGS_numBMElements * LF); // hit our target LF.
// Current wall-clock time in microseconds. `tv` is presumably a struct
// timeval filled by a gettimeofday() call on a line elided from this chunk.
42 static int64_t nowInUsec() {
45 return int64_t(tv.tv_sec) * 1000 * 1000 + tv.tv_usec;
// Single-threaded sanity checks with std::string values: insert, find,
// duplicate-insert semantics (first write wins), and in-place mutation of
// the mapped value through the iterator returned by find().
48 TEST(Ahm, BasicStrings) {
49 typedef AtomicHashMap<int64_t,string> AHM;
51 EXPECT_TRUE(myMap.begin() == myMap.end());
53 for (int i = 0; i < 100; ++i) {
54 myMap.insert(make_pair(i, folly::to<string>(i)));
56 for (int i = 0; i < 100; ++i) {
57 EXPECT_EQ(myMap.find(i)->second, folly::to<string>(i));
// A second insert of an existing key must NOT overwrite the stored value...
60 myMap.insert(std::make_pair(999, "A"));
61 myMap.insert(std::make_pair(999, "B"));
62 EXPECT_EQ(myMap.find(999)->second, "A"); // shouldn't have overwritten
// ...but direct assignment through the find() iterator does mutate it.
63 myMap.find(999)->second = "B";
64 myMap.find(999)->second = "C";
65 EXPECT_EQ(myMap.find(999)->second, "C");
66 EXPECT_EQ(myMap.find(999)->first, 999);
// Verifies move-only mapped types work: unique_ptr values inserted both via
// the pair-insert overload and the (key, value) overload, then looked up
// and partially erased.
70 TEST(Ahm, BasicNoncopyable) {
71 typedef AtomicHashMap<int64_t,std::unique_ptr<int>> AHM;
73 EXPECT_TRUE(myMap.begin() == myMap.end());
75 for (int i = 0; i < 50; ++i) {
76 myMap.insert(make_pair(i, std::unique_ptr<int>(new int(i))));
78 for (int i = 50; i < 100; ++i) {
79 myMap.insert(i, std::unique_ptr<int>(new int (i)));
81 for (int i = 0; i < 100; ++i) {
82 EXPECT_EQ(*(myMap.find(i)->second), i);
// Erase every 4th key (the erase call itself is elided in this chunk)...
84 for (int i = 0; i < 100; i+=4) {
// ...and verify those keys are no longer found.
87 for (int i = 0; i < 100; i+=4) {
88 EXPECT_EQ(myMap.find(i), myMap.end());
// Shared aliases and global fixtures used by the tests/benchmarks below.
93 typedef int32_t ValueT;
95 typedef AtomicHashMap<KeyT,ValueT> AHMapT;
96 typedef AHMapT::value_type RecordT;
97 typedef AtomicHashArray<KeyT,ValueT> AHArrayT;
99 AHArrayT::Config config;
// Global instances shared across worker threads in the MT tests/benchmarks.
100 static AHArrayT::SmartPtr globalAHA(nullptr);
101 static std::unique_ptr<AHMapT> globalAHM;
103 // Generate a deterministic value based on an input key
// (the body of this function is elided in this chunk).
104 static int genVal(int key) {
// Interior of TEST(Ahm, grow) — the TEST(...) header line is elided in this
// chunk. Exercises growth across subMaps: bulk load, duplicate-insert
// rejection, capacity growth, findAt() index round-trips, in-place value
// modification, and reload after (presumably) a clear().
109 VLOG(1) << "Overhead: " << sizeof(AHArrayT) << " (array) " <<
110 sizeof(AHMapT) + sizeof(AHArrayT) << " (map/set) Bytes.";
111 uint64_t numEntries = 10000;
// Deliberately undersized initial capacity to force submap growth.
112 float sizeFactor = 0.46;
114 std::unique_ptr<AHMapT> m(new AHMapT(int(numEntries * sizeFactor), config));
116 // load map - make sure we succeed and the index is accurate
118 for (uint64_t i = 0; i < numEntries; i++) {
119 auto ret = m->insert(RecordT(i, genVal(i)));
120 success &= ret.second;
// findAt() must resolve the index published by the insert iterator.
121 success &= (m->findAt(ret.first.getIndex())->second == genVal(i));
123 // Overwrite vals to make sure there are no dups
124 // Every insert should fail because the keys are already in the map.
126 for (uint64_t i = 0; i < numEntries; i++) {
127 auto ret = m->insert(RecordT(i, genVal(i * 2)));
128 success &= (ret.second == false); // fail on collision
129 success &= (ret.first->second == genVal(i)); // return the previous value
130 success &= (m->findAt(ret.first.getIndex())->second == genVal(i));
132 EXPECT_TRUE(success);
135 size_t cap = m->capacity();
137 EXPECT_GT(m->numSubMaps(), 1); // make sure we grew
139 EXPECT_EQ(m->size(), numEntries);
140 for (size_t i = 0; i < numEntries; i++) {
141 success &= (m->find(i)->second == genVal(i));
143 EXPECT_TRUE(success);
// Iterator/index round trip: findAt(it.getIndex()) must land on the same
// element the original lookup produced.
148 AHMapT::const_iterator retIt;
149 for (int32_t i = 0; i < int32_t(numEntries); i++) {
151 retIt = m->findAt(retIt.getIndex());
152 success &= (retIt->second == genVal(i));
153 // We use a uint32_t index so that this comparison is between two
154 // variables of the same type.
155 success &= (retIt->first == i);
157 EXPECT_TRUE(success);
159 // Try modifying value
160 m->find(8)->second = 5309;
161 EXPECT_EQ(m->find(8)->second, 5309);
// Reload half the entries — presumably after a clear() on an elided line,
// given the size assertion below.
166 for (uint64_t i = 0; i < numEntries / 2; i++) {
167 success &= m->insert(RecordT(i, genVal(i))).second;
169 EXPECT_TRUE(success);
170 EXPECT_EQ(m->size(), numEntries / 2);
// Iterates the whole map checking every visited (key, value) pair is
// consistent with genVal(), and that exactly numEntries elements are seen.
173 TEST(Ahm, iterator) {
174 int numEntries = 10000;
175 float sizeFactor = .46;
176 std::unique_ptr<AHMapT> m(new AHMapT(int(numEntries * sizeFactor), config));
178 // load map - make sure we succeed and the index is accurate
179 for (int i = 0; i < numEntries; i++) {
180 m->insert(RecordT(i, genVal(i)));
// The iteration loop header and the `count` increment are elided here.
186 success &= (it->second == genVal(it->first));
189 EXPECT_TRUE(success);
190 EXPECT_EQ(count, numEntries);
195 // Note: Unfortunately can't currently put a std::atomic<int64_t> in
196 // the value in ahm since it doesn't support types that are both non-copy
197 // and non-move constructible yet.
// Counter table: obj_id -> count. (The enclosing class declaration and
// access specifiers are elided in this chunk.)
198 AtomicHashMap<int64_t,int64_t> ahm;
201 explicit Counters(size_t numCounters) : ahm(numCounters) {}
// Bump the count for obj_id: the first increment inserts 1; subsequent
// increments atomically add to the existing slot.
203 void increment(int64_t obj_id) {
204 auto ret = ahm.insert(std::make_pair(obj_id, 1));
206 // obj_id already exists, increment count
207 __sync_fetch_and_add(&ret.first->second, 1);
// Read the current count, or 0 if obj_id was never incremented.
211 int64_t getValue(int64_t obj_id) {
212 auto ret = ahm.find(obj_id);
213 return ret != ahm.end() ? ret->second : 0;
216 // export the counters without blocking increments
// Builds " [id:count]\n" entries; reserve(size*32) is a rough estimate of
// the output length to limit reallocations.
219 ret.reserve(ahm.size() * 32);
220 for (const auto& e : ahm) {
221 ret += folly::to<string>(
222 " [", e.first, ":", e.second, "]\n");
// Interior of the Counters stress test (the TEST(...) header is elided):
// spawn one thread per desired increment, join them all, then verify both
// getValue() and the textual dump agree with the expected totals.
229 // If you get an error "terminate called without an active exception", there
230 // might be too many threads getting created - decrease numKeys and/or mult.
232 const int numKeys = 10;
235 vector<int64_t> keys;
236 FOR_EACH_RANGE(i, 1, numKeys) {
239 vector<std::thread> threads;
// key * mult threads each increment `key` exactly once, so the expected
// final count for `key` is key * mult.
240 for (auto key : keys) {
241 FOR_EACH_RANGE(i, 0, key * mult) {
242 threads.push_back(std::thread([&, key] { c.increment(key); }));
245 for (auto& t : threads) {
248 string str = c.toString();
249 for (auto key : keys) {
250 int val = key * mult;
251 EXPECT_EQ(val, c.getValue(key));
252 EXPECT_NE(string::npos, str.find(folly::to<string>("[",key,":",val,"]")));
// Test value type whose copy-assignment throws on every other invocation,
// used to exercise the map's exception safety (class header elided).
259 explicit Integer(KeyT v = 0) : v_(v) {}
261 Integer& operator=(const Integer& a) {
// Function-local toggle: every second assignment takes the throwing branch
// (the throw statement itself is elided in this chunk).
262 static bool throwException_ = false;
263 throwException_ = !throwException_;
264 if (throwException_) {
271 bool operator==(const Integer& a) const { return v_ == a.v_; }
// Inserting Integer values whose copy-assignment throws intermittently:
// an insert that throws must leave the map without that key, and the final
// size must equal the number of inserts that completed.
277 TEST(Ahm, map_exception_safety) {
278 typedef AtomicHashMap<KeyT,Integer> MyMapT;
280 int numEntries = 10000;
281 float sizeFactor = 0.46;
282 std::unique_ptr<MyMapT> m(new MyMapT(int(numEntries * sizeFactor)));
// The try/catch scaffolding around the insert is elided in this chunk.
286 for (int i = 0; i < numEntries; i++) {
288 m->insert(i, Integer(genVal(i)));
289 success &= (m->find(i)->second == Integer(genVal(i)));
// Exception path: the key must NOT have been left behind in the map.
292 success &= !m->count(i);
295 EXPECT_EQ(count, m->size());
296 EXPECT_TRUE(success);
// Fill-then-drain cycles: inserting and erasing every key several times
// over, checking size bookkeeping and that double-erase fails.
299 TEST(Ahm, basicErase) {
300 size_t numEntries = 3000;
302 std::unique_ptr<AHMapT> s(new AHMapT(numEntries, config));
303 // Iterate filling up the map and deleting all keys a few times
304 // to test more than one subMap.
305 for (int iterations = 0; iterations < 4; ++iterations) {
306 // Testing insertion of keys
308 for (size_t i = 0; i < numEntries; ++i) {
309 success &= !(s->count(i));
310 auto ret = s->insert(RecordT(i, i));
311 success &= s->count(i);
312 success &= ret.second;
314 EXPECT_TRUE(success);
315 EXPECT_EQ(s->size(), numEntries);
317 // Delete every key in the map and verify that the key is gone and the the
// (continuation of that comment is elided) — size must shrink by exactly
// one per successful erase.
320 for (size_t i = 0; i < numEntries; ++i) {
321 success &= s->erase(i);
322 success &= (s->size() == numEntries - 1 - i);
323 success &= !(s->count(i));
// Erasing the same key twice must report failure the second time.
324 success &= !(s->erase(i));
326 EXPECT_TRUE(success);
328 VLOG(1) << "Final number of subMaps = " << s->numSubMaps();
// Deterministic key scrambler shared by all tests/benchmarks below.
333 inline KeyT randomizeKey(int key) {
334 // We deterministically randomize the key to more accurately simulate
335 // real-world usage, and to avoid pathological performance patterns (e.g.
336 // those related to __gnu_cxx::hash<int64_t>()(1) == 1).
338 // Use a hash function we don't normally use for ints to avoid interactions.
339 return folly::hash::jenkins_rev_mix32(key);
// Per-thread operation count for the MT helpers below; each test/benchmark
// sets this before spawning workers.
342 int numOpsPerThread = 0;
// pthread entry point: thread j inserts its own disjoint key range
// [j*numOpsPerThread, (j+1)*numOpsPerThread) into globalAHM.
344 void* insertThread(void* jj) {
345 int64_t j = (int64_t) jj;
346 for (int i = 0; i < numOpsPerThread; ++i) {
347 KeyT key = randomizeKey(i + j * numOpsPerThread);
348 globalAHM->insert(key, genVal(key));
// pthread entry point: same keying scheme as insertThread, but targets the
// fixed-size global AtomicHashArray instead of the growable map.
353 void* insertThreadArr(void* jj) {
354 int64_t j = (int64_t) jj;
355 for (int i = 0; i < numOpsPerThread; ++i) {
356 KeyT key = randomizeKey(i + j * numOpsPerThread);
357 globalAHA->insert(std::make_pair(key, genVal(key)));
// Set to true once every worker has been spawned; benchmark workers spin on
// this flag so their timed loops start roughly together.
362 std::atomic<bool> runThreadsCreatedAllThreads;
// Spawns numThreads copies of `thread` (thread index 0..n-1 smuggled
// through the void* argument) and joins them all. Per-thread exit values
// are collected into `statuses` when it is non-null. BenchmarkSuspender
// keeps thread setup out of measured benchmark time.
363 void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
364 folly::BenchmarkSuspender susp;
365 runThreadsCreatedAllThreads.store(false);
366 vector<pthread_t> threadIds;
367 for (int64_t j = 0; j < numThreads; j++) {
369 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
370 LOG(ERROR) << "Could not start thread";
372 threadIds.push_back(tid);
// NOTE(review): susp is presumably dismissed on an elided line so the
// workers' runtime IS measured — confirm against the full file.
377 runThreadsCreatedAllThreads.store(true);
378 for (size_t i = 0; i < threadIds.size(); ++i) {
379 pthread_join(threadIds[i], statuses == nullptr ? nullptr : &statuses[i]);
// Convenience overload: FLAGS_numThreads workers, exit statuses discarded.
383 void runThreads(void *(*thread)(void*)) {
384 runThreads(thread, FLAGS_numThreads, nullptr);
// All threads insert the SAME key range concurrently, so after the first
// winner every insert of a key is a collision; verifies the final size is
// exactly the number of unique keys, then times colliding finds.
389 TEST(Ahm, collision_test) {
390 const int numInserts = 1000000 / 4;
392 // Doing the same number on each thread so we collide.
393 numOpsPerThread = numInserts;
// Deliberately undersized (sizeFactor < 1) to force submap growth.
395 float sizeFactor = 0.46;
396 int entrySize = sizeof(KeyT) + sizeof(ValueT);
397 VLOG(1) << "Testing " << numInserts << " unique " << entrySize <<
398 " Byte entries replicated in " << FLAGS_numThreads <<
399 " threads with " << FLAGS_maxLoadFactor * 100.0 << "% max load factor.";
401 globalAHM.reset(new AHMapT(int(numInserts * sizeFactor), config));
403 size_t sizeInit = globalAHM->capacity();
404 VLOG(1) << " Initial capacity: " << sizeInit;
406 double start = nowInUsec();
407 runThreads([](void*) -> void* { // collisionInsertThread
// Note: no thread offset here — every thread hits the same keys.
408 for (int i = 0; i < numOpsPerThread; ++i) {
409 KeyT key = randomizeKey(i);
410 globalAHM->insert(key, genVal(key));
414 double elapsed = nowInUsec() - start;
416 size_t finalCap = globalAHM->capacity();
417 size_t sizeAHM = globalAHM->size();
418 VLOG(1) << elapsed/sizeAHM << " usec per " << FLAGS_numThreads <<
419 " duplicate inserts (atomic).";
420 VLOG(1) << " Final capacity: " << finalCap << " in " <<
421 globalAHM->numSubMaps() << " sub maps (" <<
422 sizeAHM * 100 / finalCap << "% load factor, " <<
423 (finalCap - sizeInit) * 100 / sizeInit << "% growth).";
// Duplicates must have been rejected: exactly numInserts unique entries.
426 EXPECT_EQ(sizeAHM, numInserts);
429 for (int i = 0; i < numInserts; ++i) {
430 KeyT key = randomizeKey(i);
431 success &= (globalAHM->find(key)->second == genVal(key));
433 EXPECT_TRUE(success);
435 // check colliding finds
437 runThreads([](void*) -> void* { // collisionFindThread
// All threads hammer the same key (the key setup line is elided here).
439 for (int i = 0; i < numOpsPerThread; ++i) {
440 globalAHM->find(key);
445 elapsed = nowInUsec() - start;
447 VLOG(1) << elapsed/sizeAHM << " usec per " << FLAGS_numThreads <<
448 " duplicate finds (atomic).";
453 const int kInsertPerThread = 100000;
// Upper bound on the final map size; used to detect a runaway iterator.
454 int raceFinalSizeEstimate;
// pthread entry point: iterate globalAHM while inserter threads run; the
// test fails if one pass visits more elements than could possibly exist.
456 void* raceIterateThread(void* jj) {
457 int64_t j = (int64_t) jj;
460 AHMapT::iterator it = globalAHM->begin();
461 AHMapT::iterator end = globalAHM->end();
462 for (; it != end; ++it) {
464 if (count > raceFinalSizeEstimate) {
465 EXPECT_FALSE("Infinite loop in iterator.");
// pthread entry point: insert kInsertPerThread keys into globalAHM while
// iterator threads run (the key-derivation line is elided in this chunk).
472 void* raceInsertRandomThread(void* jj) {
473 int64_t j = (int64_t) jj;
474 for (int i = 0; i < kInsertPerThread; ++i) {
476 globalAHM->insert(key, genVal(key));
483 // Test for race conditions when inserting and iterating at the same time and
484 // creating multiple submaps.
485 TEST(Ahm, race_insert_iterate_thread_test) {
486 const int kInsertThreads = 20;
487 const int kIterateThreads = 20;
488 raceFinalSizeEstimate = kInsertThreads * kInsertPerThread;
490 VLOG(1) << "Testing iteration and insertion with " << kInsertThreads
491 << " threads inserting and " << kIterateThreads << " threads iterating.";
// Undersized initial capacity (estimate / 9) forces submap creation while
// iterators are live — exactly the race under test.
493 globalAHM.reset(new AHMapT(raceFinalSizeEstimate / 9, config));
495 vector<pthread_t> threadIds;
496 for (int64_t j = 0; j < kInsertThreads + kIterateThreads; j++) {
// The first kInsertThreads ids run the inserter; the rest iterate.
498 void *(*thread)(void*) =
499 (j < kInsertThreads ? raceInsertRandomThread : raceIterateThread);
500 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
501 LOG(ERROR) << "Could not start thread";
503 threadIds.push_back(tid);
506 for (size_t i = 0; i < threadIds.size(); ++i) {
507 pthread_join(threadIds[i], nullptr);
509 VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
510 VLOG(1) << "Final size of map " << globalAHM->size();
515 const int kTestEraseInsertions = 200000;
// Published progress of the single insert thread; eraser threads read this
// to stay safely behind it.
516 std::atomic<int32_t> insertedLevel;
// pthread entry point: insert keys in order, publishing progress (with
// release semantics) after each insert.
518 void* testEraseInsertThread(void*) {
519 for (int i = 0; i < kTestEraseInsertions; ++i) {
520 KeyT key = randomizeKey(i);
521 globalAHM->insert(key, genVal(key));
522 insertedLevel.store(i, std::memory_order_release);
// Sentinel value signalling completion, letting erasers drain the tail.
524 insertedLevel.store(kTestEraseInsertions, std::memory_order_release);
// pthread entry point: erase keys in insertion order while staying `lag`
// elements behind the inserter's published progress. Several erasers race
// on each key; exactly one should win each erase.
528 void* testEraseEraseThread(void*) {
529 for (int i = 0; i < kTestEraseInsertions; ++i) {
531 * Make sure that we don't get ahead of the insert thread, because
532 * part of the condition for this unit test succeeding is that the
535 * Note, there is a subtle case here when a new submap is
536 * allocated: the erasing thread might get 0 from count(key)
537 * because it hasn't seen numSubMaps_ update yet. To avoid this
538 * race causing problems for the test (it's ok for real usage), we
539 * lag behind the inserter by more than just element.
// Spin until key i is at least `lag` elements behind published progress.
544 currentLevel = insertedLevel.load(std::memory_order_acquire);
545 if (currentLevel == kTestEraseInsertions) currentLevel += lag + 1;
546 } while (currentLevel - lag < i);
548 KeyT key = randomizeKey(i);
// Keep trying until some thread (possibly this one) has erased the key.
549 while (globalAHM->count(key)) {
550 if (globalAHM->erase(key)) {
560 // Here we have a single thread inserting some values, and several threads
561 // racing to delete the values in the order they were inserted.
562 TEST(Ahm, thread_erase_insert_race) {
563 const int kInsertThreads = 1;
564 const int kEraseThreads = 10;
566 VLOG(1) << "Testing insertion and erase with " << kInsertThreads
567 << " thread inserting and " << kEraseThreads << " threads erasing.";
569 globalAHM.reset(new AHMapT(kTestEraseInsertions / 4, config));
571 vector<pthread_t> threadIds;
572 for (int64_t j = 0; j < kInsertThreads + kEraseThreads; j++) {
// Thread 0 inserts; the remaining kEraseThreads race to erase.
574 void *(*thread)(void*) =
575 (j < kInsertThreads ? testEraseInsertThread : testEraseEraseThread);
576 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
577 LOG(ERROR) << "Could not start thread";
579 threadIds.push_back(tid);
582 for (size_t i = 0; i < threadIds.size(); i++) {
583 pthread_join(threadIds[i], nullptr);
// Every inserted key must have been erased by the time all threads join.
586 EXPECT_TRUE(globalAHM->empty());
587 EXPECT_EQ(globalAHM->size(), 0);
589 VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
592 // Repro for T#483734: Duplicate AHM inserts due to incorrect AHA return value.
593 typedef AtomicHashArray<int32_t, int32_t> AHA;
594 AHA::Config configRace;
// Tiny 2-slot shared array so every thread races on the same two keys.
595 auto atomicHashArrayInsertRaceArray = AHA::create(2, configRace);
// pthread entry point: try to insert the same 2 keys; count the inserts
// for which a valid iterator came back (the increment of numInserted is on
// an elided line), and return that count via pthread_exit.
596 void* atomicHashArrayInsertRaceThread(void* j) {
597 AHA* arr = atomicHashArrayInsertRaceArray.get();
598 uintptr_t numInserted = 0;
// Spin until all workers exist so the inserts actually race.
599 while (!runThreadsCreatedAllThreads.load());
600 for (int i = 0; i < 2; i++) {
601 if (arr->insert(RecordT(randomizeKey(i), 0)).first != arr->end()) {
605 pthread_exit((void *) numInserted);
// Repeatedly race 4 threads inserting into the 2-slot array; each thread's
// reported count must agree with the array's actual size.
607 TEST(Ahm, atomic_hash_array_insert_race) {
608 AHA* arr = atomicHashArrayInsertRaceArray.get();
// NOTE(review): the local deliberately shadows the global FLAGS_numThreads
// with 4 for this test.
609 int numIterations = 50000, FLAGS_numThreads = 4;
610 void* statuses[FLAGS_numThreads];
611 for (int i = 0; i < numIterations; i++) {
// (per-iteration array reset, if any, is on elided lines)
613 runThreads(atomicHashArrayInsertRaceThread, FLAGS_numThreads, statuses);
614 EXPECT_GE(arr->size(), 1);
615 for (int j = 0; j < FLAGS_numThreads; j++) {
616 EXPECT_EQ(arr->size(), uintptr_t(statuses[j]));
621 // Repro for T#5841499. Race between erase() and find() on the same key.
622 TEST(Ahm, erase_find_race) {
623 const uint64_t limit = 10000;
624 AtomicHashMap<uint64_t, uint64_t> map(limit + 10);
625 std::atomic<uint64_t> key {1};
627 // Invariant: all values are equal to their keys.
628 // At any moment there is one or two consecutive keys in the map.
// Writer: slides the key window forward by inserting k+1 (the matching
// erase of the old key is on elided lines).
630 std::thread write_thread([&]() {
636 map.insert(k + 1, k + 1);
// Reader: whenever find() succeeds on the current key, the stored value
// must equal the key — a mismatch means find raced badly with erase.
641 std::thread read_thread([&]() {
643 uint64_t k = key.load();
648 auto it = map.find(k);
649 if (it != map.end()) {
650 ASSERT_EQ(k, it->second);
659 // Repro for a bug when iterator didn't skip empty submaps.
660 TEST(Ahm, iterator_skips_empty_submaps) {
661 AtomicHashMap<uint64_t, uint64_t>::Config config;
// growthFactor 1 keeps every submap the same (tiny) size, forcing several
// submaps for just a handful of keys.
662 config.growthFactor = 1;
664 AtomicHashMap<uint64_t, uint64_t> map(1, config);
// (the inserts that put keys 1 and 3 — and presumably a since-removed key
// between them — into the map are on elided lines)
672 auto it = map.find(1);
674 ASSERT_NE(map.end(), it);
675 ASSERT_EQ(1, it->first);
676 ASSERT_EQ(1, it->second);
// Advancing (increment on an elided line) must skip any empty submap and
// land directly on key 3.
680 ASSERT_NE(map.end(), it);
681 ASSERT_EQ(3, it->first);
682 ASSERT_EQ(3, it->second);
685 ASSERT_EQ(map.end(), it);
// Populate the global AtomicHashArray with FLAGS_numBMElements entries
// using FLAGS_numThreads inserter threads; prints load throughput.
690 void loadGlobalAha() {
691 std::cout << "loading global AHA with " << FLAGS_numThreads
693 uint64_t start = nowInUsec();
694 globalAHA = AHArrayT::create(maxBMElements, config);
695 numOpsPerThread = FLAGS_numBMElements / FLAGS_numThreads;
// Integer division must be exact, or the remainder elements would silently
// never be inserted and the size check below would fail.
696 CHECK_EQ(0, FLAGS_numBMElements % FLAGS_numThreads) <<
697 "kNumThreads must evenly divide kNumInserts.";
698 runThreads(insertThreadArr);
699 uint64_t elapsed = nowInUsec() - start;
700 std::cout << " took " << elapsed / 1000 << " ms (" <<
701 (elapsed * 1000 / FLAGS_numBMElements) << " ns/insert).\n";
702 EXPECT_EQ(globalAHA->size(), FLAGS_numBMElements);
// Populate the global AtomicHashMap with FLAGS_numBMElements entries using
// FLAGS_numThreads inserter threads; prints load throughput.
705 void loadGlobalAhm() {
706 std::cout << "loading global AHM with " << FLAGS_numThreads
708 uint64_t start = nowInUsec();
709 globalAHM.reset(new AHMapT(maxBMElements, config));
710 numOpsPerThread = FLAGS_numBMElements / FLAGS_numThreads;
711 runThreads(insertThread);
712 uint64_t elapsed = nowInUsec() - start;
713 std::cout << " took " << elapsed / 1000 << " ms (" <<
714 (elapsed * 1000 / FLAGS_numBMElements) << " ns/insert).\n";
715 EXPECT_EQ(globalAHM->size(), FLAGS_numBMElements);
// Single-threaded point lookups against the preloaded global AHA.
720 BENCHMARK(st_aha_find, iters) {
721 CHECK_LE(iters, FLAGS_numBMElements);
722 for (size_t i = 0; i < iters; i++) {
723 KeyT key = randomizeKey(i);
724 folly::doNotOptimizeAway(globalAHA->find(key)->second);
// Single-threaded point lookups against the preloaded global AHM.
728 BENCHMARK(st_ahm_find, iters) {
729 CHECK_LE(iters, FLAGS_numBMElements);
730 for (size_t i = 0; i < iters; i++) {
731 KeyT key = randomizeKey(i);
732 folly::doNotOptimizeAway(globalAHM->find(key)->second);
736 BENCHMARK_DRAW_LINE()
// Multithreaded miss-path lookups: keys are raw (un-randomized) values far
// outside each thread's loaded range, so finds (almost always) miss.
738 BENCHMARK(mt_ahm_miss, iters) {
739 CHECK_LE(iters, FLAGS_numBMElements);
740 numOpsPerThread = iters / FLAGS_numThreads;
741 runThreads([](void* jj) -> void* {
742 int64_t j = (int64_t) jj;
// Spin so all workers enter the timed loop together.
743 while (!runThreadsCreatedAllThreads.load());
744 for (int i = 0; i < numOpsPerThread; ++i) {
745 KeyT key = i + j * numOpsPerThread * 100;
746 folly::doNotOptimizeAway(globalAHM->find(key) == globalAHM->end());
// Single-threaded miss-path lookups: keys offset by iters*100 past the
// loaded range.
752 BENCHMARK(st_ahm_miss, iters) {
753 CHECK_LE(iters, FLAGS_numBMElements);
754 for (size_t i = 0; i < iters; i++) {
755 KeyT key = randomizeKey(i + iters * 100);
756 folly::doNotOptimizeAway(globalAHM->find(key) == globalAHM->end());
// Multithreaded mixed workload: ~99% finds with ~1% inserts (only
// i % 128 == 0 takes the insert branch).
760 BENCHMARK(mt_ahm_find_insert_mix, iters) {
761 CHECK_LE(iters, FLAGS_numBMElements);
762 numOpsPerThread = iters / FLAGS_numThreads;
763 runThreads([](void* jj) -> void* {
764 int64_t j = (int64_t) jj;
765 while (!runThreadsCreatedAllThreads.load());
766 for (int i = 0; i < numOpsPerThread; ++i) {
767 if (i % 128) { // ~1% insert mix
768 KeyT key = randomizeKey(i + j * numOpsPerThread);
769 folly::doNotOptimizeAway(globalAHM->find(key)->second);
// else-branch (the `} else {` line is elided): insert a key derived from a
// range outside the find keys.
771 KeyT key = randomizeKey(i + j * numOpsPerThread * 100);
772 globalAHM->insert(key, genVal(key));
// Multithreaded hit-path lookups against the preloaded global AHA; each
// thread scans its own key range.
779 BENCHMARK(mt_aha_find, iters) {
780 CHECK_LE(iters, FLAGS_numBMElements);
781 numOpsPerThread = iters / FLAGS_numThreads;
782 runThreads([](void* jj) -> void* {
783 int64_t j = (int64_t) jj;
784 while (!runThreadsCreatedAllThreads.load());
785 for (int i = 0; i < numOpsPerThread; ++i) {
786 KeyT key = randomizeKey(i + j * numOpsPerThread);
787 folly::doNotOptimizeAway(globalAHA->find(key)->second);
// Multithreaded hit-path lookups against the preloaded global AHM; each
// thread scans its own key range.
793 BENCHMARK(mt_ahm_find, iters) {
794 CHECK_LE(iters, FLAGS_numBMElements);
795 numOpsPerThread = iters / FLAGS_numThreads;
796 runThreads([](void* jj) -> void* {
797 int64_t j = (int64_t) jj;
798 while (!runThreadsCreatedAllThreads.load());
799 for (int i = 0; i < numOpsPerThread; ++i) {
800 KeyT key = randomizeKey(i + j * numOpsPerThread);
801 folly::doNotOptimizeAway(globalAHM->find(key)->second);
// Baseline: cost of randomizeKey + a modulus alone, for comparison against
// the map benchmarks. `k` is presumably a file-scope sink variable — its
// declaration is not visible in this chunk.
808 BENCHMARK(st_baseline_modulus_and_random, iters) {
809 for (size_t i = 0; i < iters; ++i) {
810 k = randomizeKey(i) % iters;
814 // insertions go last because they reset the map
// Multithreaded insert throughput into a freshly allocated map sized so
// the target load factor is hit at `iters` elements.
816 BENCHMARK(mt_ahm_insert, iters) {
818 globalAHM.reset(new AHMapT(int(iters * LF), config));
819 numOpsPerThread = iters / FLAGS_numThreads;
821 runThreads(insertThread);
// Single-threaded insert throughput. BenchmarkSuspender keeps the map
// allocation out of the timing (its dismiss is presumably on an elided
// line — confirm against the full file).
824 BENCHMARK(st_ahm_insert, iters) {
825 folly::BenchmarkSuspender susp;
826 std::unique_ptr<AHMapT> ahm(new AHMapT(int(iters * LF), config));
829 for (size_t i = 0; i < iters; i++) {
830 KeyT key = randomizeKey(i);
831 ahm->insert(key, genVal(key));
// One-time benchmark configuration: propagate flag values into the AHM/AHA
// configs and derive iteration/thread counts from the machine.
835 void benchmarkSetup() {
836 config.maxLoadFactor = FLAGS_maxLoadFactor;
837 configRace.maxLoadFactor = 0.5;
838 int numCores = sysconf(_SC_NPROCESSORS_ONLN);
// Cap benchmark iterations at 1M, or at the map size if smaller.
841 string numIters = folly::to<string>(
842 std::min(1000000, int(FLAGS_numBMElements)));
// SET_FLAG_IF_DEFAULT: only override flags the user did not pass explicitly.
844 gflags::SetCommandLineOptionWithMode(
845 "bm_max_iters", numIters.c_str(), gflags::SET_FLAG_IF_DEFAULT
847 gflags::SetCommandLineOptionWithMode(
848 "bm_min_iters", numIters.c_str(), gflags::SET_FLAG_IF_DEFAULT
// Default the MT tests to one thread per logical core.
850 string numCoresStr = folly::to<string>(numCores);
851 gflags::SetCommandLineOptionWithMode(
852 "numThreads", numCoresStr.c_str(), gflags::SET_FLAG_IF_DEFAULT
855 std::cout << "\nRunning AHM benchmarks on machine with " << numCores
856 << " logical cores.\n"
857 " num elements per map: " << FLAGS_numBMElements << "\n"
858 << " num threads for mt tests: " << FLAGS_numThreads << "\n"
859 << " AHM load factor: " << FLAGS_targetLoadFactor << "\n\n";
// Run the gtest suite; if every test passed (RUN_ALL_TESTS returns 0) and
// --benchmark was given, run the benchmarks as well (the benchmarkSetup()
// call is presumably on an elided line between these two).
862 int main(int argc, char** argv) {
863 testing::InitGoogleTest(&argc, argv);
864 gflags::ParseCommandLineFlags(&argc, &argv, true);
865 auto ret = RUN_ALL_TESTS();
866 if (!ret && FLAGS_benchmark) {
868 folly::runBenchmarks();
874 Benchmarks run on dual Xeon X5650's @ 2.67GHz w/hyperthreading enabled
875 (12 physical cores, 12 MB cache, 72 GB RAM)
877 Running AHM benchmarks on machine with 24 logical cores.
878 num elements per map: 12000000
879 num threads for mt tests: 24
880 AHM load factor: 0.75
882 Benchmark Iters Total t t/iter iter/sec
883 ------------------------------------------------------------------------------
884 Comparing benchmarks: BM_mt_aha_find,BM_mt_ahm_find
885 * BM_mt_aha_find 1000000 7.767 ms 7.767 ns 122.8 M
886 +0.81% BM_mt_ahm_find 1000000 7.83 ms 7.83 ns 121.8 M
887 ------------------------------------------------------------------------------
888 Comparing benchmarks: BM_st_aha_find,BM_st_ahm_find
889 * BM_st_aha_find 1000000 57.83 ms 57.83 ns 16.49 M
890 +77.9% BM_st_ahm_find 1000000 102.9 ms 102.9 ns 9.27 M
891 ------------------------------------------------------------------------------
892 BM_mt_ahm_miss 1000000 2.937 ms 2.937 ns 324.7 M
893 BM_st_ahm_miss 1000000 164.2 ms 164.2 ns 5.807 M
894 BM_mt_ahm_find_insert_mix 1000000 8.797 ms 8.797 ns 108.4 M
895 BM_mt_ahm_insert 1000000 17.39 ms 17.39 ns 54.83 M
896 BM_st_ahm_insert 1000000 106.8 ms 106.8 ns 8.93 M
897 BM_st_baseline_modulus_and_rando 1000000 6.223 ms 6.223 ns 153.2 M