2 * Copyright 2014 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include <folly/AtomicHashMap.h>
19 #include <glog/logging.h>
20 #include <gtest/gtest.h>
25 #include <folly/Benchmark.h>
26 #include <folly/Conv.h>
30 using folly::AtomicHashMap;
31 using folly::AtomicHashArray;
// Runtime-tunable parameters for the tests and benchmarks (gflags).
34 DEFINE_double(targetLoadFactor, 0.75, "Target memory utilization fraction.");
35 DEFINE_double(maxLoadFactor, 0.80, "Max before growth.");
36 DEFINE_int32(numThreads, 8, "Threads to use for concurrency tests.");
37 DEFINE_int64(numBMElements, 12 * 1000 * 1000, "Size of maps for benchmarks.");
// Oversizing ratio so that a map filled to maxLoadFactor realizes the target
// load factor.  NOTE(review): these consts are computed during static
// initialization, i.e. before gflags parsing in main() (see the
// ParseCommandLineFlags call later in this file), so they reflect the flag
// DEFAULTS regardless of any command-line overrides.
39 const double LF = FLAGS_maxLoadFactor / FLAGS_targetLoadFactor;
40 const int maxBMElements = int(FLAGS_numBMElements * LF); // hit our target LF.
// Current wall-clock time in microseconds since the epoch.  The declaration
// of `tv` and the gettimeofday() call that fills it are in lines elided from
// this view — TODO confirm against the full source.
42 static int64_t nowInUsec() {
45 return int64_t(tv.tv_sec) * 1000 * 1000 + tv.tv_usec;
// Basic sanity with a non-trivial (std::string) value type: empty-map
// iterator equality, insert/find round-trips, and duplicate-key semantics.
48 TEST(Ahm, BasicStrings) {
49 typedef AtomicHashMap<int64_t,string> AHM;
// An empty map: begin() must compare equal to end().
51 EXPECT_TRUE(myMap.begin() == myMap.end());
53 for (int i = 0; i < 100; ++i) {
54 myMap.insert(make_pair(i, folly::to<string>(i)));
56 for (int i = 0; i < 100; ++i) {
57 EXPECT_EQ(myMap.find(i)->second, folly::to<string>(i));
// A duplicate insert must NOT overwrite the value already stored...
60 myMap.insert(std::make_pair(999, "A"));
61 myMap.insert(std::make_pair(999, "B"));
62 EXPECT_EQ(myMap.find(999)->second, "A"); // shouldn't have overwritten
// ...but the mapped value is mutable in place through the iterator.
63 myMap.find(999)->second = "B";
64 myMap.find(999)->second = "C";
65 EXPECT_EQ(myMap.find(999)->second, "C");
66 EXPECT_EQ(myMap.find(999)->first, 999);
// Verifies the map works with move-only mapped types (std::unique_ptr),
// inserting via both the pair overload and the key/value overload.
70 TEST(Ahm, BasicNoncopyable) {
71 typedef AtomicHashMap<int64_t,std::unique_ptr<int>> AHM;
73 EXPECT_TRUE(myMap.begin() == myMap.end());
// First half: pair-insert of moved unique_ptrs.
75 for (int i = 0; i < 50; ++i) {
76 myMap.insert(make_pair(i, std::unique_ptr<int>(new int(i))));
// Second half: the (key, value) insert overload.
78 for (int i = 50; i < 100; ++i) {
79 myMap.insert(i, std::unique_ptr<int>(new int (i)));
81 for (int i = 0; i < 100; ++i) {
82 EXPECT_EQ(*(myMap.find(i)->second), i);
// Every fourth key is erased (the erase calls are in elided lines) and must
// subsequently be absent.
84 for (int i = 0; i < 100; i+=4) {
87 for (int i = 0; i < 100; i+=4) {
88 EXPECT_EQ(myMap.find(i), myMap.end());
// Shared aliases and globals used by the remaining tests and benchmarks.
93 typedef int32_t ValueT;
95 typedef AtomicHashMap<KeyT,ValueT> AHMapT;
96 typedef AHMapT::value_type RecordT;
97 typedef AtomicHashArray<KeyT,ValueT> AHArrayT;
// Shared Config; maxLoadFactor is set later in benchmarkSetup().
99 AHArrayT::Config config;
// File-scope containers so plain-function pthread workers can reach them.
100 static AHArrayT::SmartPtr globalAHA(nullptr);
101 static std::unique_ptr<AHMapT> globalAHM;
103 // Generate a deterministic value based on an input key
// (Body elided from this view — presumably a pure function of `key`;
// the tests below rely on genVal(i) being reproducible for the same i.)
104 static int genVal(int key) {
// Fragment of the "grow" test (the TEST header line is elided from this
// view).  Exercises: insert with index-stable findAt(), duplicate-insert
// semantics, growth into multiple submaps, iterator round-trips, in-place
// value mutation, and reinsert after a clear (clear itself is elided).
109 VLOG(1) << "Overhead: " << sizeof(AHArrayT) << " (array) " <<
110 sizeof(AHMapT) + sizeof(AHArrayT) << " (map/set) Bytes.";
111 uint64_t numEntries = 10000;
112 float sizeFactor = 0.46;
// Deliberately undersized so the map must grow past one submap.
114 std::unique_ptr<AHMapT> m(new AHMapT(int(numEntries * sizeFactor), config));
116 // load map - make sure we succeed and the index is accurate
118 for (uint64_t i = 0; i < numEntries; i++) {
119 auto ret = m->insert(RecordT(i, genVal(i)));
120 success &= ret.second;
// findAt() with the returned index must resolve to the same record.
121 success &= (m->findAt(ret.first.getIndex())->second == genVal(i));
123 // Overwrite vals to make sure there are no dups
124 // Every insert should fail because the keys are already in the map.
126 for (uint64_t i = 0; i < numEntries; i++) {
127 auto ret = m->insert(RecordT(i, genVal(i * 2)));
128 success &= (ret.second == false); // fail on collision
129 success &= (ret.first->second == genVal(i)); // return the previous value
130 success &= (m->findAt(ret.first.getIndex())->second == genVal(i));
132 EXPECT_TRUE(success);
135 size_t cap = m->capacity();
137 EXPECT_GT(m->numSubMaps(), 1); // make sure we grew
139 EXPECT_EQ(m->size(), numEntries);
140 for (size_t i = 0; i < numEntries; i++) {
141 success &= (m->find(i)->second == genVal(i));
143 EXPECT_TRUE(success);
// Round-trip every element through getIndex()/findAt().
148 AHMapT::const_iterator retIt;
149 for (int32_t i = 0; i < int32_t(numEntries); i++) {
151 retIt = m->findAt(retIt.getIndex());
152 success &= (retIt->second == genVal(i));
153 // We use a uint32_t index so that this comparison is between two
154 // variables of the same type.
155 success &= (retIt->first == i);
157 EXPECT_TRUE(success);
159 // Try modifying value
160 m->find(8)->second = 5309;
161 EXPECT_EQ(m->find(8)->second, 5309);
// Reinsert half the entries (after an elided clear) and verify the size.
166 for (uint64_t i = 0; i < numEntries / 2; i++) {
167 success &= m->insert(RecordT(i, genVal(i))).second;
169 EXPECT_TRUE(success);
170 EXPECT_EQ(m->size(), numEntries / 2);
// Iteration must visit every inserted entry exactly once, with each entry's
// value consistent with its key (the iteration loop itself is elided).
173 TEST(Ahm, iterator) {
174 int numEntries = 10000;
175 float sizeFactor = .46;
// Undersized so iteration crosses submap boundaries.
176 std::unique_ptr<AHMapT> m(new AHMapT(int(numEntries * sizeFactor), config));
178 // load map - make sure we succeed and the index is accurate
179 for (int i = 0; i < numEntries; i++) {
180 m->insert(RecordT(i, genVal(i)));
186 success &= (it->second == genVal(it->first));
189 EXPECT_TRUE(success);
190 EXPECT_EQ(count, numEntries);
// Fragment of a Counters helper class (the class declaration line is elided
// from this view): a concurrent counter map keyed by object id, built on
// AtomicHashMap.  Used by the thread-safety test below.
195 // Note: Unfortunately can't currently put a std::atomic<int64_t> in
196 // the value in ahm since it doesn't support types that are both non-copy
197 // and non-move constructible yet.
198 AtomicHashMap<int64_t,int64_t> ahm;
201 explicit Counters(size_t numCounters) : ahm(numCounters) {}
// Atomically bump the counter for obj_id, creating it at 1 on first use.
203 void increment(int64_t obj_id) {
204 auto ret = ahm.insert(std::make_pair(obj_id, 1));
206 // obj_id already exists, increment count
// GCC builtin atomic add on the mapped value, in place in the table.
207 __sync_fetch_and_add(&ret.first->second, 1);
// Returns the current count, or 0 if obj_id was never incremented.
211 int64_t getValue(int64_t obj_id) {
212 auto ret = ahm.find(obj_id);
213 return ret != ahm.end() ? ret->second : 0;
216 // export the counters without blocking increments
// Rough capacity guess: ~32 chars per " [id:count]\n" entry.
219 ret.reserve(ahm.size() * 32);
220 for (const auto& e : ahm) {
221 ret += folly::to<string>(
222 " [", e.first, ":", e.second, "]\n");
// Fragment of a test (the TEST header is elided) that hammers Counters from
// many threads — key*mult increments per key — then checks both getValue()
// and the toString() export agree with the expected totals.
229 // If you get an error "terminate called without an active exception", there
230 // might be too many threads getting created - decrease numKeys and/or mult.
232 const int numKeys = 10;
235 vector<int64_t> keys;
236 FOR_EACH_RANGE(i, 1, numKeys) {
239 vector<std::thread> threads;
// One thread per increment, so increments on the same key truly race.
240 for (auto key : keys) {
241 FOR_EACH_RANGE(i, 0, key * mult) {
242 threads.push_back(std::thread([&, key] { c.increment(key); }));
245 for (auto& t : threads) {
248 string str = c.toString();
249 for (auto key : keys) {
250 int val = key * mult;
251 EXPECT_EQ(val, c.getValue(key));
252 EXPECT_NE(string::npos, str.find(folly::to<string>("[",key,":",val,"]")));
// Fragment of a value type used by the exception-safety test (the class
// declaration line is elided).  Its copy-assignment alternates between
// succeeding and failing: the static flag flips on every assignment, and the
// taken branch presumably throws (the throw statement is in elided lines —
// TODO confirm against the full source).
259 explicit Integer(KeyT v = 0) : v_(v) {}
261 Integer& operator=(const Integer& a) {
262 static bool throwException_ = false;
263 throwException_ = !throwException_;
264 if (throwException_) {
271 bool operator==(const Integer& a) const { return v_ == a.v_; }
// Exception safety: inserting Integer values whose assignment throws on
// alternate calls must leave the map consistent — failed inserts leave no
// entry behind, and the final size matches the successful-insert count.
277 TEST(Ahm, map_exception_safety) {
278 typedef AtomicHashMap<KeyT,Integer> MyMapT;
280 int numEntries = 10000;
281 float sizeFactor = 0.46;
282 std::unique_ptr<MyMapT> m(new MyMapT(int(numEntries * sizeFactor)));
286 for (int i = 0; i < numEntries; i++) {
// The try/catch around this insert is in elided lines.
288 m->insert(i, Integer(genVal(i)));
289 success &= (m->find(i)->second == Integer(genVal(i)));
// On a thrown insert the key must NOT be present.
292 success &= !m->count(i);
295 EXPECT_EQ(count, m->size());
296 EXPECT_TRUE(success);
// Fill-then-erase cycles, repeated so erase is exercised across more than
// one submap; checks size bookkeeping and that erased keys stay gone.
299 TEST(Ahm, basicErase) {
300 size_t numEntries = 3000;
302 std::unique_ptr<AHMapT> s(new AHMapT(numEntries, config));
303 // Iterate filling up the map and deleting all keys a few times
304 // to test more than one subMap.
305 for (int iterations = 0; iterations < 4; ++iterations) {
306 // Testing insertion of keys
308 for (size_t i = 0; i < numEntries; ++i) {
// Key must be absent before insert and present after.
309 success &= !(s->count(i));
310 auto ret = s->insert(RecordT(i, i));
311 success &= s->count(i);
312 success &= ret.second;
314 EXPECT_TRUE(success);
315 EXPECT_EQ(s->size(), numEntries);
317 // Delete every key in the map and verify that the key is gone and the the
320 for (size_t i = 0; i < numEntries; ++i) {
321 success &= s->erase(i);
// Size must shrink by exactly one per erase...
322 success &= (s->size() == numEntries - 1 - i);
323 success &= !(s->count(i));
// ...and a second erase of the same key must fail.
324 success &= !(s->erase(i));
326 EXPECT_TRUE(success);
328 VLOG(1) << "Final number of subMaps = " << s->numSubMaps();
// Deterministically scrambles a sequential key for benchmark/test inputs.
333 inline KeyT randomizeKey(int key) {
334 // We deterministically randomize the key to more accurately simulate
335 // real-world usage, and to avoid pathalogical performance patterns (e.g.
336 // those related to __gnu_cxx::hash<int64_t>()(1) == 1).
338 // Use a hash function we don't normally use for ints to avoid interactions.
339 return folly::hash::jenkins_rev_mix32(key);
// Per-thread operation count, set by each test/benchmark before it spawns
// workers via runThreads().
342 int numOpsPerThread = 0;
// pthread worker: inserts a disjoint key range (offset by thread index j)
// into the global AtomicHashMap.
344 void* insertThread(void* jj) {
345 int64_t j = (int64_t) jj;
346 for (int i = 0; i < numOpsPerThread; ++i) {
347 KeyT key = randomizeKey(i + j * numOpsPerThread);
348 globalAHM->insert(key, genVal(key));
// Same as insertThread, but targets the global AtomicHashArray.
353 void* insertThreadArr(void* jj) {
354 int64_t j = (int64_t) jj;
355 for (int i = 0; i < numOpsPerThread; ++i) {
356 KeyT key = randomizeKey(i + j * numOpsPerThread);
357 globalAHA->insert(std::make_pair(key, genVal(key)));
// Flag workers can spin on so they all start their timed work together
// (set to true only after every thread has been created).
362 std::atomic<bool> runThreadsCreatedAllThreads;
// Spawns numThreads pthreads running `thread` (thread index passed as the
// void* argument), then joins them all; per-thread exit statuses are written
// into `statuses` when it is non-null.  Thread creation/joining happens
// under a BenchmarkSuspender so it is excluded from benchmark timings.
363 void runThreads(void *(*thread)(void*), int numThreads, void **statuses) {
364 folly::BenchmarkSuspender susp;
365 runThreadsCreatedAllThreads.store(false);
366 vector<pthread_t> threadIds;
367 for (int64_t j = 0; j < numThreads; j++) {
369 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
370 LOG(ERROR) << "Could not start thread";
372 threadIds.push_back(tid);
377 runThreadsCreatedAllThreads.store(true);
378 for (size_t i = 0; i < threadIds.size(); ++i) {
379 pthread_join(threadIds[i], statuses == nullptr ? nullptr : &statuses[i]);
// Convenience overload: FLAGS_numThreads workers, statuses discarded.
383 void runThreads(void *(*thread)(void*)) {
384 runThreads(thread, FLAGS_numThreads, nullptr);
// All threads insert (then find) the SAME key range, so every operation
// after the first insert of a key collides; verifies the final size equals
// the number of unique keys and that values survived the contention.
389 TEST(Ahm, collision_test) {
390 const int numInserts = 1000000 / 4;
392 // Doing the same number on each thread so we collide.
393 numOpsPerThread = numInserts;
395 float sizeFactor = 0.46;
396 int entrySize = sizeof(KeyT) + sizeof(ValueT);
397 VLOG(1) << "Testing " << numInserts << " unique " << entrySize <<
398 " Byte entries replicated in " << FLAGS_numThreads <<
399 " threads with " << FLAGS_maxLoadFactor * 100.0 << "% max load factor.";
// Undersized on purpose so growth happens under concurrent inserts.
401 globalAHM.reset(new AHMapT(int(numInserts * sizeFactor), config));
403 size_t sizeInit = globalAHM->capacity();
404 VLOG(1) << " Initial capacity: " << sizeInit;
406 double start = nowInUsec();
407 runThreads([](void*) -> void* { // collisionInsertThread
408 for (int i = 0; i < numOpsPerThread; ++i) {
409 KeyT key = randomizeKey(i);
410 globalAHM->insert(key, genVal(key));
414 double elapsed = nowInUsec() - start;
416 size_t finalCap = globalAHM->capacity();
417 size_t sizeAHM = globalAHM->size();
418 VLOG(1) << elapsed/sizeAHM << " usec per " << FLAGS_numThreads <<
419 " duplicate inserts (atomic).";
420 VLOG(1) << " Final capacity: " << finalCap << " in " <<
421 globalAHM->numSubMaps() << " sub maps (" <<
422 sizeAHM * 100 / finalCap << "% load factor, " <<
423 (finalCap - sizeInit) * 100 / sizeInit << "% growth).";
// Despite N threads racing on each key, each unique key appears once.
426 EXPECT_EQ(sizeAHM, numInserts);
429 for (int i = 0; i < numInserts; ++i) {
430 KeyT key = randomizeKey(i);
431 success &= (globalAHM->find(key)->second == genVal(key));
433 EXPECT_TRUE(success);
435 // check colliding finds
437 runThreads([](void*) -> void* { // collisionFindThread
439 for (int i = 0; i < numOpsPerThread; ++i) {
440 globalAHM->find(key);
445 elapsed = nowInUsec() - start;
447 VLOG(1) << elapsed/sizeAHM << " usec per " << FLAGS_numThreads <<
448 " duplicate finds (atomic).";
// Helpers for the insert-vs-iterate race test below.
453 const int kInsertPerThread = 100000;
454 int raceFinalSizeEstimate;
// Worker: repeatedly iterates the whole global map while other threads
// insert.  If iteration ever yields more elements than could possibly have
// been inserted, the iterator is looping and the test fails.
456 void* raceIterateThread(void* jj) {
457 int64_t j = (int64_t) jj;
460 AHMapT::iterator it = globalAHM->begin();
461 AHMapT::iterator end = globalAHM->end();
462 for (; it != end; ++it) {
464 if (count > raceFinalSizeEstimate) {
465 EXPECT_FALSE("Infinite loop in iterator.");
// Worker: inserts kInsertPerThread keys (key generation is in elided lines)
// concurrently with the iterating threads.
472 void* raceInsertRandomThread(void* jj) {
473 int64_t j = (int64_t) jj;
474 for (int i = 0; i < kInsertPerThread; ++i) {
476 globalAHM->insert(key, genVal(key));
483 // Test for race conditions when inserting and iterating at the same time and
484 // creating multiple submaps.
485 TEST(Ahm, race_insert_iterate_thread_test) {
486 const int kInsertThreads = 20;
487 const int kIterateThreads = 20;
488 raceFinalSizeEstimate = kInsertThreads * kInsertPerThread;
490 VLOG(1) << "Testing iteration and insertion with " << kInsertThreads
491 << " threads inserting and " << kIterateThreads << " threads iterating.";
// Start well below the estimated final size so submap growth happens while
// both kinds of threads are running.
493 globalAHM.reset(new AHMapT(raceFinalSizeEstimate / 9, config));
495 vector<pthread_t> threadIds;
// First kInsertThreads indices run the inserter, the rest iterate.
496 for (int64_t j = 0; j < kInsertThreads + kIterateThreads; j++) {
498 void *(*thread)(void*) =
499 (j < kInsertThreads ? raceInsertRandomThread : raceIterateThread);
500 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
501 LOG(ERROR) << "Could not start thread";
503 threadIds.push_back(tid);
506 for (size_t i = 0; i < threadIds.size(); ++i) {
507 pthread_join(threadIds[i], nullptr);
// No assertions beyond the iterator check inside the workers — the test
// passes if nothing crashed/looped; results are logged for inspection.
509 VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
510 VLOG(1) << "Final size of map " << globalAHM->size();
// Helpers for the erase-vs-insert race test below.
515 const int kTestEraseInsertions = 200000;
// High-water mark published by the inserter so erasers can trail behind it.
516 std::atomic<int32_t> insertedLevel;
// Inserter: adds keys in order, publishing its progress after each insert
// (release pairs with the erasers' acquire load).
518 void* testEraseInsertThread(void*) {
519 for (int i = 0; i < kTestEraseInsertions; ++i) {
520 KeyT key = randomizeKey(i);
521 globalAHM->insert(key, genVal(key));
522 insertedLevel.store(i, std::memory_order_release);
// Sentinel: signals the erasers that insertion is finished.
524 insertedLevel.store(kTestEraseInsertions, std::memory_order_release);
// Erasers: race each other to delete keys in insertion order, staying a
// fixed lag behind the inserter's published progress.
528 void* testEraseEraseThread(void*) {
529 for (int i = 0; i < kTestEraseInsertions; ++i) {
531 * Make sure that we don't get ahead of the insert thread, because
532 * part of the condition for this unit test succeeding is that the
535 * Note, there is a subtle case here when a new submap is
536 * allocated: the erasing thread might get 0 from count(key)
537 * because it hasn't seen numSubMaps_ update yet. To avoid this
538 * race causing problems for the test (it's ok for real usage), we
539 * lag behind the inserter by more than just element.
544 currentLevel = insertedLevel.load(std::memory_order_acquire);
// Once insertion is done the lag no longer applies.
545 if (currentLevel == kTestEraseInsertions) currentLevel += lag + 1;
546 } while (currentLevel - lag < i);
548 KeyT key = randomizeKey(i);
// Loop until some eraser (maybe this one) has removed the key.
549 while (globalAHM->count(key)) {
550 if (globalAHM->erase(key)) {
560 // Here we have a single thread inserting some values, and several threads
561 // racing to delete the values in the order they were inserted.
562 TEST(Ahm, thread_erase_insert_race) {
563 const int kInsertThreads = 1;
564 const int kEraseThreads = 10;
566 VLOG(1) << "Testing insertion and erase with " << kInsertThreads
567 << " thread inserting and " << kEraseThreads << " threads erasing.";
// Undersized so erase is exercised across submap growth.
569 globalAHM.reset(new AHMapT(kTestEraseInsertions / 4, config));
571 vector<pthread_t> threadIds;
// Thread index 0 inserts; the remaining indices erase.
572 for (int64_t j = 0; j < kInsertThreads + kEraseThreads; j++) {
574 void *(*thread)(void*) =
575 (j < kInsertThreads ? testEraseInsertThread : testEraseEraseThread);
576 if (pthread_create(&tid, nullptr, thread, (void*) j) != 0) {
577 LOG(ERROR) << "Could not start thread";
579 threadIds.push_back(tid);
582 for (size_t i = 0; i < threadIds.size(); i++) {
583 pthread_join(threadIds[i], nullptr);
// Every inserted key must have been erased exactly once overall.
586 EXPECT_TRUE(globalAHM->empty());
587 EXPECT_EQ(globalAHM->size(), 0);
589 VLOG(1) << "Ended up with " << globalAHM->numSubMaps() << " submaps";
592 // Repro for T#483734: Duplicate AHM inserts due to incorrect AHA return value.
593 typedef AtomicHashArray<int32_t, int32_t> AHA;
594 AHA::Config configRace;
// Tiny (2-slot) shared array so concurrent inserts collide constantly.
595 auto atomicHashArrayInsertRaceArray = AHA::create(2, configRace);
// Worker: spins until all threads are created, then tries to insert the same
// two keys; counts how many inserts this thread believes it performed
// (the increment is in elided lines) and returns the count via pthread_exit.
596 void* atomicHashArrayInsertRaceThread(void* j) {
597 AHA* arr = atomicHashArrayInsertRaceArray.get();
598 uintptr_t numInserted = 0;
599 while (!runThreadsCreatedAllThreads.load());
600 for (int i = 0; i < 2; i++) {
601 if (arr->insert(RecordT(randomizeKey(i), 0)).first != arr->end()) {
605 pthread_exit((void *) numInserted);
// Repeatedly races 4 threads inserting into the 2-slot array; every thread's
// reported insert count must match the array's actual size (i.e. no thread
// was told its insert succeeded when it really collided).
607 TEST(Ahm, atomic_hash_array_insert_race) {
608 AHA* arr = atomicHashArrayInsertRaceArray.get();
// NOTE: this local deliberately shadows the FLAGS_numThreads global for the
// duration of the test, pinning the race to 4 threads.
609 int numIterations = 50000, FLAGS_numThreads = 4;
610 void* statuses[FLAGS_numThreads];
611 for (int i = 0; i < numIterations; i++) {
// The per-iteration array reset/clear is in elided lines.
613 runThreads(atomicHashArrayInsertRaceThread, FLAGS_numThreads, statuses);
614 EXPECT_GE(arr->size(), 1);
615 for (int j = 0; j < FLAGS_numThreads; j++) {
616 EXPECT_EQ(arr->size(), uintptr_t(statuses[j]));
621 // Repro for a bug when iterator didn't skip empty submaps.
622 TEST(Ahm, iterator_skips_empty_submaps) {
623 AtomicHashMap<uint64_t, uint64_t>::Config config;
// growthFactor 1 + capacity 1 forces a new submap per insert, so erasing
// middle keys leaves genuinely empty submaps for the iterator to skip.
624 config.growthFactor = 1;
626 AtomicHashMap<uint64_t, uint64_t> map(1, config);
// (Inserts and the erase of the middle key are in elided lines.)
634 auto it = map.find(1);
636 ASSERT_NE(map.end(), it);
637 ASSERT_EQ(1, it->first);
638 ASSERT_EQ(1, it->second);
// Advancing must land on key 3, skipping the emptied submap in between.
642 ASSERT_NE(map.end(), it);
643 ASSERT_EQ(3, it->first);
644 ASSERT_EQ(3, it->second);
// A further advance reaches the end without looping.
647 ASSERT_EQ(map.end(), it);
// Populates the global AtomicHashArray with FLAGS_numBMElements entries
// using FLAGS_numThreads insert workers, logging the load time.  Used as
// setup for the find benchmarks below.
652 void loadGlobalAha() {
653 std::cout << "loading global AHA with " << FLAGS_numThreads
655 uint64_t start = nowInUsec();
656 globalAHA = AHArrayT::create(maxBMElements, config);
657 numOpsPerThread = FLAGS_numBMElements / FLAGS_numThreads;
// Workers split the key space evenly; an uneven split would silently drop
// the remainder, hence the CHECK.
658 CHECK_EQ(0, FLAGS_numBMElements % FLAGS_numThreads) <<
659 "kNumThreads must evenly divide kNumInserts.";
660 runThreads(insertThreadArr);
661 uint64_t elapsed = nowInUsec() - start;
662 std::cout << " took " << elapsed / 1000 << " ms (" <<
663 (elapsed * 1000 / FLAGS_numBMElements) << " ns/insert).\n";
664 EXPECT_EQ(globalAHA->size(), FLAGS_numBMElements);
// Same as loadGlobalAha(), but populates the global AtomicHashMap.
667 void loadGlobalAhm() {
668 std::cout << "loading global AHM with " << FLAGS_numThreads
670 uint64_t start = nowInUsec();
671 globalAHM.reset(new AHMapT(maxBMElements, config));
672 numOpsPerThread = FLAGS_numBMElements / FLAGS_numThreads;
673 runThreads(insertThread);
674 uint64_t elapsed = nowInUsec() - start;
675 std::cout << " took " << elapsed / 1000 << " ms (" <<
676 (elapsed * 1000 / FLAGS_numBMElements) << " ns/insert).\n";
677 EXPECT_EQ(globalAHM->size(), FLAGS_numBMElements);
// Single-threaded find() throughput on the preloaded AtomicHashArray.
682 BENCHMARK(st_aha_find, iters) {
683 CHECK_LE(iters, FLAGS_numBMElements);
684 for (size_t i = 0; i < iters; i++) {
685 KeyT key = randomizeKey(i);
// doNotOptimizeAway keeps the compiler from eliding the lookup.
686 folly::doNotOptimizeAway(globalAHA->find(key)->second);
// Single-threaded find() throughput on the preloaded AtomicHashMap.
690 BENCHMARK(st_ahm_find, iters) {
691 CHECK_LE(iters, FLAGS_numBMElements);
692 for (size_t i = 0; i < iters; i++) {
693 KeyT key = randomizeKey(i);
694 folly::doNotOptimizeAway(globalAHM->find(key)->second);
698 BENCHMARK_DRAW_LINE()
// Multi-threaded miss-path benchmark: every lookup uses a key pattern that
// was never inserted, so find() must return end().
700 BENCHMARK(mt_ahm_miss, iters) {
701 CHECK_LE(iters, FLAGS_numBMElements);
702 numOpsPerThread = iters / FLAGS_numThreads;
703 runThreads([](void* jj) -> void* {
704 int64_t j = (int64_t) jj;
// Spin until all workers exist so the timed region is fully parallel.
705 while (!runThreadsCreatedAllThreads.load());
706 for (int i = 0; i < numOpsPerThread; ++i) {
707 KeyT key = i + j * numOpsPerThread * 100;
708 folly::doNotOptimizeAway(globalAHM->find(key) == globalAHM->end());
// Single-threaded miss-path benchmark (keys offset past the loaded range).
714 BENCHMARK(st_ahm_miss, iters) {
715 CHECK_LE(iters, FLAGS_numBMElements);
716 for (size_t i = 0; i < iters; i++) {
717 KeyT key = randomizeKey(i + iters * 100);
718 folly::doNotOptimizeAway(globalAHM->find(key) == globalAHM->end());
// Multi-threaded mixed workload: ~99% finds with ~1% inserts interleaved
// (every 128th iteration takes the insert branch).
722 BENCHMARK(mt_ahm_find_insert_mix, iters) {
723 CHECK_LE(iters, FLAGS_numBMElements);
724 numOpsPerThread = iters / FLAGS_numThreads;
725 runThreads([](void* jj) -> void* {
726 int64_t j = (int64_t) jj;
727 while (!runThreadsCreatedAllThreads.load());
728 for (int i = 0; i < numOpsPerThread; ++i) {
729 if (i % 128) { // ~1% insert mix
730 KeyT key = randomizeKey(i + j * numOpsPerThread);
731 folly::doNotOptimizeAway(globalAHM->find(key)->second);
// Insert branch uses a distinct key pattern so it adds new entries.
733 KeyT key = randomizeKey(i + j * numOpsPerThread * 100);
734 globalAHM->insert(key, genVal(key));
// Multi-threaded find() throughput on the preloaded AtomicHashArray; each
// worker looks up its own disjoint key slice.
741 BENCHMARK(mt_aha_find, iters) {
742 CHECK_LE(iters, FLAGS_numBMElements);
743 numOpsPerThread = iters / FLAGS_numThreads;
744 runThreads([](void* jj) -> void* {
745 int64_t j = (int64_t) jj;
746 while (!runThreadsCreatedAllThreads.load());
747 for (int i = 0; i < numOpsPerThread; ++i) {
748 KeyT key = randomizeKey(i + j * numOpsPerThread);
749 folly::doNotOptimizeAway(globalAHA->find(key)->second);
// Multi-threaded find() throughput on the preloaded AtomicHashMap; mirrors
// mt_aha_find for a direct AHA-vs-AHM comparison.
755 BENCHMARK(mt_ahm_find, iters) {
756 CHECK_LE(iters, FLAGS_numBMElements);
757 numOpsPerThread = iters / FLAGS_numThreads;
758 runThreads([](void* jj) -> void* {
759 int64_t j = (int64_t) jj;
760 while (!runThreadsCreatedAllThreads.load());
761 for (int i = 0; i < numOpsPerThread; ++i) {
762 KeyT key = randomizeKey(i + j * numOpsPerThread);
763 folly::doNotOptimizeAway(globalAHM->find(key)->second);
// Baseline: the cost of key randomization + modulus alone, so the find
// benchmarks above can be interpreted net of key generation.
770 BENCHMARK(st_baseline_modulus_and_random, iters) {
771 for (size_t i = 0; i < iters; ++i) {
772 k = randomizeKey(i) % iters;
776 // insertions go last because they reset the map
// Multi-threaded insert throughput into a freshly allocated AHM sized by LF.
778 BENCHMARK(mt_ahm_insert, iters) {
780 globalAHM.reset(new AHMapT(int(iters * LF), config));
781 numOpsPerThread = iters / FLAGS_numThreads;
783 runThreads(insertThread);
// Single-threaded insert throughput; allocation happens under a suspender
// so only the insert loop is timed.
786 BENCHMARK(st_ahm_insert, iters) {
787 folly::BenchmarkSuspender susp;
788 std::unique_ptr<AHMapT> ahm(new AHMapT(int(iters * LF), config));
791 for (size_t i = 0; i < iters; i++) {
792 KeyT key = randomizeKey(i);
793 ahm->insert(key, genVal(key));
// One-time benchmark configuration: applies the load-factor flags to the
// shared Configs, caps iteration counts, and defaults numThreads to the
// machine's logical core count (SET_FLAG_IF_DEFAULT leaves explicit
// command-line values untouched).
797 void benchmarkSetup() {
798 config.maxLoadFactor = FLAGS_maxLoadFactor;
799 configRace.maxLoadFactor = 0.5;
800 int numCores = sysconf(_SC_NPROCESSORS_ONLN);
803 string numIters = folly::to<string>(
804 std::min(1000000, int(FLAGS_numBMElements)));
806 gflags::SetCommandLineOptionWithMode(
807 "bm_max_iters", numIters.c_str(), gflags::SET_FLAG_IF_DEFAULT
809 gflags::SetCommandLineOptionWithMode(
810 "bm_min_iters", numIters.c_str(), gflags::SET_FLAG_IF_DEFAULT
812 string numCoresStr = folly::to<string>(numCores);
813 gflags::SetCommandLineOptionWithMode(
814 "numThreads", numCoresStr.c_str(), gflags::SET_FLAG_IF_DEFAULT
817 std::cout << "\nRunning AHM benchmarks on machine with " << numCores
818 << " logical cores.\n"
819 " num elements per map: " << FLAGS_numBMElements << "\n"
820 << " num threads for mt tests: " << FLAGS_numThreads << "\n"
821 << " AHM load factor: " << FLAGS_targetLoadFactor << "\n\n";
// Entry point: run the gtest suite first; if all tests pass and the
// --benchmark flag was given, run the benchmarks (setup call is in elided
// lines between the flag check and runBenchmarks()).
824 int main(int argc, char** argv) {
825 testing::InitGoogleTest(&argc, argv);
826 gflags::ParseCommandLineFlags(&argc, &argv, true);
827 auto ret = RUN_ALL_TESTS();
// RUN_ALL_TESTS returns 0 on success, so benchmarks only run on a clean pass.
828 if (!ret && FLAGS_benchmark) {
830 folly::runBenchmarks();
836 Benchmarks run on dual Xeon X5650's @ 2.67GHz w/hyperthreading enabled
837 (12 physical cores, 12 MB cache, 72 GB RAM)
839 Running AHM benchmarks on machine with 24 logical cores.
840 num elements per map: 12000000
841 num threads for mt tests: 24
842 AHM load factor: 0.75
844 Benchmark Iters Total t t/iter iter/sec
845 ------------------------------------------------------------------------------
846 Comparing benchmarks: BM_mt_aha_find,BM_mt_ahm_find
847 * BM_mt_aha_find 1000000 7.767 ms 7.767 ns 122.8 M
848 +0.81% BM_mt_ahm_find 1000000 7.83 ms 7.83 ns 121.8 M
849 ------------------------------------------------------------------------------
850 Comparing benchmarks: BM_st_aha_find,BM_st_ahm_find
851 * BM_st_aha_find 1000000 57.83 ms 57.83 ns 16.49 M
852 +77.9% BM_st_ahm_find 1000000 102.9 ms 102.9 ns 9.27 M
853 ------------------------------------------------------------------------------
854 BM_mt_ahm_miss 1000000 2.937 ms 2.937 ns 324.7 M
855 BM_st_ahm_miss 1000000 164.2 ms 164.2 ns 5.807 M
856 BM_mt_ahm_find_insert_mix 1000000 8.797 ms 8.797 ns 108.4 M
857 BM_mt_ahm_insert 1000000 17.39 ms 17.39 ns 54.83 M
858 BM_st_ahm_insert 1000000 106.8 ms 106.8 ns 8.93 M
859 BM_st_baseline_modulus_and_rando 1000000 6.223 ms 6.223 ns 153.2 M