2 * Copyright 2016 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef FOLLY_ATOMICHASHMAP_H_
18 #error "This should only be included by AtomicHashMap.h"
21 #include <folly/detail/AtomicHashUtils.h>
25 // AtomicHashMap constructor -- Atomic wrapper that allows growth
26 // This class has a lot of overhead (184 Bytes) so only use for big maps
27 template <typename KeyT,
33 typename KeyConvertFcn>
34 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
35 Allocator, ProbeFcn, KeyConvertFcn>::
36 AtomicHashMap(size_t finalSizeEst, const Config& config)
// A negative config.growthFactor selects the default growth fraction,
// (1 - maxLoadFactor); otherwise the caller-supplied factor is used as-is.
37 : kGrowthFrac_(config.growthFactor < 0 ?
38 1.0 - config.maxLoadFactor : config.growthFactor) {
// maxLoadFactor must be a proper fraction: 0 or 1 would make submaps
// permanently full or never allowed to fill.
39 CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
// Eagerly build the primary submap from the caller's size estimate.
// Relaxed ordering suffices here: the constructor runs before the object
// can be shared with other threads.
40 subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
41 std::memory_order_relaxed);
42 auto subMapCount = kNumSubMaps_;
// The remaining slots start null; insertInternal() allocates them lazily
// as earlier maps fill up.
43 FOR_EACH_RANGE(i, 1, subMapCount) {
44 subMaps_[i].store(nullptr, std::memory_order_relaxed);
// Record that only the primary map exists so far.
46 numMapsAllocated_.store(1, std::memory_order_relaxed);
50 template <typename KeyT,
56 typename KeyConvertFcn>
57 template <typename LookupKeyT,
58 typename LookupHashFcn,
59 typename LookupEqualFcn,
60 typename LookupKeyToKeyFcn,
62 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
63 ProbeFcn, KeyConvertFcn>::iterator, bool>
64 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
65 Allocator, ProbeFcn, KeyConvertFcn>::
// emplace -- insert (k, value constructed from vCtorArgs...) if k is absent.
// Returns a pair of an iterator to the element and an insertion flag.
66 emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
// insertInternal locates (or grows to) a submap with room; ret carries the
// submap index (ret.i) and the slot offset within that submap (ret.j).
67 SimpleRetT ret = insertInternal<LookupKeyT,
71 k, std::forward<ArgTs>(vCtorArgs)...);
// Relaxed load is safe: insertInternal already synchronized with any new
// submap publication through numMapsAllocated_ (acquire/release).
72 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
73 return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
77 // insertInternal -- Allocates new sub maps as existing ones fill up.
78 template <typename KeyT,
84 typename KeyConvertFcn>
85 template <typename LookupKeyT,
86 typename LookupHashFcn,
87 typename LookupEqualFcn,
88 typename LookupKeyToKeyFcn,
90 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
91 Allocator, ProbeFcn, KeyConvertFcn>::
93 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
94 Allocator, ProbeFcn, KeyConvertFcn>::
95 insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
// NOTE(review): the retry label targeted by `goto beginInsertInternal` at
// the bottom of this function is not visible in this excerpt -- it should
// sit right here, before the initial load; confirm against the full file.
97 auto nextMapIdx = // this maintains our state
98 numMapsAllocated_.load(std::memory_order_acquire);
99 typename SubMap::SimpleRetT ret;
100 FOR_EACH_RANGE(i, 0, nextMapIdx) {
101 // insert in each map successively. If one succeeds, we're done!
102 SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
103 ret = subMap->template insertInternal<LookupKeyT,
107 key, std::forward<ArgTs>(vCtorArgs)...);
// An index equal to capacity_ is the submap's "no room" sentinel.
108 if (ret.idx == subMap->capacity_) {
109 continue; //map is full, so try the next one
111 // Either collision or success - insert in either case
112 return SimpleRetT(i, ret.idx, ret.success);
115 // If we made it this far, all maps are full and we need to try to allocate
118 SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
// Give up if every submap slot is already used, or if the growth fraction
// would produce a new submap with fewer than one cell.
119 if (nextMapIdx >= kNumSubMaps_ ||
120 primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
121 // Can't allocate any more sub maps.
122 throw AtomicHashMapFullError();
// Race to allocate the next submap: the winner locks the slot (tryLockMap
// installs the kLockedPtr_ sentinel); losers spin-wait below until the new
// map has been published.
125 if (tryLockMap(nextMapIdx)) {
126 // Alloc a new map and shove it in. We can change whatever
127 // we want because other threads are waiting on us...
// Each successive submap grows geometrically from the primary capacity.
128 size_t numCellsAllocated = (size_t)
129 (primarySubMap->capacity_ *
130 std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
// NOTE(review): the intermediate (int) cast narrows a size_t product --
// presumably safe for realistic sizes, but worth confirming upstream.
131 size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
// The slot must still hold our lock sentinel; nobody else may touch it.
132 DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
133 (SubMap*)kLockedPtr_);
134 // create a new map using the settings stored in the first map
137 config.emptyKey = primarySubMap->kEmptyKey_;
138 config.lockedKey = primarySubMap->kLockedKey_;
139 config.erasedKey = primarySubMap->kErasedKey_;
140 config.maxLoadFactor = primarySubMap->maxLoadFactor();
141 config.entryCountThreadCacheSize =
142 primarySubMap->getEntryCountThreadCacheSize();
// The pointer store itself may be relaxed: visibility for other threads is
// provided by the release fetch_add on numMapsAllocated_ just below.
143 subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
144 std::memory_order_relaxed);
146 // Publish the map to other threads.
147 numMapsAllocated_.fetch_add(1, std::memory_order_release);
148 DCHECK_EQ(nextMapIdx + 1,
149 numMapsAllocated_.load(std::memory_order_relaxed));
151 // If we lost the race, we'll have to wait for the next map to get
152 // allocated before doing any insertion here.
153 detail::atomic_hash_spin_wait([&] {
154 return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
158 // Relaxed is ok here because either we just created this map, or we
159 // just did a spin wait with an acquire load on numMapsAllocated_.
160 SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
161 DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
// Try the freshly available submap directly.
162 ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
163 if (ret.idx != loadedMap->capacity_) {
164 return SimpleRetT(nextMapIdx, ret.idx, ret.success);
166 // We took way too long and the new map is already full...try again from
167 // the top (this should pretty much never happen).
168 goto beginInsertInternal;
172 template <typename KeyT,
178 typename KeyConvertFcn>
179 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
180 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
181 Allocator, ProbeFcn, KeyConvertFcn>::
// find -- non-const lookup built on findInternal; ret carries the submap
// index (ret.i) and slot offset (ret.j) of the match.
183 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
184 Allocator, ProbeFcn, KeyConvertFcn>::find(
186 SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
// Relaxed load is fine: findInternal has already synchronized with submap
// publication via its acquire load of numMapsAllocated_.
190 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
191 return iterator(this, ret.i, subMap->makeIter(ret.j));
194 template <typename KeyT,
200 typename KeyConvertFcn>
201 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
202 typename AtomicHashMap<KeyT, ValueT,
203 HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
204 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
205 Allocator, ProbeFcn, KeyConvertFcn>::
// Const overload: delegate to the non-const find() and let the resulting
// iterator convert to const_iterator. The const_cast is benign -- lookup
// does not mutate the map.
206 find(LookupKeyT k) const {
207 return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
213 template <typename KeyT,
219 typename KeyConvertFcn>
220 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
221 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
222 Allocator, ProbeFcn, KeyConvertFcn>::
// findInternal -- probe the primary submap first (the common, LIKELY-hinted
// fast path), then scan any published secondary submaps in order.
224 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
225 Allocator, ProbeFcn, KeyConvertFcn>::
226 findInternal(const LookupKeyT k) const {
227 SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
228 typename SubMap::SimpleRetT ret =
229 primaryMap->template findInternal<LookupKeyT,
// idx == capacity_ is the submap's "not found" sentinel.
232 if (LIKELY(ret.idx != primaryMap->capacity_)) {
233 return SimpleRetT(0, ret.idx, ret.success);
// Acquire pairs with the release fetch_add in insertInternal, making the
// contents of any newly published secondary submap visible here.
235 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
236 FOR_EACH_RANGE(i, 1, numMaps) {
237 // Check each map successively. If one succeeds, we're done!
238 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
239 ret = thisMap->template findInternal<LookupKeyT,
242 if (LIKELY(ret.idx != thisMap->capacity_)) {
243 return SimpleRetT(i, ret.idx, ret.success);
246 // Didn't find our key...
247 return SimpleRetT(numMaps, 0, false);
250 // findAtInternal -- see encodeIndex() for details.
251 template <typename KeyT,
257 typename KeyConvertFcn>
258 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
259 Allocator, ProbeFcn, KeyConvertFcn>::
// Decode an index produced by encodeIndex() back into a (submap, offset)
// pair; the third SimpleRetT field is always true (the index is trusted).
261 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
262 Allocator, ProbeFcn, KeyConvertFcn>::
263 findAtInternal(uint32_t idx) const {
264 uint32_t subMapIdx, subMapOffset;
// Bit kSecondaryMapBit_ distinguishes secondary-map indices from
// primary-map indices, which are stored untouched.
265 if (idx & kSecondaryMapBit_) {
266 // idx falls in a secondary map
267 idx &= ~kSecondaryMapBit_; // unset secondary bit
268 subMapIdx = idx >> kSubMapIndexShift_;
269 DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
270 subMapOffset = idx & kSubMapIndexMask_;
272 // idx falls in primary map
276 return SimpleRetT(subMapIdx, subMapOffset, true);
280 template <typename KeyT,
286 typename KeyConvertFcn>
287 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
288 Allocator, ProbeFcn, KeyConvertFcn>::
// erase -- remove k from whichever published submap currently holds it.
290 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
291 Allocator, ProbeFcn, KeyConvertFcn>::
292 erase(const KeyT k) {
// Acquire synchronizes with submap publication in insertInternal, so every
// submap counted here has fully visible contents.
293 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
294 FOR_EACH_RANGE(i, 0, numMaps) {
295 // Check each map successively. If one succeeds, we're done!
296 if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
300 // Didn't find our key...
304 // capacity -- summation of capacities of all submaps
305 template <typename KeyT,
311 typename KeyConvertFcn>
312 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
313 Allocator, ProbeFcn, KeyConvertFcn>::
// Acquire pairs with the release publication of new submaps, so each
// submap pointer loaded below is valid.
316 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
317 FOR_EACH_RANGE(i, 0, numMaps) {
318 totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
324 // number of new insertions until current submaps are all at max load
325 template <typename KeyT,
331 typename KeyConvertFcn>
332 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
333 Allocator, ProbeFcn, KeyConvertFcn>::
334 spaceRemaining() const {
// Acquire synchronizes with submap publication in insertInternal.
336 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
337 FOR_EACH_RANGE(i, 0, numMaps) {
338 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
// Per-submap headroom: insertions still allowed before maxEntries_ is hit;
// clamped (via std::max) so an over-full submap contributes nothing.
// NOTE(review): the `&` before thisMap->numEntries_.readFull() looks like a
// transcription error -- it would take the address of a temporary; a plain
// subtraction of the entry count is presumably intended. Confirm against
// the full file.
339 spaceRem += std::max(
341 thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
347 // clear -- Wipes all keys and values from primary map and destroys
348 // all secondary maps. Not thread safe.
349 template <typename KeyT,
355 typename KeyConvertFcn>
356 void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
357 Allocator, ProbeFcn, KeyConvertFcn>::
// Return to the just-constructed state: empty the primary submap in place,
// destroy every secondary submap, and null out its slot.
359 subMaps_[0].load(std::memory_order_relaxed)->clear();
360 int const numMaps = numMapsAllocated_
361 .load(std::memory_order_relaxed);
362 FOR_EACH_RANGE(i, 1, numMaps) {
363 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
365 SubMap::destroy(thisMap);
366 subMaps_[i].store(nullptr, std::memory_order_relaxed);
// Relaxed ordering throughout is acceptable because clear() is documented
// above as not thread safe -- callers must provide exclusion.
368 numMapsAllocated_.store(1, std::memory_order_relaxed);
372 template <typename KeyT,
378 typename KeyConvertFcn>
379 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
380 Allocator, ProbeFcn, KeyConvertFcn>::
// size -- total element count summed across all published submaps.
// Acquire pairs with the release publication of new submaps.
383 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
384 FOR_EACH_RANGE(i, 0, numMaps) {
385 totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
390 // encodeIndex -- Encode the submap index and offset into return.
391 // index_ret must be pre-populated with the submap offset.
393 // We leave index_ret untouched when referring to the primary map
394 // so it can be as large as possible (31 data bits). Max size of
395 // secondary maps is limited by what can fit in the low 27 bits.
397 // Returns the following bit-encoded data in index_ret:
398 // if subMap == 0 (primary map) =>
401 // 0-30 submap offset (index_ret input)
403 // if subMap > 0 (secondary maps) =>
406 // 27-30 which subMap
407 // 0-26 subMap offset (index_ret input)
408 template <typename KeyT,
414 typename KeyConvertFcn>
416 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
417 Allocator, ProbeFcn, KeyConvertFcn>::
418 encodeIndex(uint32_t subMap, uint32_t offset) {
419 DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
// Primary-map indices are returned untouched (secondary bit stays clear),
// which is what lets findAtInternal() decode them without shifting.
420 if (subMap == 0) return offset;
421 // Make sure subMap isn't too big
422 DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
423 // Make sure subMap bits of offset are clear
424 DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
426 // Set high-order bits to encode which submap this index belongs to
427 return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
431 // Iterator implementation
433 template <typename KeyT,
439 typename KeyConvertFcn>
440 template <class ContT, class IterVal, class SubIt>
441 struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
442 Allocator, ProbeFcn, KeyConvertFcn>::
443 ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
445 boost::forward_traversal_tag> {
446 explicit ahm_iterator() : ahm_(0) {}
448 // Conversion ctor for interoperability between const_iterator and
449 // iterator. The enable_if<> magic keeps us well-behaved for
450 // is_convertible<> (v. the iterator_facade documentation).
451 template<class OtherContT, class OtherVal, class OtherSubIt>
452 ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
453 typename std::enable_if<
454 std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
461 * Returns the unique index that can be used for access directly
462 * into the data storage.
464 uint32_t getIndex() const {
466 return ahm_->encodeIndex(subMap_, subIt_.getIndex());
470 friend class AtomicHashMap;
471 explicit ahm_iterator(ContT* ahm,
479 friend class boost::iterator_core_access;
484 checkAdvanceToNextSubmap();
487 bool equal(const ahm_iterator& other) const {
488 if (ahm_ != other.ahm_) {
492 if (isEnd() || other.isEnd()) {
493 return isEnd() == other.isEnd();
496 return subMap_ == other.subMap_ &&
497 subIt_ == other.subIt_;
500 IterVal& dereference() const {
504 bool isEnd() const { return ahm_ == nullptr; }
506 void checkAdvanceToNextSubmap() {
511 SubMap* thisMap = ahm_->subMaps_[subMap_].
512 load(std::memory_order_relaxed);
513 while (subIt_ == thisMap->end()) {
514 // This sub iterator is done, advance to next one
516 ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
518 thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
519 subIt_ = thisMap->begin();