2 * Copyright 2017 Facebook, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #ifndef FOLLY_ATOMICHASHMAP_H_
18 #error "This should only be included by AtomicHashMap.h"
21 #include <folly/detail/AtomicHashUtils.h>
25 // AtomicHashMap constructor -- Atomic wrapper that allows growth
26 // This class has a lot of overhead (184 Bytes) so only use for big maps
// NOTE(review): this listing is an incomplete extraction -- the leading
// numerals on each line are original file line numbers, and several lines
// (e.g. the rest of the template parameter list and some closing braces)
// are missing. Restore from upstream folly before compiling.
34 typename KeyConvertFcn>
// Constructor: sizes the primary submap from finalSizeEst and records the
// growth factor (visible in the init-list fragment below: a negative
// config.growthFactor selects the default 1 - maxLoadFactor).
42 KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
44 config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
45 : config.growthFactor) {
// Load factor must be strictly between 0 and 1 for the probing scheme.
46 CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
// Eagerly allocate the primary submap; ownership of the raw pointer is
// transferred into the atomic slot via release().
47 subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
48 std::memory_order_relaxed);
49 auto subMapCount = kNumSubMaps_;
// Remaining submap slots start null; they are allocated lazily by
// insertInternal as earlier maps fill up.
50 FOR_EACH_RANGE(i, 1, subMapCount) {
51 subMaps_[i].store(nullptr, std::memory_order_relaxed);
// Relaxed stores are fine here: the constructor runs single-threaded,
// before the object is published to other threads.
53 numMapsAllocated_.store(1, std::memory_order_relaxed);
64 typename KeyConvertFcn>
67 typename LookupHashFcn,
68 typename LookupEqualFcn,
69 typename LookupKeyToKeyFcn,
// emplace -- insert key k, constructing the value in place from
// vCtorArgs. Returns (iterator to the element, bool inserted) like
// std::unordered_map::emplace.
71 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
72 ProbeFcn, KeyConvertFcn>::iterator, bool>
73 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
74 Allocator, ProbeFcn, KeyConvertFcn>::
75 emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
// insertInternal returns a SimpleRetT whose .i is the submap index and
// .j the offset within that submap; .success says whether we inserted.
76 SimpleRetT ret = insertInternal<LookupKeyT,
80 k, std::forward<ArgTs>(vCtorArgs)...);
// Relaxed load is safe: insertInternal already synchronized with the
// submap's publication (see its acquire load / spin wait).
81 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
// NOTE(review): the second member of the returned pair (ret.success) is
// on a line lost in this extraction.
82 return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
86 // insertInternal -- Allocates new sub maps as existing ones fill up.
// Returns SimpleRetT(submap index, offset, success). Throws
// AtomicHashMapFullError when no further submap can be allocated.
// NOTE(review): the `beginInsertInternal:` label targeted by the goto at
// the bottom is not visible in this listing -- presumably lost in
// extraction; it belongs just above the numMapsAllocated_ load.
94 typename KeyConvertFcn>
97 typename LookupHashFcn,
98 typename LookupEqualFcn,
99 typename LookupKeyToKeyFcn,
101 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
102 Allocator, ProbeFcn, KeyConvertFcn>::
104 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
105 Allocator, ProbeFcn, KeyConvertFcn>::
106 insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
// Acquire pairs with the release fetch_add below so that once we observe
// a count, the corresponding submap pointers are visible.
108 auto nextMapIdx = // this maintains our state
109 numMapsAllocated_.load(std::memory_order_acquire);
110 typename SubMap::SimpleRetT ret;
111 FOR_EACH_RANGE(i, 0, nextMapIdx) {
112 // insert in each map successively. If one succeeds, we're done!
113 SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
114 ret = subMap->template insertInternal<LookupKeyT,
118 key, std::forward<ArgTs>(vCtorArgs)...);
// A submap signals "full" by returning its own capacity as the index.
119 if (ret.idx == subMap->capacity_) {
120 continue; //map is full, so try the next one
122 // Either collision or success - insert in either case
123 return SimpleRetT(i, ret.idx, ret.success);
126 // If we made it this far, all maps are full and we need to try to allocate
129 SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
// Give up if we have exhausted the fixed submap slots, or if growth is
// configured so small that the next map would have fewer than one cell.
130 if (nextMapIdx >= kNumSubMaps_ ||
131 primarySubMap->capacity_ * kGrowthFrac_ < 1.0)) {
132 // Can't allocate any more sub maps.
133 throw AtomicHashMapFullError();
// Exactly one thread wins the lock for slot nextMapIdx and allocates;
// losers spin below until the winner publishes the new map.
136 if (tryLockMap(nextMapIdx)) {
137 // Alloc a new map and shove it in. We can change whatever
138 // we want because other threads are waiting on us...
// Each successive submap grows geometrically from the primary capacity.
139 size_t numCellsAllocated = (size_t)
140 (primarySubMap->capacity_ *
141 std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
142 size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
143 DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
144 (SubMap*)kLockedPtr_);
145 // create a new map using the settings stored in the first map
148 config.emptyKey = primarySubMap->kEmptyKey_;
149 config.lockedKey = primarySubMap->kLockedKey_;
150 config.erasedKey = primarySubMap->kErasedKey_;
151 config.maxLoadFactor = primarySubMap->maxLoadFactor();
152 config.entryCountThreadCacheSize =
153 primarySubMap->getEntryCountThreadCacheSize();
// Overwrites the kLockedPtr_ sentinel; relaxed is fine because the
// release fetch_add below is what publishes the pointer.
154 subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
155 std::memory_order_relaxed);
157 // Publish the map to other threads.
158 numMapsAllocated_.fetch_add(1, std::memory_order_release);
159 DCHECK_EQ(nextMapIdx + 1,
160 numMapsAllocated_.load(std::memory_order_relaxed));
162 // If we lost the race, we'll have to wait for the next map to get
163 // allocated before doing any insertion here.
164 detail::atomic_hash_spin_wait([&] {
165 return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
169 // Relaxed is ok here because either we just created this map, or we
170 // just did a spin wait with an acquire load on numMapsAllocated_.
171 SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
172 DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
173 ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
174 if (ret.idx != loadedMap->capacity_) {
175 return SimpleRetT(nextMapIdx, ret.idx, ret.success);
177 // We took way too long and the new map is already full...try again from
178 // the top (this should pretty much never happen).
179 goto beginInsertInternal;
190 typename KeyConvertFcn>
191 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
// find -- non-const lookup. Delegates to findInternal and wraps the
// (submap, offset) result in an iterator.
192 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
193 Allocator, ProbeFcn, KeyConvertFcn>::
195 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
196 Allocator, ProbeFcn, KeyConvertFcn>::find(
198 SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
// NOTE(review): the not-found check that returns end() (original lines
// 199-201) is missing from this extraction.
202 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
203 return iterator(this, ret.i, subMap->makeIter(ret.j));
213 typename KeyConvertFcn>
214 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
// find (const overload) -- forwards to the non-const find via const_cast
// and converts the resulting iterator to a const_iterator. Safe because
// the non-const find does not mutate the map.
215 typename AtomicHashMap<KeyT, ValueT,
216 HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
217 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
218 Allocator, ProbeFcn, KeyConvertFcn>::
219 find(LookupKeyT k) const {
220 return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
233 typename KeyConvertFcn>
234 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
// findInternal -- probes the primary submap first (the common case, hence
// LIKELY), then each secondary submap in order. A submap reports a miss
// by returning its own capacity as the index.
235 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
236 Allocator, ProbeFcn, KeyConvertFcn>::
238 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
239 Allocator, ProbeFcn, KeyConvertFcn>::
240 findInternal(const LookupKeyT k) const {
241 SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
242 typename SubMap::SimpleRetT ret =
243 primaryMap->template findInternal<LookupKeyT,
246 if (LIKELY(ret.idx != primaryMap->capacity_)) {
247 return SimpleRetT(0, ret.idx, ret.success);
// Acquire pairs with the release publication in insertInternal so the
// secondary submap pointers we read below are fully constructed.
249 const unsigned int numMaps =
250 numMapsAllocated_.load(std::memory_order_acquire);
251 FOR_EACH_RANGE(i, 1, numMaps) {
252 // Check each map successively. If one succeeds, we're done!
253 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
254 ret = thisMap->template findInternal<LookupKeyT,
257 if (LIKELY(ret.idx != thisMap->capacity_)) {
258 return SimpleRetT(i, ret.idx, ret.success);
261 // Didn't find our key...
// The sentinel (numMaps, 0, false) is what callers test for "not found".
262 return SimpleRetT(numMaps, 0, false);
265 // findAtInternal -- see encodeIndex() for details.
// Decodes an index produced by encodeIndex() back into a
// (submap index, submap offset) pair.
273 typename KeyConvertFcn>
274 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
275 Allocator, ProbeFcn, KeyConvertFcn>::
277 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
278 Allocator, ProbeFcn, KeyConvertFcn>::
279 findAtInternal(uint32_t idx) const {
280 uint32_t subMapIdx, subMapOffset;
// The high bit marks indices that live in a secondary submap; the next
// kNumSubMapBits_ bits select which one, and the low bits are the offset.
281 if (idx & kSecondaryMapBit_) {
282 // idx falls in a secondary map
283 idx &= ~kSecondaryMapBit_; // unset secondary bit
284 subMapIdx = idx >> kSubMapIndexShift_;
285 DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
286 subMapOffset = idx & kSubMapIndexMask_;
288 // idx falls in primary map
// NOTE(review): the else-branch body (original lines 289-291, which set
// subMapIdx = 0 and subMapOffset = idx for the primary map) is missing
// from this extraction.
292 return SimpleRetT(subMapIdx, subMapOffset, true);
303 typename KeyConvertFcn>
// erase -- removes key k from whichever submap holds it. The lines that
// return the per-submap erase count / the final 0 (between the visible
// lines below) are missing from this extraction.
304 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
305 Allocator, ProbeFcn, KeyConvertFcn>::
307 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
308 Allocator, ProbeFcn, KeyConvertFcn>::
309 erase(const KeyT k) {
// Acquire so we see every submap published before this call.
310 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
311 FOR_EACH_RANGE(i, 0, numMaps) {
312 // Check each map successively. If one succeeds, we're done!
313 if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
317 // Didn't find our key...
321 // capacity -- summation of capacities of all submaps
329 typename KeyConvertFcn>
330 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
331 Allocator, ProbeFcn, KeyConvertFcn>::
// Sums capacity_ over every submap published so far (acquire load makes
// the submap pointers we dereference safe to read).
334 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
335 FOR_EACH_RANGE(i, 0, numMaps) {
336 totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
342 // number of new insertions until current submaps are all at max load
350 typename KeyConvertFcn>
351 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
352 Allocator, ProbeFcn, KeyConvertFcn>::
353 spaceRemaining() const {
// Accumulates, per submap, how many more entries fit before maxEntries_
// is reached; clamped at zero because a submap can transiently exceed
// its target load.
355 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
356 FOR_EACH_RANGE(i, 0, numMaps) {
357 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
358 spaceRem += std::max(
// NOTE(review): the stray `&` before thisMap->numEntries_ below looks
// like extraction corruption (taking the address of a temporary would
// not compile) -- verify against upstream, which subtracts the value of
// numEntries_.readFull() directly.
360 thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
366 // clear -- Wipes all keys and values from primary map and destroys
367 // all secondary maps. Not thread safe.
375 typename KeyConvertFcn>
376 void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
377 Allocator, ProbeFcn, KeyConvertFcn>::
// The primary submap is kept (just cleared); secondary submaps are
// destroyed and their slots nulled. Relaxed ordering is acceptable only
// because this function is documented as not thread safe.
379 subMaps_[0].load(std::memory_order_relaxed)->clear();
380 int const numMaps = numMapsAllocated_
381 .load(std::memory_order_relaxed);
382 FOR_EACH_RANGE(i, 1, numMaps) {
383 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
385 SubMap::destroy(thisMap);
386 subMaps_[i].store(nullptr, std::memory_order_relaxed);
// Back to the initial single-submap state.
388 numMapsAllocated_.store(1, std::memory_order_relaxed);
399 typename KeyConvertFcn>
400 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
401 Allocator, ProbeFcn, KeyConvertFcn>::
// size -- sums size() over every published submap. Acquire load makes
// the submap pointers safe to dereference.
404 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
405 FOR_EACH_RANGE(i, 0, numMaps) {
406 totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
411 // encodeIndex -- Encode the submap index and offset into return.
412 // index_ret must be pre-populated with the submap offset.
414 // We leave index_ret untouched when referring to the primary map
415 // so it can be as large as possible (31 data bits). Max size of
416 // secondary maps is limited by what can fit in the low 27 bits.
418 // Returns the following bit-encoded data in index_ret:
419 // if subMap == 0 (primary map) =>
422 // 0-30 submap offset (index_ret input)
424 // if subMap > 0 (secondary maps) =>
427 // 27-30 which subMap
428 // 0-26 subMap offset (index_ret input)
436 typename KeyConvertFcn>
438 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
439 Allocator, ProbeFcn, KeyConvertFcn>::
440 encodeIndex(uint32_t subMap, uint32_t offset) {
// Primary-map offsets are returned as-is (the early-return branch for
// subMap == 0 falls between the DCHECKs below; part of it is missing
// from this extraction).
441 DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
445 // Make sure subMap isn't too big
446 DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
447 // Make sure subMap bits of offset are clear
448 DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
450 // Set high-order bits to encode which submap this index belongs to
// Layout matches findAtInternal's decoder: high bit = secondary-map
// flag, next bits = submap index, low bits = offset.
451 return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
455 // Iterator implementation
// NOTE(review): this struct runs past the end of the visible listing
// (checkAdvanceToNextSubmap is cut off mid-body) and several interior
// lines are missing; comments below describe only what is visible.
464 typename KeyConvertFcn>
465 template <class ContT, class IterVal, class SubIt>
// Forward iterator over all submaps, built on boost::iterator_facade.
// Templated on container/value/sub-iterator types so one definition
// serves both iterator and const_iterator.
466 struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
467 Allocator, ProbeFcn, KeyConvertFcn>::
468 ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
470 boost::forward_traversal_tag> {
// Default-constructed iterator acts as the end() sentinel (ahm_ null,
// see isEnd() below).
471 explicit ahm_iterator() : ahm_(nullptr) {}
473 // Conversion ctor for interoperability between const_iterator and
474 // iterator. The enable_if<> magic keeps us well-behaved for
475 // is_convertible<> (v. the iterator_facade documentation).
476 template <class OtherContT, class OtherVal, class OtherSubIt>
478 const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
479 typename std::enable_if<
480 std::is_convertible<OtherSubIt, SubIt>::value>::type* = nullptr)
481 : ahm_(o.ahm_), subMap_(o.subMap_), subIt_(o.subIt_) {}
484 * Returns the unique index that can be used for access directly
485 * into the data storage.
487 uint32_t getIndex() const {
489 return ahm_->encodeIndex(subMap_, subIt_.getIndex());
493 friend class AtomicHashMap;
494 explicit ahm_iterator(ContT* ahm,
502 friend class boost::iterator_core_access;
507 checkAdvanceToNextSubmap();
// Iterators on different maps never compare equal; end sentinels
// compare equal to each other regardless of position fields.
510 bool equal(const ahm_iterator& other) const {
511 if (ahm_ != other.ahm_) {
515 if (isEnd() || other.isEnd()) {
516 return isEnd() == other.isEnd();
519 return subMap_ == other.subMap_ &&
520 subIt_ == other.subIt_;
523 IterVal& dereference() const {
527 bool isEnd() const { return ahm_ == nullptr; }
// When the current submap's iterator hits its end, hop to the begin()
// of the next allocated submap (acquire load pairs with the publication
// in insertInternal).
529 void checkAdvanceToNextSubmap() {
534 SubMap* thisMap = ahm_->subMaps_[subMap_].
535 load(std::memory_order_relaxed);
536 while (subIt_ == thisMap->end()) {
537 // This sub iterator is done, advance to next one
539 ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
541 thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
542 subIt_ = thisMap->begin();