/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #ifndef FOLLY_ATOMICHASHMAP_H_
18 #error "This should only be included by AtomicHashMap.h"
21 #include <folly/detail/AtomicHashUtils.h>
25 // AtomicHashMap constructor -- Atomic wrapper that allows growth
26 // This class has a lot of overhead (184 Bytes) so only use for big maps
34 typename KeyConvertFcn>
42 KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
44 config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
45 : config.growthFactor) {
46 CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
47 subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
48 std::memory_order_relaxed);
49 auto subMapCount = kNumSubMaps_;
50 FOR_EACH_RANGE(i, 1, subMapCount) {
51 subMaps_[i].store(nullptr, std::memory_order_relaxed);
53 numMapsAllocated_.store(1, std::memory_order_relaxed);
57 template <typename KeyT,
63 typename KeyConvertFcn>
64 template <typename LookupKeyT,
65 typename LookupHashFcn,
66 typename LookupEqualFcn,
67 typename LookupKeyToKeyFcn,
69 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator,
70 ProbeFcn, KeyConvertFcn>::iterator, bool>
71 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
72 Allocator, ProbeFcn, KeyConvertFcn>::
73 emplace(LookupKeyT k, ArgTs&&... vCtorArgs) {
74 SimpleRetT ret = insertInternal<LookupKeyT,
78 k, std::forward<ArgTs>(vCtorArgs)...);
79 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
80 return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
84 // insertInternal -- Allocates new sub maps as existing ones fill up.
85 template <typename KeyT,
91 typename KeyConvertFcn>
92 template <typename LookupKeyT,
93 typename LookupHashFcn,
94 typename LookupEqualFcn,
95 typename LookupKeyToKeyFcn,
97 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
98 Allocator, ProbeFcn, KeyConvertFcn>::
100 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
101 Allocator, ProbeFcn, KeyConvertFcn>::
102 insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) {
104 auto nextMapIdx = // this maintains our state
105 numMapsAllocated_.load(std::memory_order_acquire);
106 typename SubMap::SimpleRetT ret;
107 FOR_EACH_RANGE(i, 0, nextMapIdx) {
108 // insert in each map successively. If one succeeds, we're done!
109 SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
110 ret = subMap->template insertInternal<LookupKeyT,
114 key, std::forward<ArgTs>(vCtorArgs)...);
115 if (ret.idx == subMap->capacity_) {
116 continue; //map is full, so try the next one
118 // Either collision or success - insert in either case
119 return SimpleRetT(i, ret.idx, ret.success);
122 // If we made it this far, all maps are full and we need to try to allocate
125 SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
126 if (nextMapIdx >= kNumSubMaps_ ||
127 primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
128 // Can't allocate any more sub maps.
129 throw AtomicHashMapFullError();
132 if (tryLockMap(nextMapIdx)) {
133 // Alloc a new map and shove it in. We can change whatever
134 // we want because other threads are waiting on us...
135 size_t numCellsAllocated = (size_t)
136 (primarySubMap->capacity_ *
137 std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
138 size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
139 DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
140 (SubMap*)kLockedPtr_);
141 // create a new map using the settings stored in the first map
144 config.emptyKey = primarySubMap->kEmptyKey_;
145 config.lockedKey = primarySubMap->kLockedKey_;
146 config.erasedKey = primarySubMap->kErasedKey_;
147 config.maxLoadFactor = primarySubMap->maxLoadFactor();
148 config.entryCountThreadCacheSize =
149 primarySubMap->getEntryCountThreadCacheSize();
150 subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
151 std::memory_order_relaxed);
153 // Publish the map to other threads.
154 numMapsAllocated_.fetch_add(1, std::memory_order_release);
155 DCHECK_EQ(nextMapIdx + 1,
156 numMapsAllocated_.load(std::memory_order_relaxed));
158 // If we lost the race, we'll have to wait for the next map to get
159 // allocated before doing any insertion here.
160 detail::atomic_hash_spin_wait([&] {
161 return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
165 // Relaxed is ok here because either we just created this map, or we
166 // just did a spin wait with an acquire load on numMapsAllocated_.
167 SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
168 DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
169 ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
170 if (ret.idx != loadedMap->capacity_) {
171 return SimpleRetT(nextMapIdx, ret.idx, ret.success);
173 // We took way too long and the new map is already full...try again from
174 // the top (this should pretty much never happen).
175 goto beginInsertInternal;
179 template <typename KeyT,
185 typename KeyConvertFcn>
186 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
187 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
188 Allocator, ProbeFcn, KeyConvertFcn>::
190 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
191 Allocator, ProbeFcn, KeyConvertFcn>::find(
193 SimpleRetT ret = findInternal<LookupKeyT, LookupHashFcn, LookupEqualFcn>(k);
197 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
198 return iterator(this, ret.i, subMap->makeIter(ret.j));
201 template <typename KeyT,
207 typename KeyConvertFcn>
208 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
209 typename AtomicHashMap<KeyT, ValueT,
210 HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator
211 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
212 Allocator, ProbeFcn, KeyConvertFcn>::
213 find(LookupKeyT k) const {
214 return const_cast<AtomicHashMap*>(this)->find<LookupKeyT,
220 template <typename KeyT,
226 typename KeyConvertFcn>
227 template <class LookupKeyT, class LookupHashFcn, class LookupEqualFcn>
228 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
229 Allocator, ProbeFcn, KeyConvertFcn>::
231 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
232 Allocator, ProbeFcn, KeyConvertFcn>::
233 findInternal(const LookupKeyT k) const {
234 SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
235 typename SubMap::SimpleRetT ret =
236 primaryMap->template findInternal<LookupKeyT,
239 if (LIKELY(ret.idx != primaryMap->capacity_)) {
240 return SimpleRetT(0, ret.idx, ret.success);
242 const unsigned int numMaps =
243 numMapsAllocated_.load(std::memory_order_acquire);
244 FOR_EACH_RANGE(i, 1, numMaps) {
245 // Check each map successively. If one succeeds, we're done!
246 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
247 ret = thisMap->template findInternal<LookupKeyT,
250 if (LIKELY(ret.idx != thisMap->capacity_)) {
251 return SimpleRetT(i, ret.idx, ret.success);
254 // Didn't find our key...
255 return SimpleRetT(numMaps, 0, false);
258 // findAtInternal -- see encodeIndex() for details.
259 template <typename KeyT,
265 typename KeyConvertFcn>
266 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
267 Allocator, ProbeFcn, KeyConvertFcn>::
269 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
270 Allocator, ProbeFcn, KeyConvertFcn>::
271 findAtInternal(uint32_t idx) const {
272 uint32_t subMapIdx, subMapOffset;
273 if (idx & kSecondaryMapBit_) {
274 // idx falls in a secondary map
275 idx &= ~kSecondaryMapBit_; // unset secondary bit
276 subMapIdx = idx >> kSubMapIndexShift_;
277 DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
278 subMapOffset = idx & kSubMapIndexMask_;
280 // idx falls in primary map
284 return SimpleRetT(subMapIdx, subMapOffset, true);
288 template <typename KeyT,
294 typename KeyConvertFcn>
295 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
296 Allocator, ProbeFcn, KeyConvertFcn>::
298 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
299 Allocator, ProbeFcn, KeyConvertFcn>::
300 erase(const KeyT k) {
301 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
302 FOR_EACH_RANGE(i, 0, numMaps) {
303 // Check each map successively. If one succeeds, we're done!
304 if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
308 // Didn't find our key...
312 // capacity -- summation of capacities of all submaps
313 template <typename KeyT,
319 typename KeyConvertFcn>
320 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
321 Allocator, ProbeFcn, KeyConvertFcn>::
324 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
325 FOR_EACH_RANGE(i, 0, numMaps) {
326 totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
332 // number of new insertions until current submaps are all at max load
333 template <typename KeyT,
339 typename KeyConvertFcn>
340 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
341 Allocator, ProbeFcn, KeyConvertFcn>::
342 spaceRemaining() const {
344 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
345 FOR_EACH_RANGE(i, 0, numMaps) {
346 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
347 spaceRem += std::max(
349 thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
355 // clear -- Wipes all keys and values from primary map and destroys
356 // all secondary maps. Not thread safe.
357 template <typename KeyT,
363 typename KeyConvertFcn>
364 void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
365 Allocator, ProbeFcn, KeyConvertFcn>::
367 subMaps_[0].load(std::memory_order_relaxed)->clear();
368 int const numMaps = numMapsAllocated_
369 .load(std::memory_order_relaxed);
370 FOR_EACH_RANGE(i, 1, numMaps) {
371 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
373 SubMap::destroy(thisMap);
374 subMaps_[i].store(nullptr, std::memory_order_relaxed);
376 numMapsAllocated_.store(1, std::memory_order_relaxed);
380 template <typename KeyT,
386 typename KeyConvertFcn>
387 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
388 Allocator, ProbeFcn, KeyConvertFcn>::
391 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
392 FOR_EACH_RANGE(i, 0, numMaps) {
393 totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
398 // encodeIndex -- Encode the submap index and offset into return.
399 // index_ret must be pre-populated with the submap offset.
401 // We leave index_ret untouched when referring to the primary map
402 // so it can be as large as possible (31 data bits). Max size of
403 // secondary maps is limited by what can fit in the low 27 bits.
405 // Returns the following bit-encoded data in index_ret:
406 // if subMap == 0 (primary map) =>
409 // 0-30 submap offset (index_ret input)
411 // if subMap > 0 (secondary maps) =>
414 // 27-30 which subMap
415 // 0-26 subMap offset (index_ret input)
416 template <typename KeyT,
422 typename KeyConvertFcn>
424 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
425 Allocator, ProbeFcn, KeyConvertFcn>::
426 encodeIndex(uint32_t subMap, uint32_t offset) {
427 DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
428 if (subMap == 0) return offset;
429 // Make sure subMap isn't too big
430 DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
431 // Make sure subMap bits of offset are clear
432 DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
434 // Set high-order bits to encode which submap this index belongs to
435 return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
439 // Iterator implementation
441 template <typename KeyT,
447 typename KeyConvertFcn>
448 template <class ContT, class IterVal, class SubIt>
449 struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
450 Allocator, ProbeFcn, KeyConvertFcn>::
451 ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
453 boost::forward_traversal_tag> {
454 explicit ahm_iterator() : ahm_(0) {}
456 // Conversion ctor for interoperability between const_iterator and
457 // iterator. The enable_if<> magic keeps us well-behaved for
458 // is_convertible<> (v. the iterator_facade documentation).
459 template<class OtherContT, class OtherVal, class OtherSubIt>
460 ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
461 typename std::enable_if<
462 std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
469 * Returns the unique index that can be used for access directly
470 * into the data storage.
472 uint32_t getIndex() const {
474 return ahm_->encodeIndex(subMap_, subIt_.getIndex());
478 friend class AtomicHashMap;
479 explicit ahm_iterator(ContT* ahm,
487 friend class boost::iterator_core_access;
492 checkAdvanceToNextSubmap();
495 bool equal(const ahm_iterator& other) const {
496 if (ahm_ != other.ahm_) {
500 if (isEnd() || other.isEnd()) {
501 return isEnd() == other.isEnd();
504 return subMap_ == other.subMap_ &&
505 subIt_ == other.subIt_;
508 IterVal& dereference() const {
512 bool isEnd() const { return ahm_ == nullptr; }
514 void checkAdvanceToNextSubmap() {
519 SubMap* thisMap = ahm_->subMaps_[subMap_].
520 load(std::memory_order_relaxed);
521 while (subIt_ == thisMap->end()) {
522 // This sub iterator is done, advance to next one
524 ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
526 thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
527 subIt_ = thisMap->begin();