/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include <folly/detail/AtomicHashUtils.h>

namespace folly {
// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
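//
// Illustrative usage sketch (not part of this file; assumes the public API
// declared in AtomicHashMap.h -- the size-estimate constructor, insert(),
// and find()):
//
//   AtomicHashMap<int64_t, int64_t> ahm(100000);   // estimated final size
//   auto ret = ahm.insert(std::make_pair(42, 1));  // ret.second: newly inserted?
//   auto it = ahm.find(42);                        // it != ahm.end() on a hit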
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
AtomicHashMap(size_t finalSizeEst, const Config& config)
    : kGrowthFrac_(config.growthFactor < 0 ?
                   1.0 - config.maxLoadFactor : config.growthFactor) {
  CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
  subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
                    std::memory_order_relaxed);
  auto subMapCount = kNumSubMaps_;
  FOR_EACH_RANGE(i, 1, subMapCount) {
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
template <typename... ArgTs>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
                                 EqualFcn, Allocator, ProbeFcn>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
emplace(key_type k, ArgTs&&... vCtorArgs) {
  SimpleRetT ret = insertInternal(k, std::forward<ArgTs>(vCtorArgs)...);
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}
// insertInternal -- Allocates new sub maps as existing ones fill up.
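// The fast path below probes every already-published submap in order; the
// slow path races to allocate the next submap (tryLockMap) or spin-waits for
// the winning thread to publish it, then retries the insert in that new map.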
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
template <typename... ArgTs>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
insertInternal(key_type key, ArgTs&&... vCtorArgs) {
 beginInsertInternal:
  auto nextMapIdx = // this maintains our state
      numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE(i, 0, nextMapIdx) {
    // insert in each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
    if (ret.idx == subMap->capacity_) {
      continue;  // map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)
        (primarySubMap->capacity_ *
         std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
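    // For instance (illustrative numbers): with a primary capacity of 10000
    // and kGrowthFrac_ == 0.2, the first secondary map (nextMapIdx == 1) gets
    // 10000 * 0.2 = 2000 cells and the second gets 10000 * 1.2 * 0.2 = 2400.
    // In general each new map holds kGrowthFrac_ times the cells allocated so
    // far, because capacity_ * (1 + kGrowthFrac_)^(nextMapIdx - 1) is exactly
    // that running total.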
    DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
           (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
        primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
                               std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(nextMapIdx + 1,
              numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    detail::atomic_hash_spin_wait([&] {
      return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
    });
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<ArgTs>(vCtorArgs)...);
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full... try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
// find --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::find(
    const KeyT k) {
  SimpleRetT ret = findInternal(k);
  if (!ret.success) {
    return end();
  }
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return iterator(this, ret.i, subMap->makeIter(ret.j));
}
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
typename AtomicHashMap<KeyT, ValueT,
                       HashFcn, EqualFcn, Allocator, ProbeFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
find(const KeyT k) const {
  return const_cast<AtomicHashMap*>(this)->find(k);
}
// findInternal --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
findInternal(const KeyT k) const {
  SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
  if (LIKELY(ret.idx != primaryMap->capacity_)) {
    return SimpleRetT(0, ret.idx, ret.success);
  }
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 1, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = thisMap->findInternal(k);
    if (LIKELY(ret.idx != thisMap->capacity_)) {
      return SimpleRetT(i, ret.idx, ret.success);
    }
  }
  // Didn't find our key...
  return SimpleRetT(numMaps, 0, false);
}
// findAtInternal -- see encodeIndex() for details.
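// Decodes an index previously produced by encodeIndex() (e.g. obtained from
// iterator::getIndex()) back into a (submap, offset) pair.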
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
findAtInternal(uint32_t idx) const {
  uint32_t subMapIdx, subMapOffset;
  if (idx & kSecondaryMapBit_) {
    // idx falls in a secondary map
    idx &= ~kSecondaryMapBit_;  // unset secondary bit
    subMapIdx = idx >> kSubMapIndexShift_;
    DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
    subMapOffset = idx & kSubMapIndexMask_;
  } else {
    // idx falls in primary map
    subMapIdx = 0;
    subMapOffset = idx;
  }
  return SimpleRetT(subMapIdx, subMapOffset, true);
}
// erase --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
erase(const KeyT k) {
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
      return 1;
    }
  }
  // Didn't find our key...
  return 0;
}
// capacity -- summation of capacities of all submaps
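// Under concurrent growth this is only a snapshot: a submap published after
// the acquire load below is not counted.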
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
capacity() const {
  size_t totalCap(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  }
  return totalCap;
}
// spaceRemaining --
// number of new insertions until current submaps are all at max load
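// Entry counts come from ThreadCachedInt::readFull(), so under concurrent
// insertions the result is a best-effort estimate, not an exact guarantee.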
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
spaceRemaining() const {
  size_t spaceRem(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    spaceRem += std::max<size_t>(
        0,
        thisMap->maxEntries_ - thisMap->numEntries_.readFull());
  }
  return spaceRem;
}
// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
clear() {
  subMaps_[0].load(std::memory_order_relaxed)->clear();
  int const numMaps = numMapsAllocated_
      .load(std::memory_order_relaxed);
  FOR_EACH_RANGE(i, 1, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    DCHECK(thisMap);
    SubMap::destroy(thisMap);
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}
// size --
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
size() const {
  size_t totalSize(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  }
  return totalSize;
}
// encodeIndex -- Encode the submap index and offset into return.
// index_ret must be pre-populated with the submap offset.
//
// We leave index_ret untouched when referring to the primary map
// so it can be as large as possible (31 data bits). Max size of
// secondary maps is limited by what can fit in the low 27 bits.
//
// Returns the following bit-encoded data in index_ret:
//   if subMap == 0 (primary map) =>
//     bit(s)           value
//         31               0
//       0-30   submap offset (index_ret input)
//
//   if subMap > 0 (secondary maps) =>
//     bit(s)           value
//         31               1
//      27-30   which subMap
//       0-26   subMap offset (index_ret input)
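//
// For example, assuming the layout above (secondary bit 31, submap bits at
// shift 27):
//   encodeIndex(0, 5) == 5
//   encodeIndex(2, 5) == 0x80000000 | (2 << 27) | 5 == 0x90000005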
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
uint32_t
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
encodeIndex(uint32_t subMap, uint32_t offset) {
  DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
  if (subMap == 0) return offset;
  // Make sure subMap isn't too big
  DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  // Make sure subMap bits of offset are clear
  DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);

  // Set high-order bits to encode which submap this index belongs to
  return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
// Iterator implementation
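//
// The iterator visits the primary map first, then each secondary map in
// allocation order (see checkAdvanceToNextSubmap() below). Illustrative
// usage, assuming begin()/end() declared in AtomicHashMap.h:
//
//   for (const auto& kv : ahm) {
//     use(kv.first, kv.second);  // use() is a hypothetical callback
//   }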
template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn,
          typename Allocator, typename ProbeFcn>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator, ProbeFcn>::
    ahm_iterator : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                                          IterVal,
                                          boost::forward_traversal_tag> {
  explicit ahm_iterator() : ahm_(nullptr) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template <class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
               typename std::enable_if<
                   std::is_convertible<OtherSubIt, SubIt>::value>::type* = 0)
      : ahm_(o.ahm_)
      , subMap_(o.subMap_)
      , subIt_(o.subIt_) {}

  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  explicit ahm_iterator(ContT* ahm,
                        uint32_t subMap,
                        const SubIt& subIt)
      : ahm_(ahm)
      , subMap_(subMap)
      , subIt_(subIt) {}

  friend class boost::iterator_core_access;

  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ &&
        subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  bool isEnd() const { return ahm_ == nullptr; }

  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap =
        ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
    while (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();