/*
 * Copyright 2014 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include <folly/detail/AtomicHashUtils.h>

namespace folly {
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;

// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashMap(size_t size, const Config& config)
  : kGrowthFrac_(config.growthFactor < 0 ?
                 1.0 - config.maxLoadFactor : config.growthFactor) {
  CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
  subMaps_[0].store(SubMap::create(size, config).release(),
    std::memory_order_relaxed);
  auto numSubMaps = kNumSubMaps_;
  FOR_EACH_RANGE(i, 1, numSubMaps) {
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}
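
// Construction sketch (illustrative usage, not part of this header):
// maxLoadFactor and growthFactor are the Config fields this constructor
// reads; maxLoadFactor must lie in (0.0, 1.0), and a negative growthFactor
// falls back to a growth fraction of 1.0 - maxLoadFactor.
//
//   AtomicHashMap<int64_t, int64_t>::Config config;
//   config.maxLoadFactor = 0.75;
//   config.growthFactor = -1;  // default: grow by 1.0 - 0.75 == 0.25
//   AtomicHashMap<int64_t, int64_t> ahm(100000, config);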

template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
                                 EqualFcn, Allocator>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insert(key_type k, const mapped_type& v) {
  SimpleRetT ret = insertInternal(k, v);
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}

template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
                                 EqualFcn, Allocator>::iterator, bool>
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insert(key_type k, mapped_type&& v) {
  SimpleRetT ret = insertInternal(k, std::move(v));
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}
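
// Insertion sketch (illustrative usage): like std::map, insert() returns
// pair<iterator, bool>. The bool is ret.success from insertInternal(), so
// it is false when the key was already present, and the iterator points at
// the winning entry either way; an existing value is never overwritten.
//
//   auto ret = ahm.insert(42, 1);
//   if (!ret.second) {
//     // key 42 already existed; ret.first->second is the existing value
//   }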

// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
template <class T>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(key_type key, T&& value) {
 beginInsertInternal:
  int nextMapIdx = // this maintains our state
    numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE(i, 0, nextMapIdx) {
    // insert in each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->insertInternal(key, std::forward<T>(value));
    if (ret.idx == subMap->capacity_) {
      continue; // map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)
      (primarySubMap->capacity_ *
       std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = (size_t) (numCellsAllocated * kGrowthFrac_);
    DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
      (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
      primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
      std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(nextMapIdx + 1,
      numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    FOLLY_SPIN_WAIT(
      nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
    );
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<T>(value));
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full... try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}
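
// Worked example of the sizing arithmetic above (illustrative numbers):
// with a primary capacity of 10000 cells and kGrowthFrac_ == 0.25,
//   newSize = 10000 * pow(1.25, nextMapIdx - 1) * 0.25
// so submap 1 gets 2500 cells, submap 2 gets 3125, submap 3 gets ~3906;
// each new submap is 1.25x the previous one, and total capacity grows
// geometrically without ever rehashing existing entries.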

template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
find(KeyT k) {
  SimpleRetT ret = findInternal(k);
  if (!ret.success) {
    return end();
  }
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return iterator(this, ret.i, subMap->makeIter(ret.j));
}

template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT,
                       HashFcn, EqualFcn, Allocator>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
find(KeyT k) const {
  return const_cast<AtomicHashMap*>(this)->find(k);
}

// findInternal --
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT k) const {
  SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
  if (LIKELY(ret.idx != primaryMap->capacity_)) {
    return SimpleRetT(0, ret.idx, ret.success);
  }
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 1, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = thisMap->findInternal(k);
    if (LIKELY(ret.idx != thisMap->capacity_)) {
      return SimpleRetT(i, ret.idx, ret.success);
    }
  }
  // Didn't find our key...
  return SimpleRetT(numMaps, 0, false);
}

// findAtInternal -- see encodeIndex() for details.
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findAtInternal(uint32_t idx) const {
  uint32_t subMapIdx, subMapOffset;
  if (idx & kSecondaryMapBit_) {
    // idx falls in a secondary map
    idx &= ~kSecondaryMapBit_; // unset secondary bit
    subMapIdx = idx >> kSubMapIndexShift_;
    DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
    subMapOffset = idx & kSubMapIndexMask_;
  } else {
    // idx falls in primary map
    subMapIdx = 0;
    subMapOffset = idx;
  }
  return SimpleRetT(subMapIdx, subMapOffset, true);
}
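
// Worked decode example (illustrative; assumes the constants implied by the
// bit layout documented at encodeIndex() below: kSecondaryMapBit_ == 1u << 31,
// kSubMapIndexShift_ == 27, kSubMapIndexMask_ == (1u << 27) - 1):
//   idx == 0x90000005                        // secondary bit is set
//   idx &= ~kSecondaryMapBit_                // -> 0x10000005
//   subMapIdx = idx >> 27                    // -> 2
//   subMapOffset = idx & kSubMapIndexMask_   // -> 5
// i.e. findAtInternal() exactly inverts encodeIndex(2, 5).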

// erase --
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(const KeyT k) {
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
      return 1;
    }
  }
  // Didn't find our key...
  return 0;
}

// capacity -- summation of capacities of all submaps
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
capacity() const {
  size_t totalCap(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  }
  return totalCap;
}

// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
spaceRemaining() const {
  size_t spaceRem(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    spaceRem += std::max(
      0,
      thisMap->maxEntries_ - thisMap->numEntries_.readFull()
    );
  }
  return spaceRem;
}

// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
  subMaps_[0].load(std::memory_order_relaxed)->clear();
  int const numMaps = numMapsAllocated_
    .load(std::memory_order_relaxed);
  FOR_EACH_RANGE(i, 1, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    DCHECK(thisMap);
    SubMap::destroy(thisMap);
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}

// size --
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
size() const {
  size_t totalSize(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  }
  return totalSize;
}

// encodeIndex -- Encode the submap index and offset into return.
// index_ret must be pre-populated with the submap offset.
//
// We leave index_ret untouched when referring to the primary map
// so it can be as large as possible (31 data bits). Max size of
// secondary maps is limited by what can fit in the low 27 bits.
//
// Returns the following bit-encoded data in index_ret:
//   if subMap == 0 (primary map) =>
//     bit(s)          value
//         31              0
//       0-30   submap offset (index_ret input)
//
//   if subMap > 0 (secondary maps) =>
//     bit(s)          value
//         31              1
//      27-30   which subMap
//       0-26   subMap offset (index_ret input)
template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
encodeIndex(uint32_t subMap, uint32_t offset) {
  DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
  if (subMap == 0) return offset;
  // Make sure subMap isn't too big
  DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  // Make sure subMap bits of offset are clear
  DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);

  // Set high-order bits to encode which submap this index belongs to
  return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}
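
// Worked encode example (illustrative; same assumed constants as the decode
// example at findAtInternal() above): an entry at offset 5 of submap 2
// encodes as
//   encodeIndex(2, 5) == 5 | (2 << 27) | (1u << 31) == 0x90000005
// while the same offset in the primary map encodes as plain 5, keeping all
// 31 low bits available for primary-map offsets.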

// Iterator implementation

template <typename KeyT, typename ValueT,
          typename HashFcn, typename EqualFcn, typename Allocator>
template <class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
    : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit ahm_iterator() : ahm_(nullptr) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template <class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(const ahm_iterator<OtherContT, OtherVal, OtherSubIt>& o,
               typename std::enable_if<
                 std::is_convertible<OtherSubIt, SubIt>::value>::type* = nullptr)
      : ahm_(o.ahm_),
        subMap_(o.subMap_),
        subIt_(o.subIt_) {}

  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  explicit ahm_iterator(ContT* ahm,
                        uint32_t subMap,
                        const SubIt& subIt)
      : ahm_(ahm),
        subMap_(subMap),
        subIt_(subIt) {
    checkAdvanceToNextSubmap();
  }

  friend class boost::iterator_core_access;

  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ &&
      subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  bool isEnd() const { return ahm_ == nullptr; }

  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap = ahm_->subMaps_[subMap_].
      load(std::memory_order_relaxed);
    if (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        ahm_ = nullptr;
      }
    }
  }

 private:
  ContT* ahm_;
  uint32_t subMap_;
  SubIt subIt_;
}; // ahm_iterator

} // namespace folly

#undef FOLLY_SPIN_WAIT