/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include <folly/detail/AtomicHashUtils.h>
25 // AtomicHashMap constructor -- Atomic wrapper that allows growth
26 // This class has a lot of overhead (184 Bytes) so only use for big maps
27 template <typename KeyT, typename ValueT,
28 typename HashFcn, typename EqualFcn, typename Allocator>
29 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
30 AtomicHashMap(size_t finalSizeEst, const Config& config)
31 : kGrowthFrac_(config.growthFactor < 0 ?
32 1.0 - config.maxLoadFactor : config.growthFactor) {
33 CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
34 subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
35 std::memory_order_relaxed);
36 auto subMapCount = kNumSubMaps_;
37 FOR_EACH_RANGE(i, 1, subMapCount) {
38 subMaps_[i].store(nullptr, std::memory_order_relaxed);
40 numMapsAllocated_.store(1, std::memory_order_relaxed);
44 template <typename KeyT, typename ValueT,
45 typename HashFcn, typename EqualFcn, typename Allocator>
46 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
47 EqualFcn, Allocator>::iterator, bool>
48 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
49 insert(key_type k, const mapped_type& v) {
50 SimpleRetT ret = insertInternal(k,v);
51 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
52 return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
56 template <typename KeyT, typename ValueT,
57 typename HashFcn, typename EqualFcn, typename Allocator>
58 std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
59 EqualFcn, Allocator>::iterator, bool>
60 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
61 insert(key_type k, mapped_type&& v) {
62 SimpleRetT ret = insertInternal(k, std::move(v));
63 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
64 return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
68 // insertInternal -- Allocates new sub maps as existing ones fill up.
69 template <typename KeyT, typename ValueT,
70 typename HashFcn, typename EqualFcn, typename Allocator>
72 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
73 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
74 insertInternal(key_type key, T&& value) {
76 auto nextMapIdx = // this maintains our state
77 numMapsAllocated_.load(std::memory_order_acquire);
78 typename SubMap::SimpleRetT ret;
79 FOR_EACH_RANGE(i, 0, nextMapIdx) {
80 // insert in each map successively. If one succeeds, we're done!
81 SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
82 ret = subMap->insertInternal(key, std::forward<T>(value));
83 if (ret.idx == subMap->capacity_) {
84 continue; //map is full, so try the next one
86 // Either collision or success - insert in either case
87 return SimpleRetT(i, ret.idx, ret.success);
90 // If we made it this far, all maps are full and we need to try to allocate
93 SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
94 if (nextMapIdx >= kNumSubMaps_ ||
95 primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
96 // Can't allocate any more sub maps.
97 throw AtomicHashMapFullError();
100 if (tryLockMap(nextMapIdx)) {
101 // Alloc a new map and shove it in. We can change whatever
102 // we want because other threads are waiting on us...
103 size_t numCellsAllocated = (size_t)
104 (primarySubMap->capacity_ *
105 std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
106 size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
107 DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
108 (SubMap*)kLockedPtr_);
109 // create a new map using the settings stored in the first map
112 config.emptyKey = primarySubMap->kEmptyKey_;
113 config.lockedKey = primarySubMap->kLockedKey_;
114 config.erasedKey = primarySubMap->kErasedKey_;
115 config.maxLoadFactor = primarySubMap->maxLoadFactor();
116 config.entryCountThreadCacheSize =
117 primarySubMap->getEntryCountThreadCacheSize();
118 subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
119 std::memory_order_relaxed);
121 // Publish the map to other threads.
122 numMapsAllocated_.fetch_add(1, std::memory_order_release);
123 DCHECK_EQ(nextMapIdx + 1,
124 numMapsAllocated_.load(std::memory_order_relaxed));
126 // If we lost the race, we'll have to wait for the next map to get
127 // allocated before doing any insertion here.
128 detail::atomic_hash_spin_wait([&] {
129 return nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire);
133 // Relaxed is ok here because either we just created this map, or we
134 // just did a spin wait with an acquire load on numMapsAllocated_.
135 SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
136 DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
137 ret = loadedMap->insertInternal(key, std::forward<T>(value));
138 if (ret.idx != loadedMap->capacity_) {
139 return SimpleRetT(nextMapIdx, ret.idx, ret.success);
141 // We took way too long and the new map is already full...try again from
142 // the top (this should pretty much never happen).
143 goto beginInsertInternal;
147 template <typename KeyT, typename ValueT,
148 typename HashFcn, typename EqualFcn, typename Allocator>
149 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
150 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
152 SimpleRetT ret = findInternal(k);
156 SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
157 return iterator(this, ret.i, subMap->makeIter(ret.j));
160 template <typename KeyT, typename ValueT,
161 typename HashFcn, typename EqualFcn, typename Allocator>
162 typename AtomicHashMap<KeyT, ValueT,
163 HashFcn, EqualFcn, Allocator>::const_iterator
164 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
166 return const_cast<AtomicHashMap*>(this)->find(k);
170 template <typename KeyT, typename ValueT,
171 typename HashFcn, typename EqualFcn, typename Allocator>
172 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
173 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
174 findInternal(const KeyT k) const {
175 SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
176 typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
177 if (LIKELY(ret.idx != primaryMap->capacity_)) {
178 return SimpleRetT(0, ret.idx, ret.success);
180 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
181 FOR_EACH_RANGE(i, 1, numMaps) {
182 // Check each map successively. If one succeeds, we're done!
183 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
184 ret = thisMap->findInternal(k);
185 if (LIKELY(ret.idx != thisMap->capacity_)) {
186 return SimpleRetT(i, ret.idx, ret.success);
189 // Didn't find our key...
190 return SimpleRetT(numMaps, 0, false);
193 // findAtInternal -- see encodeIndex() for details.
194 template <typename KeyT, typename ValueT,
195 typename HashFcn, typename EqualFcn, typename Allocator>
196 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
197 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
198 findAtInternal(uint32_t idx) const {
199 uint32_t subMapIdx, subMapOffset;
200 if (idx & kSecondaryMapBit_) {
201 // idx falls in a secondary map
202 idx &= ~kSecondaryMapBit_; // unset secondary bit
203 subMapIdx = idx >> kSubMapIndexShift_;
204 DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
205 subMapOffset = idx & kSubMapIndexMask_;
207 // idx falls in primary map
211 return SimpleRetT(subMapIdx, subMapOffset, true);
215 template <typename KeyT, typename ValueT,
216 typename HashFcn, typename EqualFcn, typename Allocator>
217 typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
218 AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
219 erase(const KeyT k) {
220 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
221 FOR_EACH_RANGE(i, 0, numMaps) {
222 // Check each map successively. If one succeeds, we're done!
223 if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
227 // Didn't find our key...
231 // capacity -- summation of capacities of all submaps
232 template <typename KeyT, typename ValueT,
233 typename HashFcn, typename EqualFcn, typename Allocator>
234 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
237 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
238 FOR_EACH_RANGE(i, 0, numMaps) {
239 totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
245 // number of new insertions until current submaps are all at max load
246 template <typename KeyT, typename ValueT,
247 typename HashFcn, typename EqualFcn, typename Allocator>
248 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
249 spaceRemaining() const {
251 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
252 FOR_EACH_RANGE(i, 0, numMaps) {
253 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
254 spaceRem += std::max(
256 thisMap->maxEntries_ - &thisMap->numEntries_.readFull()
262 // clear -- Wipes all keys and values from primary map and destroys
263 // all secondary maps. Not thread safe.
264 template <typename KeyT, typename ValueT,
265 typename HashFcn, typename EqualFcn, typename Allocator>
266 void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
268 subMaps_[0].load(std::memory_order_relaxed)->clear();
269 int const numMaps = numMapsAllocated_
270 .load(std::memory_order_relaxed);
271 FOR_EACH_RANGE(i, 1, numMaps) {
272 SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
274 SubMap::destroy(thisMap);
275 subMaps_[i].store(nullptr, std::memory_order_relaxed);
277 numMapsAllocated_.store(1, std::memory_order_relaxed);
281 template <typename KeyT, typename ValueT,
282 typename HashFcn, typename EqualFcn, typename Allocator>
283 size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
286 int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
287 FOR_EACH_RANGE(i, 0, numMaps) {
288 totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
293 // encodeIndex -- Encode the submap index and offset into return.
294 // index_ret must be pre-populated with the submap offset.
296 // We leave index_ret untouched when referring to the primary map
297 // so it can be as large as possible (31 data bits). Max size of
298 // secondary maps is limited by what can fit in the low 27 bits.
300 // Returns the following bit-encoded data in index_ret:
301 // if subMap == 0 (primary map) =>
304 // 0-30 submap offset (index_ret input)
306 // if subMap > 0 (secondary maps) =>
309 // 27-30 which subMap
310 // 0-26 subMap offset (index_ret input)
311 template <typename KeyT, typename ValueT,
312 typename HashFcn, typename EqualFcn, typename Allocator>
313 inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
314 encodeIndex(uint32_t subMap, uint32_t offset) {
315 DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
316 if (subMap == 0) return offset;
317 // Make sure subMap isn't too big
318 DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
319 // Make sure subMap bits of offset are clear
320 DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);
322 // Set high-order bits to encode which submap this index belongs to
323 return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
327 // Iterator implementation
329 template <typename KeyT, typename ValueT,
330 typename HashFcn, typename EqualFcn, typename Allocator>
331 template<class ContT, class IterVal, class SubIt>
332 struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
333 : boost::iterator_facade<ahm_iterator<ContT,IterVal,SubIt>,
335 boost::forward_traversal_tag>
337 explicit ahm_iterator() : ahm_(0) {}
339 // Conversion ctor for interoperability between const_iterator and
340 // iterator. The enable_if<> magic keeps us well-behaved for
341 // is_convertible<> (v. the iterator_facade documentation).
342 template<class OtherContT, class OtherVal, class OtherSubIt>
343 ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
344 typename std::enable_if<
345 std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
352 * Returns the unique index that can be used for access directly
353 * into the data storage.
355 uint32_t getIndex() const {
357 return ahm_->encodeIndex(subMap_, subIt_.getIndex());
361 friend class AtomicHashMap;
362 explicit ahm_iterator(ContT* ahm,
370 friend class boost::iterator_core_access;
375 checkAdvanceToNextSubmap();
378 bool equal(const ahm_iterator& other) const {
379 if (ahm_ != other.ahm_) {
383 if (isEnd() || other.isEnd()) {
384 return isEnd() == other.isEnd();
387 return subMap_ == other.subMap_ &&
388 subIt_ == other.subIt_;
391 IterVal& dereference() const {
395 bool isEnd() const { return ahm_ == nullptr; }
397 void checkAdvanceToNextSubmap() {
402 SubMap* thisMap = ahm_->subMaps_[subMap_].
403 load(std::memory_order_relaxed);
404 while (subIt_ == thisMap->end()) {
405 // This sub iterator is done, advance to next one
407 ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
409 thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
410 subIt_ = thisMap->begin();