/*
 * Copyright 2013 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FOLLY_ATOMICHASHMAP_H_
#error "This should only be included by AtomicHashMap.h"
#endif

#include "folly/detail/AtomicHashUtils.h"

namespace folly {

template <class KeyT, class ValueT, class HashFcn>
const typename AtomicHashMap<KeyT, ValueT, HashFcn>::Config
AtomicHashMap<KeyT, ValueT, HashFcn>::defaultConfig;

// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 bytes) so only use for big maps
template <typename KeyT, typename ValueT, typename HashFcn>
AtomicHashMap<KeyT, ValueT, HashFcn>::
AtomicHashMap(size_t size, const Config& config)
  : kGrowthFrac_(1.0 - config.maxLoadFactor) {
  CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
  subMaps_[0].store(SubMap::create(size, config).release(),
    std::memory_order_relaxed);
  auto numSubMaps = kNumSubMaps_;
  FOR_EACH_RANGE(i, 1, numSubMaps) {
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}

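// Usage sketch (illustrative, not part of this file): the map is sized up
// front and grows by chaining extra submaps, so the initial size should
// approximate the expected final size, e.g.
//
//   folly::AtomicHashMap<int64_t, int64_t> m(4096);
//   auto r = m.insert(10, 100);  // r.second is true iff the key was new
//   auto it = m.find(10);        // iterator, or m.end() if absent
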
// insert --
template <typename KeyT, typename ValueT, typename HashFcn>
std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn>::iterator,bool>
AtomicHashMap<KeyT, ValueT, HashFcn>::
insert(key_type k, const mapped_type& v) {
  SimpleRetT ret = insertInternal(k,v);
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}

template <typename KeyT, typename ValueT, typename HashFcn>
std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn>::iterator,bool>
AtomicHashMap<KeyT, ValueT, HashFcn>::
insert(key_type k, mapped_type&& v) {
  SimpleRetT ret = insertInternal(k, std::move(v));
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                        ret.success);
}

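// Note on both overloads above: when the key is already present, ret.success
// is false and the returned iterator points at the existing element,
// mirroring std::unordered_map::insert, e.g.
//
//   m.insert(10, 100);           // inserted, r.second == true
//   auto r = m.insert(10, 200);  // r.second == false, r.first->second == 100
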
// insertInternal -- Allocates new sub maps as existing ones fill up.
template <typename KeyT, typename ValueT, typename HashFcn>
template <typename T>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
insertInternal(key_type key, T&& value) {
 beginInsertInternal:
  int nextMapIdx = // this maintains our state
    numMapsAllocated_.load(std::memory_order_acquire);
  typename SubMap::SimpleRetT ret;
  FOR_EACH_RANGE(i, 0, nextMapIdx) {
    // insert in each map successively. If one succeeds, we're done!
    SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = subMap->insertInternal(key, std::forward<T>(value));
    if (ret.idx == subMap->capacity_) {
      continue;  // map is full, so try the next one
    }
    // Either collision or success - insert in either case
    return SimpleRetT(i, ret.idx, ret.success);
  }

  // If we made it this far, all maps are full and we need to try to allocate
  // the next one.

  SubMap* primarySubMap = subMaps_[0].load(std::memory_order_relaxed);
  if (nextMapIdx >= kNumSubMaps_ ||
      primarySubMap->capacity_ * kGrowthFrac_ < 1.0) {
    // Can't allocate any more sub maps.
    throw AtomicHashMapFullError();
  }

  if (tryLockMap(nextMapIdx)) {
    // Alloc a new map and shove it in. We can change whatever
    // we want because other threads are waiting on us...
    size_t numCellsAllocated = (size_t)
      (primarySubMap->capacity_ *
       std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
    size_t newSize = (size_t) (numCellsAllocated * kGrowthFrac_);
    DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
      (SubMap*)kLockedPtr_);
    // create a new map using the settings stored in the first map

    Config config;
    config.emptyKey = primarySubMap->kEmptyKey_;
    config.lockedKey = primarySubMap->kLockedKey_;
    config.erasedKey = primarySubMap->kErasedKey_;
    config.maxLoadFactor = primarySubMap->maxLoadFactor();
    config.entryCountThreadCacheSize =
      primarySubMap->getEntryCountThreadCacheSize();
    subMaps_[nextMapIdx].store(SubMap::create(newSize, config).release(),
      std::memory_order_relaxed);

    // Publish the map to other threads.
    numMapsAllocated_.fetch_add(1, std::memory_order_release);
    DCHECK_EQ(nextMapIdx + 1,
      numMapsAllocated_.load(std::memory_order_relaxed));
  } else {
    // If we lost the race, we'll have to wait for the next map to get
    // allocated before doing any insertion here.
    FOLLY_SPIN_WAIT(
      nextMapIdx >= numMapsAllocated_.load(std::memory_order_acquire)
    );
  }

  // Relaxed is ok here because either we just created this map, or we
  // just did a spin wait with an acquire load on numMapsAllocated_.
  SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
  DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
  ret = loadedMap->insertInternal(key, std::forward<T>(value));
  if (ret.idx != loadedMap->capacity_) {
    return SimpleRetT(nextMapIdx, ret.idx, ret.success);
  }
  // We took way too long and the new map is already full... try again from
  // the top (this should pretty much never happen).
  goto beginInsertInternal;
}

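// Growth arithmetic (illustrative numbers, derived from the code above):
// with kGrowthFrac_ == 0.5 and a primary submap of capacity 1024, the first
// secondary map gets 1024 * pow(1.5, 0) * 0.5 == 512 cells, the second gets
// 1024 * pow(1.5, 1) * 0.5 == 768, and so on -- each new submap holds
// roughly kGrowthFrac_ of all cells allocated so far.
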
// find --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::iterator
AtomicHashMap<KeyT, ValueT, HashFcn>::
find(KeyT k) {
  SimpleRetT ret = findInternal(k);
  if (ret.i >= numMapsAllocated_.load(std::memory_order_acquire)) {
    return end();
  }
  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
  return iterator(this, ret.i, subMap->makeIter(ret.j));
}

template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::const_iterator
AtomicHashMap<KeyT, ValueT, HashFcn>::
find(KeyT k) const {
  return const_cast<AtomicHashMap*>(this)->find(k);
}

// findInternal --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
findInternal(const KeyT k) const {
  SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
  typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
  if (LIKELY(ret.idx != primaryMap->capacity_)) {
    return SimpleRetT(0, ret.idx, ret.success);
  }
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 1, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    ret = thisMap->findInternal(k);
    if (LIKELY(ret.idx != thisMap->capacity_)) {
      return SimpleRetT(i, ret.idx, ret.success);
    }
  }
  // Didn't find our key...
  return SimpleRetT(numMaps, 0, false);
}

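// Note: the not-found sentinel is SimpleRetT(numMaps, 0, false) -- a submap
// index one past the last allocated map. find() above turns exactly this
// case into end() via its ret.i >= numMapsAllocated_ check.
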
// findAtInternal -- see encodeIndex() for details.
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
AtomicHashMap<KeyT, ValueT, HashFcn>::
findAtInternal(uint32_t idx) const {
  uint32_t subMapIdx, subMapOffset;
  if (idx & kSecondaryMapBit_) {
    // idx falls in a secondary map
    idx &= ~kSecondaryMapBit_;  // unset secondary bit
    subMapIdx = idx >> kSubMapIndexShift_;
    DCHECK_LT(subMapIdx, numMapsAllocated_.load(std::memory_order_relaxed));
    subMapOffset = idx & kSubMapIndexMask_;
  } else {
    // idx falls in primary map
    subMapIdx = 0;
    subMapOffset = idx;
  }
  return SimpleRetT(subMapIdx, subMapOffset, true);
}

// erase --
template <typename KeyT, typename ValueT, typename HashFcn>
typename AtomicHashMap<KeyT, ValueT, HashFcn>::size_type
AtomicHashMap<KeyT, ValueT, HashFcn>::
erase(const KeyT k) {
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    // Check each map successively. If one succeeds, we're done!
    if (subMaps_[i].load(std::memory_order_relaxed)->erase(k)) {
      return 1;
    }
  }
  // Didn't find our key...
  return 0;
}

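// Illustrative: like std::unordered_map::erase(key), the return value is the
// number of elements removed, e.g.
//
//   size_t n = m.erase(10);  // n == 1 if key 10 was present, else 0
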
// capacity -- summation of capacities of all submaps
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
capacity() const {
  size_t totalCap(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalCap += subMaps_[i].load(std::memory_order_relaxed)->capacity_;
  }
  return totalCap;
}

// spaceRemaining --
// number of new insertions until current submaps are all at max load
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
spaceRemaining() const {
  size_t spaceRem(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    spaceRem += std::max(
      0,
      thisMap->maxEntries_ - thisMap->numEntries_.readFull());
  }
  return spaceRem;
}

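// Illustrative arithmetic: maxEntries_ is a submap's max-load cap, so a
// submap capped at 768 entries and currently holding 700 contributes 68;
// the std::max with 0 keeps a submap that has crept past its cap from
// subtracting space from the total.
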
// clear -- Wipes all keys and values from primary map and destroys
// all secondary maps. Not thread safe.
template <typename KeyT, typename ValueT, typename HashFcn>
void AtomicHashMap<KeyT, ValueT, HashFcn>::
clear() {
  subMaps_[0].load(std::memory_order_relaxed)->clear();
  int const numMaps = numMapsAllocated_
    .load(std::memory_order_relaxed);
  FOR_EACH_RANGE(i, 1, numMaps) {
    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
    DCHECK(thisMap);
    SubMap::destroy(thisMap);
    subMaps_[i].store(nullptr, std::memory_order_relaxed);
  }
  numMapsAllocated_.store(1, std::memory_order_relaxed);
}

// size --
template <typename KeyT, typename ValueT, typename HashFcn>
size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
size() const {
  size_t totalSize(0);
  int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
  FOR_EACH_RANGE(i, 0, numMaps) {
    totalSize += subMaps_[i].load(std::memory_order_relaxed)->size();
  }
  return totalSize;
}

// encodeIndex -- Encode the submap index and offset into return.
// index_ret must be pre-populated with the submap offset.
//
// We leave index_ret untouched when referring to the primary map
// so it can be as large as possible (31 data bits). Max size of
// secondary maps is limited by what can fit in the low 27 bits.
//
// Returns the following bit-encoded data in index_ret:
//   if subMap == 0 (primary map) =>
//     bit(s)  value
//        31   0
//      0-30   submap offset (index_ret input)
//
//   if subMap > 0 (secondary maps) =>
//     bit(s)  value
//        31   1
//     27-30   which subMap
//      0-26   subMap offset (index_ret input)
template <typename KeyT, typename ValueT, typename HashFcn>
inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn>::
encodeIndex(uint32_t subMap, uint32_t offset) {
  DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
  if (subMap == 0) return offset;
  // Make sure subMap isn't too big
  DCHECK_EQ(subMap >> kNumSubMapBits_, 0);
  // Make sure subMap bits of offset are clear
  DCHECK_EQ(offset & (~kSubMapIndexMask_ | kSecondaryMapBit_), 0);

  // Set high-order bits to encode which submap this index belongs to
  return offset | (subMap << kSubMapIndexShift_) | kSecondaryMapBit_;
}

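// Worked example (illustrative, assuming the layout documented above:
// kSecondaryMapBit_ == 1u << 31, kSubMapIndexShift_ == 27,
// kSubMapIndexMask_ == (1u << 27) - 1): encoding offset 5 in submap 2 gives
//
//   0x80000000 | (2u << 27) | 5 == 0x90000005
//
// and findAtInternal() inverts it: the high bit selects the secondary-map
// path, (idx & ~0x80000000) >> 27 recovers subMapIdx == 2, and the low 27
// bits recover subMapOffset == 5. Submap 0 is the identity mapping.
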
// Iterator implementation

template <typename KeyT, typename ValueT, typename HashFcn>
template<class ContT, class IterVal, class SubIt>
struct AtomicHashMap<KeyT, ValueT, HashFcn>::ahm_iterator
    : boost::iterator_facade<ahm_iterator<ContT,IterVal,SubIt>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit ahm_iterator() : ahm_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal, class OtherSubIt>
  ahm_iterator(const ahm_iterator<OtherContT,OtherVal,OtherSubIt>& o,
               typename std::enable_if<
                 std::is_convertible<OtherSubIt,SubIt>::value >::type* = 0)
      : ahm_(o.ahm_)
      , subMap_(o.subMap_)
      , subIt_(o.subIt_)
  {}

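  // Illustrative consequence of the enable_if<> guard: the mutable submap
  // iterator converts to the const one but not vice versa, so
  //
  //   const_iterator cit = m.begin();  // compiles
  //   iterator it = cit;               // rejected at compile time
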
  /*
   * Returns the unique index that can be used for access directly
   * into the data storage.
   */
  uint32_t getIndex() const {
    CHECK(!isEnd());
    return ahm_->encodeIndex(subMap_, subIt_.getIndex());
  }

 private:
  friend class AtomicHashMap;
  explicit ahm_iterator(ContT* ahm,
                        uint32_t subMap,
                        const SubIt& subIt)
      : ahm_(ahm)
      , subMap_(subMap)
      , subIt_(subIt)
  {
    checkAdvanceToNextSubmap();
  }

  friend class boost::iterator_core_access;

  void increment() {
    CHECK(!isEnd());
    ++subIt_;
    checkAdvanceToNextSubmap();
  }

  bool equal(const ahm_iterator& other) const {
    if (ahm_ != other.ahm_) {
      return false;
    }

    if (isEnd() || other.isEnd()) {
      return isEnd() == other.isEnd();
    }

    return subMap_ == other.subMap_ &&
      subIt_ == other.subIt_;
  }

  IterVal& dereference() const {
    return *subIt_;
  }

  bool isEnd() const { return ahm_ == nullptr; }

  void checkAdvanceToNextSubmap() {
    if (isEnd()) {
      return;
    }

    SubMap* thisMap = ahm_->subMaps_[subMap_].
      load(std::memory_order_relaxed);
    if (subIt_ == thisMap->end()) {
      // This sub iterator is done, advance to next one
      if (subMap_ + 1 <
          ahm_->numMapsAllocated_.load(std::memory_order_acquire)) {
        ++subMap_;
        thisMap = ahm_->subMaps_[subMap_].load(std::memory_order_relaxed);
        subIt_ = thisMap->begin();
      } else {
        ahm_ = nullptr;  // set to end()
      }
    }
  }

 private:
  ContT* ahm_;
  uint32_t subMap_;
  SubIt subIt_;
}; // ahm_iterator

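// Iteration sketch (illustrative; use(...) is a placeholder): the iterator
// walks the primary submap first, then each secondary submap in allocation
// order; entries inserted concurrently with the traversal may or may not be
// visited.
//
//   for (auto it = m.begin(); it != m.end(); ++it) {
//     use(it->first, it->second);
//   }
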
} // namespace folly

#undef FOLLY_SPIN_WAIT