/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICHASHARRAY_H_
#error "This should only be included by AtomicHashArray.h"
#endif

#include <folly/Bits.h>
#include <folly/detail/AtomicHashUtils.h>

namespace folly {
// AtomicHashArray private constructor --
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
    : capacity_(capacity),
      maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
      kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
      kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
      numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
}
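// Worked example (illustrative only, not part of the original source):
// constructing with capacity = 1000 and _maxLoadFactor = 0.8 yields
//
//   maxEntries_  = size_t(0.8 * 1000 + 0.5) = 800     // soft insertion limit
//   kAnchorMask_ = nextPowTwo(1000) - 1     = 1023    // mask for hash -> anchor index
//
// numEntries_ and numPendingEntries_ are thread-cached counters seeded with 0
// and the given cacheSize, which is why isFull_ (driven by numEntries_'s fast
// read) is only an approximate signal.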
/*
 * findInternal --
 *
 *   Sets ret.second to the value found and ret.index to the index of the
 *   key, and returns true; if the key does not exist, returns false and
 *   sets ret.index to capacity_.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT key_in) {
  DCHECK_NE(key_in, kEmptyKey_);
  DCHECK_NE(key_in, kLockedKey_);
  DCHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    const KeyT key = acquireLoadKey(cells_[idx]);
    if (LIKELY(EqualFcn()(key, key_in))) {
      return SimpleRetT(idx, true);
    }
    if (UNLIKELY(key == kEmptyKey_)) {
      // If we hit an empty element, this key does not exist.
      return SimpleRetT(capacity_, false);
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // Probed every cell...fail.
      return SimpleRetT(capacity_, false);
    }
  }
}
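// Illustrative caller sketch (hypothetical, not part of this file): how a
// wrapper might consume findInternal()'s SimpleRetT.  Assumes SimpleRetT
// exposes idx/success as in the companion header; lookupOrNull is a made-up
// helper name.
//
//   ValueT* lookupOrNull(AtomicHashArray& aha, KeyT key) {
//     auto ret = aha.findInternal(key);
//     // ret.idx == capacity_ means "not found"; otherwise idx is the cell.
//     return ret.success ? &aha.cells_[ret.idx].second : nullptr;
//   }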
/*
 * insertInternal --
 *
 *   Returns false on failure due to key collision or a full map.
 *   Also sets ret.index to the index of the key.  If the map is full, sets
 *   ret.index = capacity_.  Also sets ret.second to the cell value: if the
 *   insert succeeded this is what we just inserted, if there was a key
 *   collision it is the previously inserted value, and if the map is full
 *   it is left default.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <typename... ArgTs>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) {
  const short NO_NEW_INSERTS = 1;
  const short NO_PENDING_INSERTS = 2;
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);
  size_t idx = keyToAnchorIdx(key_in);
  size_t numProbes = 0;
  for (;;) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    if (relaxedLoadKey(*cell) == kEmptyKey_) {
      // NOTE: isFull_ is set based on numEntries_.readFast(), so it's
      // possible to insert more than maxEntries_ entries. However, it's not
      // possible to insert past capacity_.
      ++numPendingEntries_;
      if (isFull_.load(std::memory_order_acquire)) {
        --numPendingEntries_;

        // Before deciding whether this insert succeeded, this thread needs to
        // wait until no other thread can add a new entry.

        // Correctness assumes isFull_ is true at this point. If
        // another thread now does ++numPendingEntries_, we expect it
        // to pass the isFull_.load() test above. (It shouldn't insert
        // a new entry.)
        detail::atomic_hash_spin_wait([&] {
          return
            (isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS) &&
            (numPendingEntries_.readFull() != 0);
        });
        isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);

        if (relaxedLoadKey(*cell) == kEmptyKey_) {
          // Don't insert past the max load factor.
          return SimpleRetT(capacity_, false);
        }
      } else {
        // An unallocated cell. Try once to lock it. If we succeed, insert here.
        // If we fail, fall through to the comparison below; maybe the insert
        // that just beat us was for this very key....
        if (tryLockCell(cell)) {
          // Write the value - done before unlocking.
          try {
            DCHECK(relaxedLoadKey(*cell) == kLockedKey_);
            new (&cell->second) ValueT(std::forward<ArgTs>(vCtorArgs)...);
            unlockCell(cell, key_in); // Sets the new key
          } catch (...) {
            // Transition back to the empty key---requires handling
            // locked->empty below.
            unlockCell(cell, kEmptyKey_);
            --numPendingEntries_;
            throw;
          }
          // Direct comparison rather than EqualFcn is ok here
          // (we just inserted it).
          DCHECK(relaxedLoadKey(*cell) == key_in);
          --numPendingEntries_;
          ++numEntries_; // This is a thread-cached atomic increment :)
          if (numEntries_.readFast() >= maxEntries_) {
            isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
          }
          return SimpleRetT(idx, true);
        }
        --numPendingEntries_;
      }
    }
    DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
    if (kLockedKey_ == acquireLoadKey(*cell)) {
      detail::atomic_hash_spin_wait([&] {
        return kLockedKey_ == acquireLoadKey(*cell);
      });
    }

    const KeyT thisKey = acquireLoadKey(*cell);
    if (EqualFcn()(thisKey, key_in)) {
      // Found an existing entry for our key, but we don't overwrite the
      // previous value.
      return SimpleRetT(idx, false);
    } else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) {
      // We need to try again (i.e., don't increment numProbes or
      // advance idx): this case can happen if the constructor for
      // ValueT threw for this very cell (the rethrow block above).
      continue;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // Probed every cell...fail.
      return SimpleRetT(capacity_, false);
    }

    idx = probeNext(idx, numProbes);
  }
}
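// Illustrative caller sketch (hypothetical, not part of this file): the three
// outcomes insertInternal() can report, distinguished purely from the returned
// SimpleRetT.  The enum and helper name are made up for the example.
//
//   enum class InsertOutcome { Inserted, KeyExists, Full };
//
//   InsertOutcome classify(const SimpleRetT& ret, size_t capacity) {
//     if (ret.success)          return InsertOutcome::Inserted;  // new cell at ret.idx
//     if (ret.idx == capacity)  return InsertOutcome::Full;      // or probe limit hit
//     return InsertOutcome::KeyExists;                           // existing cell at ret.idx
//   }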
/*
 * erase --
 *
 *   Attempts to erase the given key key_in if the key is found.  Returns 1
 *   iff the key was located and marked as erased, and 0 otherwise.
 *
 *   Memory is not freed or reclaimed by erase, i.e. the cell containing the
 *   erased key will never be reused.  If there's an associated value, we
 *   won't touch it either.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(KeyT key_in) {
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    KeyT currentKey = acquireLoadKey(*cell);
    if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) {
      // If we hit an empty (or locked) element, this key does not exist. This
      // is similar to how it's handled in find().
      return 0;
    }
    if (EqualFcn()(currentKey, key_in)) {
      // Found an existing entry for our key, attempt to mark it erased.
      // Some other thread may have erased our key, but this is ok.
      KeyT expect = currentKey;
      if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) {
        numErases_.fetch_add(1, std::memory_order_relaxed);

        // Even if there's a value in the cell, we won't delete (or even
        // default construct) it because some other thread may be accessing it.
        // Locking it meanwhile won't work either since another thread may be
        // holding a pointer to it.

        // We found the key and successfully erased it.
        return 1;
      }
      // If another thread succeeds in erasing our key, we'll stop our search.
      return 0;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // Probed every cell...fail.
      return 0;
    }
  }
}
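// Illustrative sequence (an assumption about usage, not part of this file),
// showing the tombstone semantics described above, with 42 as an ordinary
// (non-special) key:
//
//   aha->erase(42);                 // returns 1: cell now holds kErasedKey_
//   aha->erase(42);                 // returns 0: no live entry for 42 remains
//   aha->insertInternal(42, ...);   // lands in a different, previously empty
//                                   // cell; the erased cell is never reused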
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
create(size_t maxSize, const Config& c) {
  CHECK_LE(c.maxLoadFactor, 1.0);
  CHECK_GT(c.maxLoadFactor, 0.0);
  CHECK_NE(c.emptyKey, c.lockedKey);
  size_t capacity = size_t(maxSize / c.maxLoadFactor);
  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;

  auto const mem = Allocator().allocate(sz);
  try {
    new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
                              c.maxLoadFactor, c.entryCountThreadCacheSize);
  } catch (...) {
    Allocator().deallocate(mem, sz);
    throw;
  }

  SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));

  /*
   * Mark all cells as empty.
   *
   * Note: we're bending the rules a little here by accessing the key
   * element in our cells even though the cell object has not been
   * constructed, and casting them to atomic objects (see cellKeyPtr).
   * (Also, in fact we never actually invoke the value_type
   * constructor.)  This is in order to avoid needing to default
   * construct a bunch of value_type when we first start up: if you
   * have an expensive default constructor for the value type this can
   * noticeably speed up construction time for an AHA.
   */
  FOR_EACH_RANGE(i, 0, map->capacity_) {
    cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
      std::memory_order_relaxed);
  }
  return map;
}
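// Illustrative usage sketch (an assumption, not part of this file): building
// and populating an AHA directly.  The alias, Config fields used, and sizes
// are example choices, not defaults asserted here.
//
//   using AHA = AtomicHashArray<int32_t, int32_t>;
//   AHA::Config config;
//   config.maxLoadFactor = 0.7;
//   AHA::SmartPtr aha = AHA::create(4096, config);   // capacity ~ 4096 / 0.7
//   aha->insert(std::make_pair(10, 20));
//   // When the SmartPtr goes out of scope, its deleter calls destroy() below,
//   // which destructs only the non-empty cells before freeing the slab.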
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
destroy(AtomicHashArray* p) {
  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;

  FOR_EACH_RANGE(i, 0, p->capacity_) {
    if (p->cells_[i].first != p->kEmptyKey_) {
      p->cells_[i].~value_type();
    }
  }
  p->~AtomicHashArray();

  Allocator().deallocate((char *)p, sz);
}
// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
  FOR_EACH_RANGE(i, 0, capacity_) {
    if (cells_[i].first != kEmptyKey_) {
      cells_[i].~value_type();
      *const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
    }
    CHECK(cells_[i].first == kEmptyKey_);
  }
  numEntries_.set(0);
  numPendingEntries_.set(0);
  isFull_.store(0, std::memory_order_relaxed);
  numErases_.store(0, std::memory_order_relaxed);
}
// Iterator implementation

template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
    : boost::iterator_facade<aha_iterator<ContT,IterVal>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit aha_iterator() : aha_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator.  The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal>
  aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
               typename std::enable_if<
               std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
      : aha_(o.aha_)
      , offset_(o.offset_)
  {}

  explicit aha_iterator(ContT* array, size_t offset)
      : aha_(array)
      , offset_(offset)
  {}

  // Returns a unique index that can be used with findAt().
  // WARNING: The following function will fail silently for hashtables
  // with capacity > 2^32.
  uint32_t getIndex() const { return offset_; }

  void advancePastEmpty() {
    while (offset_ < aha_->capacity_ && !isValid()) {
      ++offset_;
    }
  }

 private:
  friend class AtomicHashArray;
  friend class boost::iterator_core_access;

  void increment() {
    ++offset_;
    advancePastEmpty();
  }

  bool equal(const aha_iterator& o) const {
    return aha_ == o.aha_ && offset_ == o.offset_;
  }

  IterVal& dereference() const {
    return aha_->cells_[offset_];
  }

  bool isValid() const {
    KeyT key = acquireLoadKey(aha_->cells_[offset_]);
    return key != aha_->kEmptyKey_ &&
           key != aha_->kLockedKey_ &&
           key != aha_->kErasedKey_;
  }

 private:
  ContT* aha_;
  size_t offset_;
}; // aha_iterator

} // namespace folly
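// Illustrative iteration sketch (hypothetical, not part of this file),
// assuming the container's begin()/end() wrap aha_iterator and call
// advancePastEmpty() so that only live cells are visited:
//
//   for (auto it = aha->begin(); it != aha->end(); ++it) {
//     use(it->first, it->second);   // key and value of a live (non-erased) cell
//   }
//   // it.getIndex() can be saved and later handed to findAt() to revisit the
//   // same cell, as long as the capacity fits in 32 bits (see the warning above).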