/*
 * Copyright 2015 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICHASHARRAY_H_
#error "This should only be included by AtomicHashArray.h"
#endif

#include <folly/Bits.h>
#include <folly/detail/AtomicHashUtils.h>

namespace folly {
// AtomicHashArray private constructor --
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
    : capacity_(capacity),
      maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
      kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
      kAnchorMask_(nextPowTwo(capacity_) - 1), numEntries_(0, cacheSize),
      numPendingEntries_(0, cacheSize), isFull_(0), numErases_(0) {
}
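/*
 * Illustrative arithmetic (hypothetical values, not part of the
 * implementation): with capacity = 1000 and _maxLoadFactor = 0.75,
 * maxEntries_ = size_t(0.75 * 1000 + 0.5) = 750, and
 * kAnchorMask_ = nextPowTwo(1000) - 1 = 1023, a power-of-two bit mask
 * used to fold hash values onto the table without a full modulo in the
 * common case.
 */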
/*
 * findInternal --
 *
 *   Returns (idx, true) -- the index of the key -- if the key exists;
 *   otherwise returns (capacity_, false).
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
findInternal(const KeyT key_in) {
  DCHECK_NE(key_in, kEmptyKey_);
  DCHECK_NE(key_in, kLockedKey_);
  DCHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    const KeyT key = acquireLoadKey(cells_[idx]);
    if (LIKELY(EqualFcn()(key, key_in))) {
      return SimpleRetT(idx, true);
    }
    if (UNLIKELY(key == kEmptyKey_)) {
      // If we hit an empty element, this key does not exist.
      return SimpleRetT(capacity_, false);
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // We have probed every cell: fail.
      return SimpleRetT(capacity_, false);
    }
  }
}
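/*
 * Illustrative sketch (not part of the implementation; `aha` and the key
 * are hypothetical): what the SimpleRetT contract above looks like from
 * the caller's side.
 *
 *   SimpleRetT ret = aha->findInternal(42);
 *   if (ret.success) {
 *     // Key present: aha->cells_[ret.idx] holds the entry for 42.
 *   } else {
 *     // Key absent: ret.idx == aha->capacity_.
 *   }
 */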
/*
 * insertInternal --
 *
 *   Returns (idx, true) -- the index of the key -- on a successful insert.
 *   On a key collision it returns (idx, false), and the previously
 *   inserted value for that key is left untouched. If the map is full it
 *   returns (capacity_, false).
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <class T>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SimpleRetT
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
insertInternal(KeyT key_in, T&& value) {
  const short NO_NEW_INSERTS = 1;
  const short NO_PENDING_INSERTS = 2;
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);

  size_t idx = keyToAnchorIdx(key_in);
  size_t numProbes = 0;
  for (;;) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    if (relaxedLoadKey(*cell) == kEmptyKey_) {
      // NOTE: isFull_ is set based on numEntries_.readFast(), so it's
      // possible to insert more than maxEntries_ entries. However, it's not
      // possible to insert past capacity_.
      ++numPendingEntries_;
      if (isFull_.load(std::memory_order_acquire)) {
        --numPendingEntries_;

        // Before deciding whether this insert succeeded, this thread needs to
        // wait until no other thread can add a new entry.

        // Correctness assumes isFull_ is true at this point. If
        // another thread now does ++numPendingEntries_, we expect it
        // to pass the isFull_.load() test above. (It shouldn't insert
        // a new entry.)
        FOLLY_SPIN_WAIT(
          isFull_.load(std::memory_order_acquire) != NO_PENDING_INSERTS
            && numPendingEntries_.readFull() != 0
        );
        isFull_.store(NO_PENDING_INSERTS, std::memory_order_release);

        if (relaxedLoadKey(*cell) == kEmptyKey_) {
          // Don't insert past the max load factor.
          return SimpleRetT(capacity_, false);
        }
      } else {
        // An unallocated cell. Try once to lock it. If we succeed, insert
        // here. If we fail, fall through to the comparison below; maybe the
        // insert that just beat us was for this very key....
        if (tryLockCell(cell)) {
          // Write the value - done before unlocking.
          try {
            DCHECK(relaxedLoadKey(*cell) == kLockedKey_);
            /*
             * This happens using the copy constructor because we won't have
             * constructed a lhs to use an assignment operator on when
             * values are being set.
             */
            new (&cell->second) ValueT(std::forward<T>(value));
            unlockCell(cell, key_in); // Sets the new key.
          } catch (...) {
            // Transition back to empty key---requires handling
            // locked->empty below.
            unlockCell(cell, kEmptyKey_);
            --numPendingEntries_;
            throw;
          }
          // Direct comparison rather than EqualFcn ok here
          // (we just inserted it).
          DCHECK(relaxedLoadKey(*cell) == key_in);
          --numPendingEntries_;
          ++numEntries_; // This is a thread-cached atomic increment :)
          if (numEntries_.readFast() >= maxEntries_) {
            isFull_.store(NO_NEW_INSERTS, std::memory_order_relaxed);
          }
          return SimpleRetT(idx, true);
        }
        --numPendingEntries_;
      }
    }
    DCHECK(relaxedLoadKey(*cell) != kEmptyKey_);
    if (kLockedKey_ == acquireLoadKey(*cell)) {
      // Another thread is mid-insert into this cell; wait for it to settle.
      FOLLY_SPIN_WAIT(
        kLockedKey_ == acquireLoadKey(*cell)
      );
    }

    const KeyT thisKey = acquireLoadKey(*cell);
    if (EqualFcn()(thisKey, key_in)) {
      // Found an existing entry for our key, but we don't overwrite the
      // previous value.
      return SimpleRetT(idx, false);
    } else if (thisKey == kEmptyKey_ || thisKey == kLockedKey_) {
      // We need to try again (i.e., don't increment numProbes or
      // advance idx): this case can happen if the constructor for
      // ValueT threw for this very cell (the rethrow block above).
      continue;
    }

    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // We have probed every cell: fail.
      return SimpleRetT(capacity_, false);
    }

    idx = probeNext(idx, numProbes);
  }
}
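/*
 * Illustrative sketch (hypothetical `aha`, keys, and values; not part of
 * the implementation): the three outcomes described in the comment above.
 *
 *   auto fresh = aha->insertInternal(7, 100);
 *   // fresh.success == true; fresh.idx is the cell now holding (7, 100).
 *   auto dup = aha->insertInternal(7, 200);
 *   // dup.success == false; dup.idx is the same cell, still holding 100.
 *   // Once numEntries_.readFast() reaches maxEntries_, inserting a new
 *   // key returns (capacity_, false) instead.
 */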
/*
 * erase --
 *
 *   This will attempt to erase the given key key_in if the key is found.
 *   It returns 1 iff the key was located and marked as erased, and 0
 *   otherwise.
 *
 *   Memory is not freed or reclaimed by erase, i.e. the cell containing
 *   the erased key will never be reused. If there's an associated value,
 *   we won't touch it either.
 */
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
erase(KeyT key_in) {
  CHECK_NE(key_in, kEmptyKey_);
  CHECK_NE(key_in, kLockedKey_);
  CHECK_NE(key_in, kErasedKey_);
  for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0;
       ;
       idx = probeNext(idx, numProbes)) {
    DCHECK_LT(idx, capacity_);
    value_type* cell = &cells_[idx];
    KeyT currentKey = acquireLoadKey(*cell);
    if (currentKey == kEmptyKey_ || currentKey == kLockedKey_) {
      // If we hit an empty (or locked) element, this key does not exist.
      // This is similar to how it's handled in find().
      return 0;
    }
    if (EqualFcn()(currentKey, key_in)) {
      // Found an existing entry for our key, attempt to mark it erased.
      // Some other thread may have erased our key, but this is ok.
      KeyT expect = currentKey;
      if (cellKeyPtr(*cell)->compare_exchange_strong(expect, kErasedKey_)) {
        numErases_.fetch_add(1, std::memory_order_relaxed);

        // Even if there's a value in the cell, we won't delete (or even
        // default construct) it because some other thread may be accessing
        // it. Locking it meanwhile won't work either since another thread
        // may be holding a pointer to it.

        // We found the key and successfully erased it.
        return 1;
      }
      // If another thread succeeds in erasing our key, we'll stop our search.
      return 0;
    }
    ++numProbes;
    if (UNLIKELY(numProbes >= capacity_)) {
      // We have probed every cell: fail.
      return 0;
    }
  }
}
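/*
 * Illustrative sketch (hypothetical `aha` and key; not part of the
 * implementation): erase() tombstones the cell rather than freeing it.
 *
 *   aha->insertInternal(7, 100);
 *   size_t n = aha->erase(7);  // n == 1: key found, marked kErasedKey_
 *   size_t m = aha->erase(7);  // m == 0: key already erased
 *   // The tombstoned cell is never reused, so repeatedly inserting and
 *   // erasing distinct keys steadily consumes capacity.
 */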
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
const typename AtomicHashArray<KeyT, ValueT,
      HashFcn, EqualFcn, Allocator>::Config
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
typename AtomicHashArray<KeyT, ValueT,
         HashFcn, EqualFcn, Allocator>::SmartPtr
AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
create(size_t maxSize, const Config& c) {
  CHECK_LE(c.maxLoadFactor, 1.0);
  CHECK_GT(c.maxLoadFactor, 0.0);
  CHECK_NE(c.emptyKey, c.lockedKey);
  size_t capacity = size_t(maxSize / c.maxLoadFactor);
  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;

  auto const mem = Allocator().allocate(sz);
  try {
    new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
                              c.maxLoadFactor, c.entryCountThreadCacheSize);
  } catch (...) {
    Allocator().deallocate(mem, sz);
    throw;
  }

  SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));

  /*
   * Mark all cells as empty.
   *
   * Note: we're bending the rules a little here accessing the key
   * element in our cells even though the cell object has not been
   * constructed, and casting them to atomic objects (see cellKeyPtr).
   * (Also, in fact we never actually invoke the value_type
   * constructor.) This is in order to avoid needing to default
   * construct a bunch of value_type when we first start up: if you
   * have an expensive default constructor for the value type this can
   * noticeably speed construction time for an AHA.
   */
  FOR_EACH_RANGE(i, 0, map->capacity_) {
    cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
                                      std::memory_order_relaxed);
  }
  return map;
}
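/*
 * Illustrative usage (hypothetical types and values; not part of the
 * implementation): create()/destroy() are the only way to make and tear
 * down an AHA, since the cell array lives in the same allocation as the
 * object itself.
 *
 *   using AHA = AtomicHashArray<int64_t, int64_t>;
 *   AHA::Config config;
 *   config.maxLoadFactor = 0.75;
 *   AHA::SmartPtr aha = AHA::create(10000, config);
 *   // capacity == size_t(10000 / 0.75) == 13333 cells.
 *   // The SmartPtr invokes destroy() when it goes out of scope.
 */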
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
destroy(AtomicHashArray* p) {
  assert(p);

  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;

  FOR_EACH_RANGE(i, 0, p->capacity_) {
    if (p->cells_[i].first != p->kEmptyKey_) {
      p->cells_[i].~value_type();
    }
  }
  p->~AtomicHashArray();

  Allocator().deallocate((char *)p, sz);
}
// clear -- clears all keys and values in the map and resets all counters
template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
clear() {
  FOR_EACH_RANGE(i, 0, capacity_) {
    if (cells_[i].first != kEmptyKey_) {
      cells_[i].~value_type();
      *const_cast<KeyT*>(&cells_[i].first) = kEmptyKey_;
    }
    CHECK(cells_[i].first == kEmptyKey_);
  }
  numEntries_.set(0);
  numPendingEntries_.set(0);
  isFull_.store(0, std::memory_order_relaxed);
  numErases_.store(0, std::memory_order_relaxed);
}
// Iterator implementation

template <class KeyT, class ValueT,
          class HashFcn, class EqualFcn, class Allocator>
template <class ContT, class IterVal>
struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
    : boost::iterator_facade<aha_iterator<ContT,IterVal>,
                             IterVal,
                             boost::forward_traversal_tag>
{
  explicit aha_iterator() : aha_(0) {}

  // Conversion ctor for interoperability between const_iterator and
  // iterator. The enable_if<> magic keeps us well-behaved for
  // is_convertible<> (v. the iterator_facade documentation).
  template<class OtherContT, class OtherVal>
  aha_iterator(const aha_iterator<OtherContT,OtherVal>& o,
               typename std::enable_if<
                 std::is_convertible<OtherVal*,IterVal*>::value >::type* = 0)
      : aha_(o.aha_)
      , offset_(o.offset_)
  {}

  explicit aha_iterator(ContT* array, size_t offset)
      : aha_(array)
      , offset_(offset)
  {}

  // Returns a unique index that can be used with findAt().
  // WARNING: This function will fail silently for hashtables
  // with capacity > 2^32.
  uint32_t getIndex() const { return offset_; }

  void advancePastEmpty() {
    while (offset_ < aha_->capacity_ && !isValid()) {
      ++offset_;
    }
  }

 private:
  friend class AtomicHashArray;
  friend class boost::iterator_core_access;

  void increment() {
    ++offset_;
    advancePastEmpty();
  }

  bool equal(const aha_iterator& o) const {
    return aha_ == o.aha_ && offset_ == o.offset_;
  }

  IterVal& dereference() const {
    return aha_->cells_[offset_];
  }

  bool isValid() const {
    KeyT key = acquireLoadKey(aha_->cells_[offset_]);
    return key != aha_->kEmptyKey_ &&
           key != aha_->kLockedKey_ &&
           key != aha_->kErasedKey_;
  }

 private:
  ContT* aha_;
  size_t offset_;
}; // aha_iterator
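/*
 * Illustrative sketch (hypothetical `aha`; not part of the
 * implementation): iteration visits only live cells, because
 * advancePastEmpty() skips keys equal to kEmptyKey_, kLockedKey_, or
 * kErasedKey_.
 *
 *   for (auto it = aha->begin(); it != aha->end(); ++it) {
 *     // it->first is the key, it->second the value.
 *   }
 */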
} // namespace folly

#undef FOLLY_SPIN_WAIT