/*
 * Copyright 2016 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef FOLLY_ATOMICUNORDEREDMAP_H
#define FOLLY_ATOMICUNORDEREDMAP_H

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <new>
#include <stdexcept>
#include <system_error>
#include <type_traits>
#include <utility>

#include <folly/Likely.h>
#include <folly/Bits.h>
#include <folly/Conv.h>
#include <folly/Random.h>
#include <folly/detail/AtomicUnorderedMapUtils.h>
#include <boost/type_traits/has_trivial_destructor.hpp>

namespace folly {

/// You're probably reading this because you are looking for an
/// AtomicUnorderedMap<K,V> that is fully general, highly concurrent (for
/// reads, writes, and iteration), and makes no performance compromises.
/// We haven't figured that one out yet. What you will find here is a
/// hash table implementation that sacrifices generality so that it can
/// give you all of the other things.
///
/// LIMITATIONS:
///
/// * Insert only (*) - the only write operation supported directly by
///   AtomicUnorderedInsertMap is findOrConstruct. There is a (*) because
///   values aren't moved, so you can roll your own concurrency control for
///   in-place updates of values (see MutableData and MutableAtom below),
///   but the hash table itself doesn't help you.
///
/// * No resizing - you must specify the capacity up front, and once
///   the hash map gets full you won't be able to insert. Insert
///   performance will degrade once the load factor is high. Insert is
///   O(1/(1-actual_load_factor)). Note that this is a pretty strong
///   limitation, because you can't remove existing keys.
///
/// * 2^30 maximum default capacity - by default AtomicUnorderedInsertMap
///   uses uint32_t internal indexes (and steals 2 bits), limiting you
///   to about a billion entries. If you need more you can fill in all
///   of the template params so that IndexType is uint64_t, or you can
///   use AtomicUnorderedInsertMap64. 64-bit indexes will increase the
///   space overhead of the map, of course.
///
/// WHAT YOU GET IN EXCHANGE:
///
/// * Arbitrary key and value types - any K and V that can be used in a
///   std::unordered_map can be used here. In fact, the key and value
///   types don't even have to be copyable or moveable!
///
/// * Keys and values in the map won't be moved - it is safe to keep
///   pointers or references to the keys and values in the map, because
///   they are never moved or destroyed (until the map itself is destroyed).
///
/// * Iterators are never invalidated - writes don't invalidate iterators,
///   so you can scan and insert in parallel.
///
/// * Fast wait-free reads - reads are usually only a single cache miss,
///   even when the hash table is very large. Wait-freedom means that
///   you won't see latency outliers even in the face of concurrent writes.
///
/// * Lock-free insert - writes proceed in parallel. If a thread in the
///   middle of a write is unlucky and gets suspended, it doesn't block
///   anybody else.
///
/// COMMENTS ON INSERT-ONLY
///
/// This map provides wait-free linearizable reads and lock-free
/// linearizable inserts. Inserted values won't be moved, but no
/// concurrency control is provided for safely updating them. To remind
/// you of that fact they are only provided in const form. This is the
/// only simple safe thing to do while preserving something like the normal
/// std::map iteration form, which requires that iteration be exposed
/// via std::pair (and prevents encapsulation of access to the value).
///
/// There are a couple of reasonable policies for doing in-place
/// concurrency control on the values. I am hoping that the policy can
/// be injected via the value type or an extra template param, to keep
/// the core AtomicUnorderedInsertMap insert-only:
///
///   CONST: this is the currently implemented strategy, which is simple,
///   performant, and not that expressive. You can always put in a value
///   with a mutable field (see MutableAtom below), but that doesn't look
///   as pretty as it should.
///
///   ATOMIC: for integers and integer-size trivially copyable structs
///   (via an adapter like tao/queues/AtomicStruct) the value can be a
///   std::atomic and read and written atomically.
///
///   SEQ-LOCK: attach a counter incremented before and after write.
///   Writers serialize by using CAS to make an even->odd transition,
///   then odd->even after the write. Readers grab the value with memcpy,
///   checking sequence value before and after. Readers retry until they
///   see an even sequence number that doesn't change. This works for
///   larger structs, but still requires memcpy to be equivalent to copy
///   assignment, and it is no longer lock-free. It scales very well,
///   because the readers are still invisible (no cache line writes).
///
///   LOCK: folly's SharedMutex would be a good choice here.
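///
/// For example, a minimal sketch of the mutable-field (ATOMIC-style) approach
/// using the MutableAtom wrapper defined at the end of this file; the key
/// type and capacity are illustrative assumptions, not requirements:
///
///   AtomicUnorderedInsertMap<int64_t, MutableAtom<uint64_t>> counts(1000);
///   counts.emplace(int64_t{7}, uint64_t{0});
///   counts.find(int64_t{7})->second.data.fetch_add(1); // in-place atomic update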
///
/// MEMORY ALLOCATION
///
/// Underlying memory is allocated as a big anonymous mmap chunk, which
/// might be cheaper than calloc() and is certainly not more expensive
/// for large maps. If the SkipKeyValueDeletion template param is true
/// then deletion of the map consists of unmapping the backing memory,
/// which is much faster than destructing all of the keys and values.
/// Feel free to override if boost::has_trivial_destructor isn't
/// recognizing the triviality of your destructors.
template <typename Key,
          typename Value,
          typename Hash = std::hash<Key>,
          typename KeyEqual = std::equal_to<Key>,
          bool SkipKeyValueDeletion =
              (boost::has_trivial_destructor<Key>::value &&
               boost::has_trivial_destructor<Value>::value),
          template<typename> class Atom = std::atomic,
          typename IndexType = uint32_t,
          typename Allocator = folly::detail::MMapAlloc>
struct AtomicUnorderedInsertMap {

  typedef Key key_type;
  typedef Value mapped_type;
  typedef std::pair<Key,Value> value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef Hash hasher;
  typedef KeyEqual key_equal;
  typedef const value_type& const_reference;

  typedef struct ConstIterator {
    ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot)
        : owner_(owner), slot_(slot) {}

    ConstIterator(const ConstIterator&) = default;
    ConstIterator& operator= (const ConstIterator&) = default;

    const value_type& operator* () const {
      return owner_.slots_[slot_].keyValue();
    }
    const value_type* operator-> () const {
      return &owner_.slots_[slot_].keyValue();
    }

    // pre-increment: scan downward toward slot 0 for the next LINKED slot
    const ConstIterator& operator++ () {
      while (slot_ > 0) {
        --slot_;
        if (owner_.slots_[slot_].state() == LINKED) {
          break;
        }
      }
      return *this;
    }

    // post-increment
    ConstIterator operator++(int /* dummy */) {
      auto prev = *this;
      ++*this;
      return prev;
    }

    bool operator== (const ConstIterator& rhs) const { return slot_ == rhs.slot_; }
    bool operator!= (const ConstIterator& rhs) const { return !(*this == rhs); }

   private:
    const AtomicUnorderedInsertMap& owner_;
    IndexType slot_;
  } const_iterator;

  friend ConstIterator;

  /// Constructs a map that will support the insertion of maxSize key-value
  /// pairs without exceeding the max load factor. Load factors of greater
  /// than 1 are not supported, and once the actual load factor of the
  /// map approaches 1 the insert performance will suffer. The capacity
  /// is limited to 2^30 (about a billion) for the default IndexType,
  /// beyond which we will throw invalid_argument.
  explicit AtomicUnorderedInsertMap(
      size_t maxSize,
      float maxLoadFactor = 0.8f,
      const Allocator& alloc = Allocator())
    : allocator_(alloc)
  {
    size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
    size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
    if (capacity > avail && maxSize < avail) {
      // we'll do our best
      capacity = avail;
    }
    if (capacity < maxSize || capacity > avail) {
      throw std::invalid_argument(
          "AtomicUnorderedInsertMap capacity must fit in IndexType with 2 bits "
          "left over");
    }

    numSlots_ = capacity;
    slotMask_ = folly::nextPowTwo(capacity * 4) - 1;
    mmapRequested_ = sizeof(Slot) * capacity;
    slots_ = reinterpret_cast<Slot*>(allocator_.allocate(mmapRequested_));
    zeroFillSlots();
    // mark the zero-th slot as in-use but not valid, since that happens
    // to be our nil value
    slots_[0].stateUpdate(EMPTY, CONSTRUCTING);
  }

  ~AtomicUnorderedInsertMap() {
    if (!SkipKeyValueDeletion) {
      for (size_t i = 1; i < numSlots_; ++i) {
        slots_[i].~Slot();
      }
    }
    allocator_.deallocate(reinterpret_cast<char*>(slots_), mmapRequested_);
  }

  /// Searches for the key, returning (iter,false) if it is found.
  /// If it is not found calls the functor Func with a void* argument
  /// that is raw storage suitable for placement construction of a Value
  /// (see raw_value_type), then returns (iter,true). May call Func and
  /// then return (iter,false) if there are other concurrent writes, in
  /// which case the newly constructed value will be immediately destroyed.
  ///
  /// This function does not block other readers or writers. If there
  /// are other concurrent writes, many parallel calls to func may happen
  /// and only the first one to complete will win. The values constructed
  /// by the other calls to func will be destroyed.
  ///
  /// Usage:
  ///
  ///  AtomicUnorderedInsertMap<std::string,std::string> memo;
  ///
  ///  auto value = memo.findOrConstruct(key, [=](void* raw) {
  ///    new (raw) std::string(computation(key));
  ///  }).first->second;
  template<typename Func>
  std::pair<const_iterator,bool> findOrConstruct(const Key& key, Func&& func) {
    auto const slot = keyToSlotIdx(key);
    auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);

    auto existing = find(key, slot);
    if (existing != 0) {
      return std::make_pair(ConstIterator(*this, existing), false);
    }

    auto idx = allocateNear(slot);
    new (&slots_[idx].keyValue().first) Key(key);
    func(static_cast<void*>(&slots_[idx].keyValue().second));

    while (true) {
      slots_[idx].next_ = prev >> 2;

      // we can merge the head update and the CONSTRUCTING -> LINKED update
      // into a single CAS if slot == idx (which should happen often)
      auto after = idx << 2;
      if (slot == idx) {
        after += LINKED;
      } else {
        after += (prev & 3);
      }

      if (slots_[slot].headAndState_.compare_exchange_strong(prev, after)) {
        // success
        if (idx != slot) {
          slots_[idx].stateUpdate(CONSTRUCTING, LINKED);
        }
        return std::make_pair(ConstIterator(*this, idx), true);
      }
      // compare_exchange_strong updates its first arg on failure, so
      // there is no need to reread prev

      existing = find(key, slot);
      if (existing != 0) {
        // our allocated key and value are no longer needed
        slots_[idx].keyValue().first.~Key();
        slots_[idx].keyValue().second.~Value();
        slots_[idx].stateUpdate(CONSTRUCTING, EMPTY);

        return std::make_pair(ConstIterator(*this, existing), false);
      }
    }
  }

  /// This isn't really emplace, but it is what we need to test.
  /// Eventually we can duplicate all of the std::pair constructor
  /// forms, including a recursive tuple forwarding template
  /// (see http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
  template<class K, class V>
  std::pair<const_iterator,bool> emplace(const K& key, V&& value) {
    return findOrConstruct(key, [&](void* raw) {
      new (raw) Value(std::forward<V>(value));
    });
  }
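
  /// A minimal usage sketch (the key/value types and sizes are illustrative
  /// assumptions, not requirements):
  ///
  ///  AtomicUnorderedInsertMap<int64_t, std::string> m(10000);
  ///  m.emplace(int64_t{10}, std::string("ten"));
  ///  auto iter = m.find(int64_t{10});
  ///  if (iter != m.cend()) {
  ///    // iter->second is "ten", and the reference stays valid for the
  ///    // lifetime of the map because values are never moved
  ///  }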

  const_iterator find(const Key& key) const {
    return ConstIterator(*this, find(key, keyToSlotIdx(key)));
  }

  const_iterator cbegin() const {
    IndexType slot = numSlots_ - 1;
    while (slot > 0 && slots_[slot].state() != LINKED) {
      --slot;
    }
    return ConstIterator(*this, slot);
  }

  const_iterator cend() const {
    return ConstIterator(*this, 0);
  }

 private:

  enum : IndexType {
    kMaxAllocationTries = 1000, // after this we throw
  };

  enum BucketState : IndexType {
    EMPTY = 0,
    CONSTRUCTING = 1,
    LINKED = 2,
  };

  /// Lock-free insertion is easiest by prepending to collision chains.
  /// A large chaining hash table takes two cache misses instead of
  /// one, however. Our solution is to colocate the bucket storage and
  /// the head storage, so that even though we are traversing chains we
  /// are likely to stay within the same cache line. Just make sure to
  /// traverse head before looking at any keys. This strategy gives us
  /// 32 bit pointers and fast iteration.
  struct Slot {
    /// The bottom two bits are the BucketState, the rest is the index
    /// of the first bucket for the chain whose keys map to this slot.
    /// When things are going well the head usually links to this slot,
    /// but that doesn't always have to happen.
    Atom<IndexType> headAndState_;
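    // Packing example (illustrative numbers only): a stored value of
    // (17u << 2) | LINKED means this slot is LINKED and the chain for
    // keys hashing here starts at slots_[17].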

    /// The next bucket in the chain
    IndexType next_;

    /// Key and Value
    typename std::aligned_storage<sizeof(value_type),
                                  alignof(value_type)>::type raw_;

    ~Slot() {
      auto s = state();
      assert(s == EMPTY || s == LINKED);
      if (s == LINKED) {
        keyValue().first.~Key();
        keyValue().second.~Value();
      }
    }

    BucketState state() const {
      return BucketState(headAndState_.load(std::memory_order_acquire) & 3);
    }

    void stateUpdate(BucketState before, BucketState after) {
      assert(state() == before);
      headAndState_ += (after - before);
    }

    value_type& keyValue() {
      assert(state() != EMPTY);
      return *static_cast<value_type*>(static_cast<void*>(&raw_));
    }

    const value_type& keyValue() const {
      assert(state() != EMPTY);
      return *static_cast<const value_type*>(static_cast<const void*>(&raw_));
    }
  };

  // We manually manage the slot memory so we can bypass initialization
  // (by getting a zero-filled mmap chunk) and optionally destruction of
  // the slots

  size_t mmapRequested_;
  size_t numSlots_;

  /// tricky, see keyToSlotIdx
  size_t slotMask_;

  Allocator allocator_;
  Slot* slots_;

  IndexType keyToSlotIdx(const Key& key) const {
    size_t h = hasher()(key);
    h &= slotMask_;
    // slotMask_ covers a power-of-two range of at least 4 * numSlots_, so
    // this reduction loop only runs a small, bounded number of times
    while (h >= numSlots_) {
      h -= numSlots_;
    }
    return h;
  }

  IndexType find(const Key& key, IndexType slot) const {
    KeyEqual ke = {};
    auto hs = slots_[slot].headAndState_.load(std::memory_order_acquire);
    for (slot = hs >> 2; slot != 0; slot = slots_[slot].next_) {
      if (ke(key, slots_[slot].keyValue().first)) {
        return slot;
      }
    }
    return 0;
  }

  /// Allocates a slot and returns its index. Tries to put it near
  /// slots_[start].
  IndexType allocateNear(IndexType start) {
    for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
      auto slot = allocationAttempt(start, tries);
      auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
      if ((prev & 3) == EMPTY &&
          slots_[slot].headAndState_.compare_exchange_strong(
              prev, prev + CONSTRUCTING - EMPTY)) {
        return slot;
      }
    }
    throw std::bad_alloc();
  }

  /// Returns the slot we should attempt to allocate after tries failed
  /// attempts, starting from the specified slot. This is pulled out so
  /// we can specialize it differently during deterministic testing
  IndexType allocationAttempt(IndexType start, IndexType tries) const {
    if (LIKELY(tries < 8 && start + tries < numSlots_)) {
      return start + tries;
    } else {
      IndexType rv;
      if (sizeof(IndexType) <= 4) {
        rv = folly::Random::rand32(numSlots_);
      } else {
        rv = folly::Random::rand64(numSlots_);
      }
      assert(rv < numSlots_);
      return rv;
    }
  }

  void zeroFillSlots() {
    using folly::detail::GivesZeroFilledMemory;
    if (!GivesZeroFilledMemory<Allocator>::value) {
      memset(slots_, 0, mmapRequested_);
    }
  }
};

/// AtomicUnorderedInsertMap64 is just a type alias that makes it easier
/// to select a 64 bit slot index type. Use this if you need a capacity
/// bigger than 2^30 (about a billion). This increases memory overheads,
/// but is otherwise the same as the default AtomicUnorderedInsertMap.
template <typename Key,
          typename Value,
          typename Hash = std::hash<Key>,
          typename KeyEqual = std::equal_to<Key>,
          bool SkipKeyValueDeletion =
              (boost::has_trivial_destructor<Key>::value &&
               boost::has_trivial_destructor<Value>::value),
          template <typename> class Atom = std::atomic,
          typename Allocator = folly::detail::MMapAlloc>
using AtomicUnorderedInsertMap64 =
    AtomicUnorderedInsertMap<Key,
                             Value,
                             Hash,
                             KeyEqual,
                             SkipKeyValueDeletion,
                             Atom,
                             uint64_t,
                             Allocator>;

/// MutableAtom is a tiny wrapper that gives you the option of atomically
/// updating values inserted into an AtomicUnorderedInsertMap<K,
/// MutableAtom<V>>. This relies on AtomicUnorderedInsertMap's guarantee
/// that it doesn't move values.
template <typename T,
          template<typename> class Atom = std::atomic>
struct MutableAtom {
  mutable Atom<T> data;

  explicit MutableAtom(const T& init) : data(init) {}
};

/// MutableData is a tiny wrapper that gives you the option of using an
/// external concurrency control mechanism to update values inserted
/// into an AtomicUnorderedInsertMap.
template <typename T>
struct MutableData {
  mutable T data;

  explicit MutableData(const T& init) : data(init) {}
};
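
/// A minimal sketch of the external-lock (LOCK) policy using MutableData.
/// The mutex type, key type, and locking granularity are illustrative
/// assumptions, not part of this header:
///
///  AtomicUnorderedInsertMap<int64_t, MutableData<std::vector<int>>> m(1000);
///  std::mutex guard;  // could equally be folly::SharedMutex
///  m.emplace(int64_t{1}, std::vector<int>{});
///  {
///    std::lock_guard<std::mutex> lg(guard);
///    m.find(int64_t{1})->second.data.push_back(42);
///  }

} // namespace folly

#endif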