/*
 * Copyright 2013-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <new>
#include <stdexcept>
#include <system_error>
#include <type_traits>
#include <utility>

#include <boost/type_traits/has_trivial_destructor.hpp>

#include <folly/Conv.h>
#include <folly/Likely.h>
#include <folly/Random.h>
#include <folly/detail/AtomicUnorderedMapUtils.h>
#include <folly/lang/Bits.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>

namespace folly {

/// You're probably reading this because you are looking for an
/// AtomicUnorderedMap<K,V> that is fully general, highly concurrent (for
/// reads, writes, and iteration), and makes no performance compromises.
/// We haven't figured that one out yet. What you will find here is a
/// hash table implementation that sacrifices generality so that it can
/// give you all of the other things.
///
/// LIMITATIONS:
///
/// * Insert only (*) - the only write operation supported directly by
///   AtomicUnorderedInsertMap is findOrConstruct. There is a (*) because
///   values aren't moved, so you can roll your own concurrency control for
///   in-place updates of values (see MutableData and MutableAtom below),
///   but the hash table itself doesn't help you.
///
/// * No resizing - you must specify the capacity up front, and once
///   the hash map gets full you won't be able to insert. Insert
///   performance will degrade once the load factor is high. Insert is
///   O(1/(1-actual_load_factor)). Note that this is a pretty strong
///   limitation, because you can't remove existing keys.
///
/// * 2^30 maximum default capacity - by default AtomicUnorderedInsertMap
///   uses uint32_t internal indexes (and steals 2 bits), limiting you
///   to about a billion entries. If you need more you can fill in all
///   of the template params so you change IndexType to uint64_t, or you
///   can use AtomicUnorderedInsertMap64. 64-bit indexes will increase
///   the space overhead of the map, of course.
///
/// WHAT YOU GET IN EXCHANGE:
///
/// * Arbitrary key and value types - any K and V that can be used in a
///   std::unordered_map can be used here. In fact, the key and value
///   types don't even have to be copyable or moveable!
///
/// * Keys and values in the map won't be moved - it is safe to keep
///   pointers or references to the keys and values in the map, because
///   they are never moved or destroyed (until the map itself is destroyed).
///
/// * Iterators are never invalidated - writes don't invalidate iterators,
///   so you can scan and insert in parallel.
///
/// * Fast wait-free reads - reads are usually only a single cache miss,
///   even when the hash table is very large. Wait-freedom means that
///   you won't see latency outliers even in the face of concurrent writes.
///
/// * Lock-free insert - writes proceed in parallel. If a thread in the
///   middle of a write is unlucky and gets suspended, it doesn't block
///   anybody else.
///
/// COMMENTS ON INSERT-ONLY
///
/// This map provides wait-free linearizable reads and lock-free
/// linearizable inserts. Inserted values won't be moved, but no
/// concurrency control is provided for safely updating them. To remind
/// you of that fact they are only provided in const form. This is the
/// only simple safe thing to do while preserving something like the normal
/// std::map iteration form, which requires that iteration be exposed
/// via std::pair (and prevents encapsulation of access to the value).
///
/// There are a couple of reasonable policies for doing in-place
/// concurrency control on the values. I am hoping that the policy can
/// be injected via the value type or an extra template param, to keep
/// the core AtomicUnorderedInsertMap insert-only:
///
///   CONST: this is the currently implemented strategy, which is simple,
///   performant, and not that expressive. You can always put in a value
///   with a mutable field (see MutableAtom below), but that doesn't look
///   as pretty as it should.
///
///   ATOMIC: for integers and integer-size trivially copyable structs
///   (via an adapter like tao/queues/AtomicStruct) the value can be a
///   std::atomic and read and written atomically. A sketch of this
///   approach follows the list below.
///
///   SEQ-LOCK: attach a counter incremented before and after write.
///   Writers serialize by using CAS to make an even->odd transition,
///   then odd->even after the write. Readers grab the value with memcpy,
///   checking sequence value before and after. Readers retry until they
///   see an even sequence number that doesn't change. This works for
///   larger structs, but still requires memcpy to be equivalent to copy
///   assignment, and it is no longer lock-free. It scales very well,
///   because the readers are still invisible (no cache line writes).
///
///   LOCK: folly's SharedMutex would be a good choice here.
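///
/// As a minimal sketch of the ATOMIC policy using the MutableAtom wrapper
/// defined at the bottom of this file (the key and capacity here are
/// illustrative, not part of the API):
///
///   AtomicUnorderedInsertMap<int, MutableAtom<int>> counters(1000);
///   counters.emplace(7, 0);
///   // data is a mutable std::atomic<int>, so it can be updated in
///   // place even though the map only hands out const references
///   counters.find(7)->second.data.fetch_add(1);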
///
/// MEMORY ALLOCATION
///
/// Underlying memory is allocated as a big anonymous mmap chunk, which
/// might be cheaper than calloc() and is certainly not more expensive
/// for large maps. If the SkipKeyValueDeletion template param is true
/// then deletion of the map consists of unmapping the backing memory,
/// which is much faster than destructing all of the keys and values.
/// Feel free to override if boost::has_trivial_destructor isn't recognizing
/// the triviality of your destructors.
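///
/// A minimal usage sketch (the names and sizes are illustrative, not part
/// of the API):
///
///   AtomicUnorderedInsertMap<std::string, int> m(10000);
///   m.emplace("hello", 1);            // insert-only; no update, no erase
///   auto it = m.find("hello");
///   if (it != m.cend()) {
///     int v = it->second;             // values are handed out const
///   }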

template <
    typename Key,
    typename Value,
    typename Hash = std::hash<Key>,
    typename KeyEqual = std::equal_to<Key>,
    bool SkipKeyValueDeletion =
        (boost::has_trivial_destructor<Key>::value &&
         boost::has_trivial_destructor<Value>::value),
    template <typename> class Atom = std::atomic,
    typename IndexType = uint32_t,
    typename Allocator = folly::detail::MMapAlloc>
struct AtomicUnorderedInsertMap {
  typedef Key key_type;
  typedef Value mapped_type;
  typedef std::pair<Key, Value> value_type;
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;
  typedef Hash hasher;
  typedef KeyEqual key_equal;
  typedef const value_type& const_reference;

  typedef struct ConstIterator {
    ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot)
        : owner_(owner), slot_(slot) {}

    ConstIterator(const ConstIterator&) = default;
    ConstIterator& operator=(const ConstIterator&) = default;

    const value_type& operator*() const {
      return owner_.slots_[slot_].keyValue();
    }

    const value_type* operator->() const {
      return &owner_.slots_[slot_].keyValue();
    }

    // iteration proceeds from high slots toward the sentinel at slot 0
    const ConstIterator& operator++() {
      while (slot_ > 0) {
        --slot_;
        if (owner_.slots_[slot_].state() == LINKED) {
          break;
        }
      }
      return *this;
    }

    ConstIterator operator++(int /* dummy */) {
      auto prev = *this;
      ++*this;
      return prev;
    }

    bool operator==(const ConstIterator& rhs) const {
      return slot_ == rhs.slot_;
    }
    bool operator!=(const ConstIterator& rhs) const {
      return !(*this == rhs);
    }

   private:
    friend struct AtomicUnorderedInsertMap;
    const AtomicUnorderedInsertMap& owner_;
    IndexType slot_;
  } const_iterator;

  friend ConstIterator;

  /// Constructs a map that will support the insertion of maxSize key-value
  /// pairs without exceeding the max load factor. Load factors of greater
  /// than 1 are not supported, and once the actual load factor of the
  /// map approaches 1 the insert performance will suffer. The capacity
  /// is limited to 2^30 (about a billion) for the default IndexType,
  /// beyond which we will throw invalid_argument.
  explicit AtomicUnorderedInsertMap(
      size_t maxSize,
      float maxLoadFactor = 0.8f,
      const Allocator& alloc = Allocator())
      : allocator_(alloc) {
    size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
    size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
    if (capacity > avail && maxSize < avail) {
      // we'll do our best
      capacity = avail;
    }
    if (capacity < maxSize || capacity > avail) {
      throw std::invalid_argument(
          "AtomicUnorderedInsertMap capacity must fit in IndexType with 2 "
          "bits left over");
    }

    numSlots_ = capacity;
    slotMask_ = folly::nextPowTwo(capacity * 4) - 1;
    mmapRequested_ = sizeof(Slot) * capacity;
    slots_ = reinterpret_cast<Slot*>(allocator_.allocate(mmapRequested_));
    zeroFillSlots();
    // mark the zero-th slot as in-use but not valid, since that happens
    // to be our nil value
    slots_[0].stateUpdate(EMPTY, CONSTRUCTING);
  }
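
  // Worked example of the sizing above (numbers are illustrative):
  // maxSize = 1,000,000 with maxLoadFactor = 0.8f gives
  // capacity = 1,000,000 / 0.8 + 128 = 1,250,128 slots, and
  // slotMask_ = nextPowTwo(5,000,512) - 1 = 2^23 - 1, comfortably
  // within the 2^30 limit for the default uint32_t IndexType.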

  ~AtomicUnorderedInsertMap() {
    if (!SkipKeyValueDeletion) {
      for (size_t i = 1; i < numSlots_; ++i) {
        slots_[i].~Slot();
      }
    }
    allocator_.deallocate(reinterpret_cast<char*>(slots_), mmapRequested_);
  }

  /// Searches for the key, returning (iter,false) if it is found.
  /// If it is not found calls the functor Func with a void* argument
  /// that is raw storage suitable for placement construction of a Value
  /// (see raw_value_type), then returns (iter,true). May call Func and
  /// then return (iter,false) if there are other concurrent writes, in
  /// which case the newly constructed value will be immediately destroyed.
  ///
  /// This function does not block other readers or writers. If there
  /// are other concurrent writes, many parallel calls to func may happen
  /// and only the first one to complete will win. The values constructed
  /// by the other calls to func will be destroyed.
  ///
  /// Usage example:
  ///
  ///   AtomicUnorderedInsertMap<std::string,std::string> memo(100000);
  ///
  ///   auto value = memo.findOrConstruct(key, [=](void* raw) {
  ///     new (raw) std::string(computation(key));
  ///   }).first->second;
  template <typename Func>
  std::pair<const_iterator, bool> findOrConstruct(const Key& key, Func&& func) {
    auto const slot = keyToSlotIdx(key);
    auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);

    auto existing = find(key, slot);
    if (existing != 0) {
      return std::make_pair(ConstIterator(*this, existing), false);
    }

    auto idx = allocateNear(slot);
    new (&slots_[idx].keyValue().first) Key(key);
    func(static_cast<void*>(&slots_[idx].keyValue().second));

    while (true) {
      slots_[idx].next_ = prev >> 2;

      // we can merge the head update and the CONSTRUCTING -> LINKED update
      // into a single CAS if slot == idx (which should happen often)
      auto after = idx << 2;
      if (slot == idx) {
        after += LINKED;
      } else {
        after += (prev & 3);
      }

      if (slots_[slot].headAndState_.compare_exchange_strong(prev, after)) {
        // success
        if (idx != slot) {
          slots_[idx].stateUpdate(CONSTRUCTING, LINKED);
        }
        return std::make_pair(ConstIterator(*this, idx), true);
      }
      // compare_exchange_strong updates its first arg on failure, so
      // there is no need to reread prev

      existing = find(key, slot);
      if (existing != 0) {
        // our allocated key and value are no longer needed
        slots_[idx].keyValue().first.~Key();
        slots_[idx].keyValue().second.~Value();
        slots_[idx].stateUpdate(CONSTRUCTING, EMPTY);
        return std::make_pair(ConstIterator(*this, existing), false);
      }
    }
  }

  /// This isn't really emplace, but it is what we need to test.
  /// Eventually we can duplicate all of the std::pair constructor
  /// forms, including a recursive tuple forwarding template
  /// (see http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
  template <class K, class V>
  std::pair<const_iterator, bool> emplace(const K& key, V&& value) {
    return findOrConstruct(key, [&](void* raw) {
      new (raw) Value(std::forward<V>(value));
    });
  }
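
  // A sketch of the return-value semantics (values are illustrative):
  //
  //   AtomicUnorderedInsertMap<int, int> m(10);
  //   auto r1 = m.emplace(1, 100);  // r1.second == true: inserted
  //   auto r2 = m.emplace(1, 200);  // r2.second == false: already present,
  //                                 // and r2.first->second is still 100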

  const_iterator find(const Key& key) const {
    return ConstIterator(*this, find(key, keyToSlotIdx(key)));
  }

  const_iterator cbegin() const {
    IndexType slot = numSlots_ - 1;
    while (slot > 0 && slots_[slot].state() != LINKED) {
      --slot;
    }
    return ConstIterator(*this, slot);
  }

  const_iterator cend() const {
    return ConstIterator(*this, 0);
  }

 private:
  enum : IndexType {
    kMaxAllocationTries = 1000, // after this we throw
  };

  enum BucketState : IndexType {
    EMPTY = 0,
    CONSTRUCTING = 1,
    LINKED = 2,
  };

  /// Lock-free insertion is easiest by prepending to collision chains.
  /// A large chaining hash table takes two cache misses instead of
  /// one, however. Our solution is to colocate the bucket storage and
  /// the head storage, so that even though we are traversing chains we
  /// are likely to stay within the same cache line. Just make sure to
  /// traverse head before looking at any keys. This strategy gives us
  /// 32 bit pointers and fast iteration.
  struct Slot {
    /// The bottom two bits are the BucketState, the rest is the index
    /// of the first bucket for the chain whose keys map to this slot.
    /// When things are going well the head usually links to this slot,
    /// but that doesn't always have to happen.
    Atom<IndexType> headAndState_;

    /// The next bucket in the chain
    IndexType next_;

    /// Key and Value
    typename std::aligned_storage<sizeof(value_type), alignof(value_type)>::type
        raw_;

    ~Slot() {
      auto s = state();
      assert(s == EMPTY || s == LINKED);
      if (s == LINKED) {
        keyValue().first.~Key();
        keyValue().second.~Value();
      }
    }

    BucketState state() const {
      return BucketState(headAndState_.load(std::memory_order_acquire) & 3);
    }

    void stateUpdate(BucketState before, BucketState after) {
      assert(state() == before);
      headAndState_ += (after - before);
    }

    value_type& keyValue() {
      assert(state() != EMPTY);
      return *static_cast<value_type*>(static_cast<void*>(&raw_));
    }

    const value_type& keyValue() const {
      assert(state() != EMPTY);
      return *static_cast<const value_type*>(static_cast<const void*>(&raw_));
    }
  };

  // We manually manage the slot memory so we can bypass initialization
  // (by getting a zero-filled mmap chunk) and optionally destruction of
  // the slots

  size_t mmapRequested_;
  size_t numSlots_;

  /// tricky, see keyToSlotIdx
  size_t slotMask_;

  Allocator allocator_;
  Slot* slots_;
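
  // slotMask_ + 1 is a power of two that is at least 4 * numSlots_ (see the
  // constructor), so after h is masked below it is less than 8 * numSlots_,
  // and the fold loop in keyToSlotIdx subtracts numSlots_ at most 7 times.
  // This maps the hash into [0, numSlots_) without an integer division.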
  IndexType keyToSlotIdx(const Key& key) const {
    size_t h = hasher()(key);
    h &= slotMask_;
    while (h >= numSlots_) {
      h -= numSlots_;
    }
    return h;
  }

  IndexType find(const Key& key, IndexType slot) const {
    KeyEqual ke = {};
    auto hs = slots_[slot].headAndState_.load(std::memory_order_acquire);
    for (slot = hs >> 2; slot != 0; slot = slots_[slot].next_) {
      if (ke(key, slots_[slot].keyValue().first)) {
        return slot;
      }
    }
    return 0;
  }

  /// Allocates a slot and returns its index. Tries to put it near
  /// slots_[start].
  IndexType allocateNear(IndexType start) {
    for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
      auto slot = allocationAttempt(start, tries);
      auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
      if ((prev & 3) == EMPTY &&
          slots_[slot].headAndState_.compare_exchange_strong(
              prev, prev + CONSTRUCTING - EMPTY)) {
        return slot;
      }
    }
    throw std::bad_alloc();
  }

  /// Returns the slot we should attempt to allocate after tries failed
  /// attempts, starting from the specified slot. This is pulled out so we
  /// can specialize it differently during deterministic testing
  IndexType allocationAttempt(IndexType start, IndexType tries) const {
    if (LIKELY(tries < 8 && start + tries < numSlots_)) {
      // the first few attempts probe linearly from the start slot
      return IndexType(start + tries);
    } else {
      IndexType rv;
      if (sizeof(IndexType) <= 4) {
        rv = IndexType(folly::Random::rand32(numSlots_));
      } else {
        rv = IndexType(folly::Random::rand64(numSlots_));
      }
      assert(rv < numSlots_);
      return rv;
    }
  }

  void zeroFillSlots() {
    using folly::detail::GivesZeroFilledMemory;
    if (!GivesZeroFilledMemory<Allocator>::value) {
      memset(slots_, 0, mmapRequested_);
    }
  }
};

/// AtomicUnorderedInsertMap64 is just a type alias that makes it easier
/// to select a 64 bit slot index type. Use this if you need a capacity
/// bigger than 2^30 (about a billion); the larger indexes increase the
/// space overhead of the map.
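///
/// For example (an illustrative declaration, with a size chosen only to
/// show the use case):
///
///   AtomicUnorderedInsertMap64<uint64_t, uint64_t> big(size_t{1} << 31);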
template <
    typename Key,
    typename Value,
    typename Hash = std::hash<Key>,
    typename KeyEqual = std::equal_to<Key>,
    bool SkipKeyValueDeletion =
        (boost::has_trivial_destructor<Key>::value &&
         boost::has_trivial_destructor<Value>::value),
    template <typename> class Atom = std::atomic,
    typename Allocator = folly::detail::MMapAlloc>
using AtomicUnorderedInsertMap64 = AtomicUnorderedInsertMap<
    Key,
    Value,
    Hash,
    KeyEqual,
    SkipKeyValueDeletion,
    Atom,
    uint64_t,
    Allocator>;

/// MutableAtom is a tiny wrapper that gives you the option of atomically
/// updating values inserted into an AtomicUnorderedInsertMap<K,
/// MutableAtom<V>>. This relies on AtomicUnorderedInsertMap's guarantee
/// that it doesn't move values.
template <typename T, template <typename> class Atom = std::atomic>
struct MutableAtom {
  mutable Atom<T> data;

  explicit MutableAtom(const T& init) : data(init) {}
};

/// MutableData is a tiny wrapper that gives you the option of using an
/// external concurrency control mechanism to update values inserted
/// into an AtomicUnorderedInsertMap.
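///
/// A sketch of pairing it with an external lock (the mutex and names are
/// illustrative, and assume <mutex> is included; any external mechanism
/// works):
///
///   AtomicUnorderedInsertMap<int, MutableData<std::string>> m(100);
///   std::mutex guard;
///   m.emplace(1, std::string("a"));
///   {
///     std::lock_guard<std::mutex> lg(guard);
///     m.find(1)->second.data += "b";  // data is mutable; the lock makes
///   }                                 // the update safe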
template <typename T>
struct MutableData {
  mutable T data;

  explicit MutableData(const T& init) : data(init) {}
};

} // namespace folly