X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=folly%2FAtomicUnorderedMap.h;h=77b2eb303a16e9122af3a1fcc26326af7824cfdc;hb=HEAD;hp=a2c4ac73e38e10f691b5f355a28c69ba5dcf6d35;hpb=53e6886f69ae6ee428c9a72643213c80bd75c793;p=folly.git

diff --git a/folly/AtomicUnorderedMap.h b/folly/AtomicUnorderedMap.h
index a2c4ac73..77b2eb30 100644
--- a/folly/AtomicUnorderedMap.h
+++ b/folly/AtomicUnorderedMap.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Facebook, Inc.
+ * Copyright 2013-present Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,24 +13,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-#ifndef FOLLY_ATOMICUNORDEREDMAP_H
-#define FOLLY_ATOMICUNORDEREDMAP_H
+
+#pragma once
 
 #include <atomic>
+#include <cstdint>
 #include <functional>
+#include <limits>
 #include <stdexcept>
 #include <system_error>
 #include <type_traits>
-#include <stdint.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <folly/Bits.h>
-#include <folly/Likely.h>
+
+#include <boost/type_traits/has_trivial_destructor.hpp>
+
 #include <folly/Conv.h>
+#include <folly/Likely.h>
 #include <folly/Random.h>
 #include <folly/detail/AtomicUnorderedMapUtils.h>
-#include <boost/type_traits/has_trivial_destructor.hpp>
-#include <limits>
+#include <folly/lang/Bits.h>
+#include <folly/portability/SysMman.h>
+#include <folly/portability/Unistd.h>
 
 namespace folly {
 
@@ -55,10 +57,12 @@
 /// O(1/(1-actual_load_factor)).  Note that this is a pretty strong
 /// limitation, because you can't remove existing keys.
 ///
-/// * 2^30 maximum capacity - you'll need to use something else if you
-///   have more than a billion entries.  If this limit bothers you let it
-///   wouldn't be too hard to parameterize the internal indexes between
-///   uint32_t and uint64_t.
+/// * 2^30 maximum default capacity - by default AtomicUnorderedInsertMap
+///   uses uint32_t internal indexes (and steals 2 bits), limiting you
+///   to about a billion entries.  If you need more you can fill in all
+///   of the template params so you change IndexType to uint64_t, or you
+///   can use AtomicUnorderedInsertMap64.  64-bit indexes will increase
+///   the space overhead of the map, of course.
 ///
 /// WHAT YOU GET IN EXCHANGE:
 ///
@@ -125,15 +129,17 @@
 /// which is much faster than destructing all of the keys and values.
 /// Feel free to override if std::is_trivial_destructor isn't recognizing
 /// the triviality of your destructors.
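 ///
 /// A usage sketch (the types, sizes, and keys below are illustrative
 /// assumptions, not part of this header):
 ///
 ///   folly::AtomicUnorderedInsertMap<int64_t, int64_t> m(10000);
 ///   m.emplace(10, 20);               // inserts (10, 20), second == true
 ///   m.emplace(10, 30);               // key already present, second == false
 ///   int64_t v = m.find(10)->second;  // lock-free read, v == 20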
-template <typename Key,
-          typename Value,
-          typename Hash = std::hash<Key>,
-          typename KeyEqual = std::equal_to<Key>,
-          bool SkipKeyValueDeletion =
-              (boost::has_trivial_destructor<Key>::value &&
-               boost::has_trivial_destructor<Value>::value),
-          template<typename> class Atom = std::atomic,
-          typename Allocator = folly::detail::MMapAlloc>
+template <
+    typename Key,
+    typename Value,
+    typename Hash = std::hash<Key>,
+    typename KeyEqual = std::equal_to<Key>,
+    bool SkipKeyValueDeletion =
+        (boost::has_trivial_destructor<Key>::value &&
+         boost::has_trivial_destructor<Value>::value),
+    template <typename> class Atom = std::atomic,
+    typename IndexType = uint32_t,
+    typename Allocator = folly::detail::MMapAlloc>
 struct AtomicUnorderedInsertMap {
@@ -147,7 +153,7 @@
   typedef const value_type& const_reference;
 
   typedef struct ConstIterator {
-    ConstIterator(const AtomicUnorderedInsertMap& owner, uint32_t slot)
+    ConstIterator(const AtomicUnorderedInsertMap& owner, IndexType slot)
       : owner_(owner)
       , slot_(slot)
     {}
@@ -175,7 +181,7 @@
     }
 
     // post-increment
-    ConstIterator operator++ (int dummy) {
+    ConstIterator operator++(int /* dummy */) {
       auto prev = *this;
       ++*this;
       return prev;
@@ -190,31 +196,33 @@
    private:
     const AtomicUnorderedInsertMap& owner_;
-    uint32_t slot_;
+    IndexType slot_;
   } const_iterator;
 
   friend ConstIterator;
 
-  /// Constructs a map that will support the insertion of maxSize
-  /// key-value pairs without exceeding the max load factor.  Load
-  /// factors of greater than 1 are not supported, and once the actual load
-  /// factor of the map approaches 1 the insert performance will suffer.
-  /// The capacity is limited to 2^30 (about a billion), beyond which
-  /// we will throw invalid_argument.
+  /// Constructs a map that will support the insertion of maxSize key-value
+  /// pairs without exceeding the max load factor.  Load factors of greater
+  /// than 1 are not supported, and once the actual load factor of the
+  /// map approaches 1 the insert performance will suffer.  The capacity
+  /// is limited to 2^30 (about a billion) for the default IndexType,
+  /// beyond which we will throw invalid_argument.
   explicit AtomicUnorderedInsertMap(
       size_t maxSize,
       float maxLoadFactor = 0.8f,
      const Allocator& alloc = Allocator())
    : allocator_(alloc)
   {
-    size_t capacity = maxSize / std::max(1.0f, maxLoadFactor) + 128;
-    if (capacity > (1 << 30) && maxSize < (1 << 30)) {
+    size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
+    size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
+    if (capacity > avail && maxSize < avail) {
       // we'll do our best
-      capacity = (1 << 30);
+      capacity = avail;
     }
-    if (capacity < maxSize || capacity > (1 << 30)) {
+    if (capacity < maxSize || capacity > avail) {
       throw std::invalid_argument(
-          "AtomicUnorderedInsertMap capacity must fit in 30 bits");
+          "AtomicUnorderedInsertMap capacity must fit in IndexType with 2 bits "
+          "left over");
     }
 
     numSlots_ = capacity;
@@ -255,7 +263,7 @@
   ///   auto value = memo.findOrConstruct(key, [=](void* raw) {
   ///     new (raw) std::string(computation(key));
   ///   })->first;
-  template<typename Func>
+  template <typename Func>
   std::pair<const_iterator,bool> findOrConstruct(const Key& key, Func&& func) {
     auto const slot = keyToSlotIdx(key);
     auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
@@ -307,7 +315,7 @@
   /// Eventually we can duplicate all of the std::pair constructor
   /// forms, including a recursive tuple forwarding template
   /// http://functionalcpp.wordpress.com/2013/08/28/tuple-forwarding/).
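   ///
   /// For example (a sketch; the map, the makeBig() helper, and the sizes
   /// are assumptions made up for illustration):
   ///
   ///   AtomicUnorderedInsertMap<int, std::string> m(100);
   ///   std::string s = makeBig();    // an expensive-to-copy value
   ///   m.emplace(1, std::move(s));   // moves s into the map's slot
   ///   m.emplace(2, "two");          // constructs the std::string in place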
-  template<typename K, typename V>
+  template <typename K, typename V>
   std::pair<const_iterator,bool> emplace(const K& key, V&& value) {
     return findOrConstruct(key, [&](void* raw) {
       new (raw) Value(std::forward<V>(value));
@@ -319,7 +327,7 @@
   }
 
   const_iterator cbegin() const {
-    uint32_t slot = numSlots_ - 1;
+    IndexType slot = numSlots_ - 1;
     while (slot > 0 && slots_[slot].state() != LINKED) {
       --slot;
     }
@@ -331,12 +339,11 @@
   }
 
  private:
-
-  enum {
+  enum : IndexType {
     kMaxAllocationTries = 1000, // after this we throw
   };
 
-  enum BucketState : uint32_t {
+  enum BucketState : IndexType {
     EMPTY = 0,
     CONSTRUCTING = 1,
     LINKED = 2,
@@ -354,10 +361,10 @@
     /// of the first bucket for the chain whose keys map to this slot.
     /// When things are going well the head usually links to this slot,
     /// but that doesn't always have to happen.
-    Atom<uint32_t> headAndState_;
+    Atom<IndexType> headAndState_;
 
     /// The next bucket in the chain
-    uint32_t next_;
+    IndexType next_;
 
     /// Key and Value
     typename std::aligned_storage<sizeof(value_type),
@@ -407,7 +414,7 @@
 
   Allocator allocator_;
 
-  uint32_t keyToSlotIdx(const Key& key) const {
+  IndexType keyToSlotIdx(const Key& key) const {
     size_t h = hasher()(key);
     h &= slotMask_;
     while (h >= numSlots_) {
@@ -416,7 +423,7 @@
       h -= numSlots_;
     }
     return h;
   }
 
-  uint32_t find(const Key& key, uint32_t slot) const {
+  IndexType find(const Key& key, IndexType slot) const {
     KeyEqual ke = {};
     auto hs = slots_[slot].headAndState_.load(std::memory_order_acquire);
     for (slot = hs >> 2; slot != 0; slot = slots_[slot].next_) {
@@ -429,8 +436,8 @@
 
   /// Allocates a slot and returns its index.  Tries to put it near
   /// slots_[start].
-  uint32_t allocateNear(uint32_t start) {
-    for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
+  IndexType allocateNear(IndexType start) {
+    for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
       auto slot = allocationAttempt(start, tries);
       auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
       if ((prev & 3) == EMPTY &&
@@ -445,11 +452,16 @@
   /// Returns the slot we should attempt to allocate after tries failed
   /// tries, starting from the specified slot.  This is pulled out so we
   /// can specialize it differently during deterministic testing.
-  uint32_t allocationAttempt(uint32_t start, uint32_t tries) const {
+  IndexType allocationAttempt(IndexType start, IndexType tries) const {
     if (LIKELY(tries < 8 && start + tries < numSlots_)) {
-      return start + tries;
+      return IndexType(start + tries);
     } else {
-      uint32_t rv = folly::Random::rand32(numSlots_);
+      IndexType rv;
+      if (sizeof(IndexType) <= 4) {
+        rv = IndexType(folly::Random::rand32(numSlots_));
+      } else {
+        rv = IndexType(folly::Random::rand64(numSlots_));
+      }
       assert(rv < numSlots_);
       return rv;
     }
@@ -463,13 +475,35 @@
   }
 };
 
+/// AtomicUnorderedInsertMap64 is just a type alias that makes it easier
+/// to select a 64-bit slot index type.  Use this if you need a capacity
+/// bigger than 2^30 (about a billion).  This increases memory overheads,
+/// obviously.
+template <
+    typename Key,
+    typename Value,
+    typename Hash = std::hash<Key>,
+    typename KeyEqual = std::equal_to<Key>,
+    bool SkipKeyValueDeletion =
+        (boost::has_trivial_destructor<Key>::value &&
+         boost::has_trivial_destructor<Value>::value),
+    template <typename> class Atom = std::atomic,
+    typename Allocator = folly::detail::MMapAlloc>
+using AtomicUnorderedInsertMap64 =
+    AtomicUnorderedInsertMap<Key,
+                             Value,
+                             Hash,
+                             KeyEqual,
+                             SkipKeyValueDeletion,
+                             Atom, uint64_t, Allocator>;
+
 /// MutableAtom is a tiny wrapper that gives you the option of atomically
 /// updating values inserted into an AtomicUnorderedInsertMap<K,
 /// MutableAtom<V>>.  This relies on AtomicUnorderedInsertMap's guarantee
 /// that it doesn't move values.
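 ///
 /// For instance (a sketch; the counter type, capacity, and key below are
 /// illustrative assumptions):
 ///
 ///   AtomicUnorderedInsertMap<int, MutableAtom<uint32_t>> counts(1000);
 ///   counts.emplace(key, 0u);                     // insert once, never moved
 ///   counts.find(key)->second.data.fetch_add(1);  // atomic in-place update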
-template <typename T, template<typename> class Atom = std::atomic>
+template <typename T, template <typename> class Atom = std::atomic>
 struct MutableAtom {
   mutable Atom<T> data;
@@ -485,6 +519,4 @@
   explicit MutableData(const T& init) : data(init) {}
 };
 
-
-}
-#endif
+} // namespace folly
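Taken together, the new IndexType parameter and the AtomicUnorderedInsertMap64
alias are straightforward to adopt. A sketch of both (the capacities, keys,
and the external mutex below are illustrative assumptions, not part of the
diff):

    #include <folly/AtomicUnorderedMap.h>
    #include <mutex>

    // Capacities above 2^30 need the 64-bit index alias.
    folly::AtomicUnorderedInsertMap64<uint64_t, uint64_t> big(
        size_t(1) << 31);

    // MutableData relies on external concurrency control for updates;
    // the map itself still guarantees that values never move.
    folly::AtomicUnorderedInsertMap<int, folly::MutableData<int>> guarded(1000);
    std::mutex lock;

    void bump(int key) {
      guarded.emplace(key, 0);           // ensure the slot exists
      std::lock_guard<std::mutex> g(lock);
      guarded.find(key)->second.data++;  // plain increment under the lock
    }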