From 2129f9b5e34ae3a958e50f9fcb3032086366abb8 Mon Sep 17 00:00:00 2001
From: Max Wang
Date: Thu, 19 Sep 2013 13:47:22 -0700
Subject: [PATCH] Parametrize allocator in AtomicHash{Array,Map}

Summary: Maybe at some point somebody won't want malloc, e.g. me.

Test Plan: Ran AtomicHashArrayTest using an mmap allocator.

Reviewed By: delong.j@fb.com

FB internal diff: D960192
---
 folly/AtomicHashArray-inl.h        |  79 ++++++++++++++--------
 folly/AtomicHashArray.h            |  10 ++-
 folly/AtomicHashMap-inl.h          | 101 ++++++++++++++++------------
 folly/AtomicHashMap.h              |   9 ++-
 folly/test/AtomicHashArrayTest.cpp | 102 +++++++++++++++++++++++++----
 5 files changed, 212 insertions(+), 89 deletions(-)

diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h
index 236c7c22..81fa1c88 100644
--- a/folly/AtomicHashArray-inl.h
+++ b/folly/AtomicHashArray-inl.h
@@ -24,8 +24,9 @@ namespace folly {
 
 // AtomicHashArray private constructor --
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
                 KeyT erasedKey, double maxLoadFactor, size_t cacheSize)
     : capacity_(capacity),
       maxEntries_(size_t(maxLoadFactor * capacity_ + 0.5)),
@@ -41,9 +42,11 @@ AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
 * of key and returns true, or if key does not exist returns false and
 * ret.index is set to capacity_.
 */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+typename AtomicHashArray<KeyT, ValueT,
+                         HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 findInternal(const KeyT key_in) {
   DCHECK_NE(key_in, kEmptyKey_);
   DCHECK_NE(key_in, kLockedKey_);
@@ -77,10 +80,12 @@ findInternal(const KeyT key_in) {
 * this will be the previously inserted value, and if the map is full it is
 * default.
 */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
 template <class T>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+typename AtomicHashArray<KeyT, ValueT,
+                         HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 insertInternal(KeyT key_in, T&& value) {
   const short NO_NEW_INSERTS = 1;
   const short NO_PENDING_INSERTS = 2;
@@ -193,8 +198,9 @@ insertInternal(KeyT key_in, T&& value) {
 * erased key will never be reused. If there's an associated value, we won't
 * touch it either.
 */
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+size_t AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 erase(KeyT key_in) {
   CHECK_NE(key_in, kEmptyKey_);
   CHECK_NE(key_in, kLockedKey_);
@@ -236,13 +242,17 @@ erase(KeyT key_in) {
   }
 }
 
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-const typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::Config
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::defaultConfig;
-
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-typename AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::SmartPtr
-AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+const typename AtomicHashArray<KeyT, ValueT,
+                               HashFcn, EqualFcn, Allocator>::Config
+AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
+
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+typename AtomicHashArray<KeyT, ValueT,
+                         HashFcn, EqualFcn, Allocator>::SmartPtr
+AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 create(size_t maxSize, const Config& c) {
   CHECK_LE(c.maxLoadFactor, 1.0);
   CHECK_GT(c.maxLoadFactor, 0.0);
@@ -250,10 +260,16 @@ create(size_t maxSize, const Config& c) {
   size_t capacity = size_t(maxSize / c.maxLoadFactor);
   size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * capacity;
 
-  std::unique_ptr<void, void(*)(void*)> mem(malloc(sz), free);
-  new(mem.get()) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
-                                 c.maxLoadFactor, c.entryCountThreadCacheSize);
-  SmartPtr map(static_cast<AtomicHashArray*>(mem.release()));
+  auto const mem = Allocator().allocate(sz);
+  try {
+    new (mem) AtomicHashArray(capacity, c.emptyKey, c.lockedKey, c.erasedKey,
+                              c.maxLoadFactor, c.entryCountThreadCacheSize);
+  } catch (...) {
+    Allocator().deallocate(mem, sz);
+    throw;
+  }
+
+  SmartPtr map(static_cast<AtomicHashArray*>((void *)mem));
 
   /*
    * Mark all cells as empty.
@@ -274,22 +290,28 @@ create(size_t maxSize, const Config& c) {
   return map;
 }
 
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 destroy(AtomicHashArray* p) {
   assert(p);
+
+  size_t sz = sizeof(AtomicHashArray) + sizeof(value_type) * p->capacity_;
+
   FOR_EACH_RANGE(i, 0, p->capacity_) {
     if (p->cells_[i].first != p->kEmptyKey_) {
       p->cells_[i].~value_type();
     }
   }
   p->~AtomicHashArray();
-  free(p);
+
+  Allocator().deallocate((char *)p, sz);
 }
 
 // clear -- clears all keys and values in the map and resets all counters
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
-void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
+void AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 clear() {
   FOR_EACH_RANGE(i, 0, capacity_) {
     if (cells_[i].first != kEmptyKey_) {
@@ -307,9 +329,10 @@ clear() {
 
 // Iterator implementation
 
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
 template <class ContT, class IterVal>
-struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn>::aha_iterator
+struct AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::aha_iterator
     : boost::iterator_facade<aha_iterator<ContT, IterVal>,
                              IterVal,
                              boost::forward_traversal_tag>
diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h
index e6b6c721..75d84f4f 100644
--- a/folly/AtomicHashArray.h
+++ b/folly/AtomicHashArray.h
@@ -43,11 +43,15 @@ namespace folly {
 
 template <class KeyT, class ValueT,
-          class HashFcn = std::hash<KeyT>, class EqualFcn = std::equal_to<KeyT>>
+          class HashFcn = std::hash<KeyT>,
+          class EqualFcn = std::equal_to<KeyT>,
+          class Allocator = std::allocator<char>>
 class AtomicHashMap;
 
 template <class KeyT, class ValueT,
-          class HashFcn = std::hash<KeyT>, class EqualFcn = std::equal_to<KeyT>>
+          class HashFcn = std::hash<KeyT>,
+          class EqualFcn = std::equal_to<KeyT>,
+          class Allocator = std::allocator<char>>
 class AtomicHashArray : boost::noncopyable {
   static_assert((std::is_convertible<KeyT,int32_t>::value ||
                  std::is_convertible<KeyT,int64_t>::value ||
@@ -215,7 +219,7 @@ class AtomicHashArray : boost::noncopyable {
 
   /* Private data and helper functions... */
  private:
-  friend class AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>;
+  friend class AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>;
 
   struct SimpleRetT { size_t idx; bool success;
     SimpleRetT(size_t i, bool s) : idx(i), success(s) {}
diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index 1548b7f0..a2f497af 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -22,14 +22,16 @@
 
 namespace folly {
 
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::Config
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::defaultConfig;
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;
 
 // AtomicHashMap constructor -- Atomic wrapper that allows growth
 // This class has a lot of overhead (184 Bytes) so only use for big maps
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 AtomicHashMap(size_t size, const Config& config)
   : kGrowthFrac_(config.growthFactor < 0 ?
                  1.0 - config.maxLoadFactor : config.growthFactor) {
@@ -44,9 +46,11 @@ AtomicHashMap(size_t size, const Config& config)
 }
 
 // insert --
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn,EqualFcn>::iterator,bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
+                                 EqualFcn, Allocator>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 insert(key_type k, const mapped_type& v) {
   SimpleRetT ret = insertInternal(k,v);
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
@@ -54,9 +58,11 @@ insert(key_type k, const mapped_type& v) {
                         ret.success);
 }
 
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-std::pair<typename AtomicHashMap<KeyT,ValueT,HashFcn,EqualFcn>::iterator,bool>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
+                                 EqualFcn, Allocator>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 insert(key_type k, mapped_type&& v) {
   SimpleRetT ret = insertInternal(k, std::move(v));
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
@@ -65,10 +71,11 @@ insert(key_type k, mapped_type&& v) {
 }
 
 // insertInternal -- Allocates new sub maps as existing ones fill up.
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
 template <class T>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 insertInternal(key_type key, T&& value) {
  beginInsertInternal:
   int nextMapIdx = // this maintains our state
@@ -142,9 +149,10 @@ insertInternal(key_type key, T&& value) {
 }
 
 // find --
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 find(KeyT k) {
   SimpleRetT ret = findInternal(k);
   if (ret.i >= numMapsAllocated_.load(std::memory_order_acquire)) {
@@ -154,17 +162,20 @@ find(KeyT k) {
   return iterator(this, ret.i, subMap->makeIter(ret.j));
 }
 
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::const_iterator
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT,
+                       HashFcn, EqualFcn, Allocator>::const_iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 find(KeyT k) const {
   return const_cast<AtomicHashMap*>(this)->find(k);
 }
 
 // findInternal --
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 findInternal(const KeyT k) const {
   SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
   typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
@@ -185,9 +196,10 @@ findInternal(const KeyT k) const {
 }
 
 // findAtInternal -- see encodeIndex() for details.
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 findAtInternal(uint32_t idx) const {
   uint32_t subMapIdx, subMapOffset;
   if (idx & kSecondaryMapBit_) {
@@ -205,9 +217,10 @@ findAtInternal(uint32_t idx) const {
 }
 
 // erase --
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::size_type
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 erase(const KeyT k) {
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
   FOR_EACH_RANGE(i, 0, numMaps) {
@@ -221,8 +234,9 @@ erase(const KeyT k) {
 }
 
 // capacity -- summation of capacities of all submaps
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 capacity() const {
   size_t totalCap(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -234,8 +248,9 @@ capacity() const {
 
 // spaceRemaining --
 // number of new insertions until current submaps are all at max load
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 spaceRemaining() const {
   size_t spaceRem(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -251,8 +266,9 @@ spaceRemaining() const {
 
 // clear -- Wipes all keys and values from primary map and destroys
 // all secondary maps.  Not thread safe.
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 clear() {
   subMaps_[0].load(std::memory_order_relaxed)->clear();
   int const numMaps = numMapsAllocated_
@@ -267,8 +283,9 @@ clear() {
 }
 
 // size --
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 size() const {
   size_t totalSize(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -296,8 +313,9 @@ size() const {
 //  31              1
 //  27-30           which subMap
 //  0-26            subMap offset (index_ret input)
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
-inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 encodeIndex(uint32_t subMap, uint32_t offset) {
   DCHECK_EQ(offset & kSecondaryMapBit_, 0);  // offset can't be too big
   if (subMap == 0) return offset;
@@ -313,9 +331,10 @@ encodeIndex(uint32_t subMap, uint32_t offset) {
 
 // Iterator implementation
 
-template <typename KeyT, typename ValueT, typename HashFcn, typename EqualFcn>
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
 template <class ContT, class IterVal, class SubIt>
-struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn>::ahm_iterator
+struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
     : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                              IterVal,
                              boost::forward_traversal_tag>
diff --git a/folly/AtomicHashMap.h b/folly/AtomicHashMap.h
index 2738f399..bac9b4b9 100644
--- a/folly/AtomicHashMap.h
+++ b/folly/AtomicHashMap.h
@@ -135,7 +135,9 @@ namespace folly {
 *    make_pair everywhere), and providing both can lead to some gross
 *    template error messages.
 *
- *   - Not Allocator-aware.
+ *   - The Allocator must not be stateful (a new instance will be spun up for
+ *     each allocation), and its allocate() method must take a raw number of
+ *     bytes.
 *
 *   - KeyT must be a 32 bit or 64 bit atomic integer type, and you must
 *     define special 'locked' and 'empty' key values in the ctor
@@ -153,9 +155,10 @@ struct AtomicHashMapFullError : std::runtime_error {
   {}
 };
 
-template <class KeyT, class ValueT, class HashFcn, class EqualFcn>
+template <class KeyT, class ValueT,
+          class HashFcn, class EqualFcn, class Allocator>
 class AtomicHashMap : boost::noncopyable {
-  typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn> SubMap;
+  typedef AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn, Allocator> SubMap;
 
  public:
   typedef KeyT key_type;
diff --git a/folly/test/AtomicHashArrayTest.cpp b/folly/test/AtomicHashArrayTest.cpp
index d78684dc..1fa8d994 100644
--- a/folly/test/AtomicHashArrayTest.cpp
+++ b/folly/test/AtomicHashArrayTest.cpp
@@ -14,23 +14,86 @@
 * limitations under the License.
 */
 
+#include <sys/mman.h>
+#include <cstddef>
+#include <new>
+
 #include "folly/AtomicHashArray.h"
 #include "folly/Hash.h"
 #include "folly/Conv.h"
+#include "folly/Memory.h"
 #include <gtest/gtest.h>
 
 using namespace std;
 using namespace folly;
 
+template <class T>
+class MmapAllocator {
+ public:
+  typedef T value_type;
+  typedef T* pointer;
+  typedef const T* const_pointer;
+  typedef T& reference;
+  typedef const T& const_reference;
+
+  typedef ptrdiff_t difference_type;
+  typedef size_t size_type;
+
+  T* address(T& x) const {
+    return std::addressof(x);
+  }
+
+  const T* address(const T& x) const {
+    return std::addressof(x);
+  }
+
+  size_t max_size() const {
+    return std::numeric_limits<size_t>::max();
+  }
+
+  template <class U> struct rebind {
+    typedef MmapAllocator<U> other;
+  };
+
+  bool operator!=(const MmapAllocator& other) const {
+    return !(*this == other);
+  }
+
+  bool operator==(const MmapAllocator& other) const {
+    return true;
+  }
+
+  template <class... Args>
+  void construct(T* p, Args&&... args) {
+    new (p) T(std::forward<Args>(args)...);
+  }
+
+  void destroy(T* p) {
+    p->~T();
+  }
+
+  T *allocate(size_t n) {
+    void *p = mmap(nullptr, n * sizeof(T), PROT_READ | PROT_WRITE,
+                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+    if (!p) throw std::bad_alloc();
+    return (T *)p;
+  }
+
+  void deallocate(T *p, size_t n) {
+    munmap(p, n * sizeof(T));
+  }
+};
+
 template <class KeyT, class ValueT>
 pair<KeyT, ValueT> createEntry(int i) {
   return pair<KeyT, ValueT>(to<KeyT>(folly::hash::jenkins_rev_mix32(i) % 1000),
                             to<ValueT>(i + 3));
 }
 
-template <class KeyT, class ValueT>
+template <class KeyT, class ValueT, class Allocator = std::allocator<char>>
 void testMap() {
-  typedef AtomicHashArray<KeyT, ValueT> MyArr;
+  typedef AtomicHashArray<KeyT, ValueT, std::hash<KeyT>,
+                          std::equal_to<KeyT>, Allocator> MyArr;
   auto arr = MyArr::create(150);
   map<KeyT, ValueT> ref;
   for (int i = 0; i < 100; ++i) {
@@ -75,9 +138,10 @@ void testMap() {
   }
 }
 
-template <class KeyT, class ValueT>
+template <class KeyT, class ValueT, class Allocator = std::allocator<char>>
 void testNoncopyableMap() {
-  typedef AtomicHashArray<KeyT, std::unique_ptr<ValueT>> MyArr;
+  typedef AtomicHashArray<KeyT, std::unique_ptr<ValueT>, std::hash<KeyT>,
+                          std::equal_to<KeyT>, Allocator> MyArr;
   auto arr = MyArr::create(150);
   for (int i = 0; i < 100; i++) {
     arr->insert(make_pair(i,std::unique_ptr<ValueT>(new ValueT(i))));
   }
@@ -90,24 +154,34 @@ void testNoncopyableMap() {
 
 TEST(Aha, InsertErase_i32_i32) {
-  testMap<int32_t, int32_t>();
-  testNoncopyableMap<int32_t, int32_t>();
+  testMap<int32_t, int32_t>();
+  testMap<int32_t, int32_t, MmapAllocator<char>>();
+  testNoncopyableMap<int32_t, int32_t>();
+  testNoncopyableMap<int32_t, int32_t, MmapAllocator<char>>();
 }
 TEST(Aha, InsertErase_i64_i32) {
-  testMap<int64_t, int32_t>();
-  testNoncopyableMap<int64_t, int32_t>();
+  testMap<int64_t, int32_t>();
+  testMap<int64_t, int32_t, MmapAllocator<char>>();
+  testNoncopyableMap<int64_t, int32_t>();
+  testNoncopyableMap<int64_t, int32_t, MmapAllocator<char>>();
 }
 TEST(Aha, InsertErase_i64_i64) {
-  testMap<int64_t, int64_t>();
-  testNoncopyableMap<int64_t, int64_t>();
+  testMap<int64_t, int64_t>();
+  testMap<int64_t, int64_t, MmapAllocator<char>>();
+  testNoncopyableMap<int64_t, int64_t>();
+  testNoncopyableMap<int64_t, int64_t, MmapAllocator<char>>();
 }
 TEST(Aha, InsertErase_i32_i64) {
-  testMap<int32_t, int64_t>();
-  testNoncopyableMap<int32_t, int64_t>();
+  testMap<int32_t, int64_t>();
+  testMap<int32_t, int64_t, MmapAllocator<char>>();
+  testNoncopyableMap<int32_t, int64_t>();
+  testNoncopyableMap<int32_t, int64_t, MmapAllocator<char>>();
 }
 TEST(Aha, InsertErase_i32_str) {
-  testMap<int32_t, string>();
+  testMap<int32_t, string>();
+  testMap<int32_t, string, MmapAllocator<char>>();
 }
 TEST(Aha, InsertErase_i64_str) {
-  testMap<int64_t, string>();
+  testMap<int64_t, string>();
+  testMap<int64_t, string, MmapAllocator<char>>();
 }
-- 
2.34.1
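
Usage sketch (illustrative only, not part of the applied patch): with the new fifth
template parameter, any stateless allocator whose allocate()/deallocate() work on a
raw byte count can back the map, as the doc-comment change above requires. The
snippet below assumes the MmapAllocator helper from AtomicHashArrayTest.cpp is in
scope and uses the include paths from this patch; MmapBackedMap is a placeholder
name, and the default Config keys are relied on for the int64_t key type.

    // Sketch: back AtomicHashMap's submap storage with the test's MmapAllocator.
    #include "folly/AtomicHashMap.h"

    #include <cstdint>
    #include <functional>

    typedef folly::AtomicHashMap<int64_t, int64_t,
                                 std::hash<int64_t>,
                                 std::equal_to<int64_t>,
                                 MmapAllocator<char>> MmapBackedMap;

    int main() {
      MmapBackedMap m(4096);  // final-size estimate; submaps now come from mmap
      m.insert(1, 100);       // insert(key_type, const mapped_type&) per this diff
      MmapBackedMap::iterator it = m.find(1);
      return (it != m.end() && it->second == 100) ? 0 : 1;
    }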