From: Daniel Andersson Date: Tue, 1 Dec 2015 15:46:00 +0000 (-0800) Subject: Enable find/emplace for key types other than KeyT. X-Git-Tag: deprecate-dynamic-initializer~218 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=4e5cb0efba9454e3abb2dd203116ed8e9a056468;p=folly.git Enable find/emplace for key types other than KeyT. Summary: Add template parameters to support arbitrary types when looking up a key. This is useful to avoid the cost of 'materializing' a key which is likely to be present in the array (or, alternatively, switching on a tagged union KeyT). Use the new capability to greatly simplify the lookup logic in HHVMs static string table. Reviewed By: nbronson Differential Revision: D2662451 fb-gh-sync-id: 707fa033f350b80ca8080af17f1a8a74c59f2e88 --- diff --git a/folly/AtomicHashArray-inl.h b/folly/AtomicHashArray-inl.h index 1125bb98..35a1927c 100644 --- a/folly/AtomicHashArray-inl.h +++ b/folly/AtomicHashArray-inl.h @@ -18,15 +18,18 @@ #error "This should only be included by AtomicHashArray.h" #endif +#include + #include #include namespace folly { // AtomicHashArray private constructor -- -template -AtomicHashArray:: +template +AtomicHashArray:: AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey, KeyT erasedKey, double _maxLoadFactor, size_t cacheSize) : capacity_(capacity), @@ -43,20 +46,22 @@ AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey, * of key and returns true, or if key does not exist returns false and * ret.index is set to capacity_. */ -template -typename AtomicHashArray::SimpleRetT -AtomicHashArray:: -findInternal(const KeyT key_in) { - DCHECK_NE(key_in, kEmptyKey_); - DCHECK_NE(key_in, kLockedKey_); - DCHECK_NE(key_in, kErasedKey_); - for (size_t idx = keyToAnchorIdx(key_in), numProbes = 0; +template +template +typename AtomicHashArray::SimpleRetT +AtomicHashArray:: +findInternal(const LookupKeyT key_in) { + checkLegalKeyIfKey(key_in); + + for (size_t idx = keyToAnchorIdx(key_in), + numProbes = 0; ; idx = ProbeFcn()(idx, numProbes, capacity_)) { const KeyT key = acquireLoadKey(cells_[idx]); - if (LIKELY(EqualFcn()(key, key_in))) { + if (LIKELY(LookupEqualFcn()(key, key_in))) { return SimpleRetT(idx, true); } if (UNLIKELY(key == kEmptyKey_)) { @@ -83,20 +88,23 @@ findInternal(const KeyT key_in) { * this will be the previously inserted value, and if the map is full it is * default. */ -template -template -typename AtomicHashArray::SimpleRetT -AtomicHashArray:: -insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { +template +template +typename AtomicHashArray::SimpleRetT +AtomicHashArray:: +insertInternal(LookupKeyT key_in, ArgTs&&... vCtorArgs) { const short NO_NEW_INSERTS = 1; const short NO_PENDING_INSERTS = 2; - CHECK_NE(key_in, kEmptyKey_); - CHECK_NE(key_in, kLockedKey_); - CHECK_NE(key_in, kErasedKey_); + checkLegalKeyIfKey(key_in); - size_t idx = keyToAnchorIdx(key_in); + size_t idx = keyToAnchorIdx(key_in); size_t numProbes = 0; for (;;) { DCHECK_LT(idx, capacity_); @@ -132,11 +140,20 @@ insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { // If we fail, fall through to comparison below; maybe the insert that // just beat us was for this very key.... 
if (tryLockCell(cell)) { + KeyT key_new; // Write the value - done before unlocking try { + key_new = LookupKeyToKeyFcn()(key_in); + typedef typename std::remove_const::type + LookupKeyTNoConst; + constexpr bool kAlreadyChecked = + std::is_same::value; + if (!kAlreadyChecked) { + checkLegalKeyIfKey(key_new); + } DCHECK(relaxedLoadKey(*cell) == kLockedKey_); new (&cell->second) ValueT(std::forward(vCtorArgs)...); - unlockCell(cell, key_in); // Sets the new key + unlockCell(cell, key_new); // Sets the new key } catch (...) { // Transition back to empty key---requires handling // locked->empty below. @@ -147,7 +164,7 @@ insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { // An erase() can race here and delete right after our insertion // Direct comparison rather than EqualFcn ok here // (we just inserted it) - DCHECK(relaxedLoadKey(*cell) == key_in || + DCHECK(relaxedLoadKey(*cell) == key_new || relaxedLoadKey(*cell) == kErasedKey_); --numPendingEntries_; ++numEntries_; // This is a thread cached atomic increment :) @@ -167,7 +184,7 @@ insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { } const KeyT thisKey = acquireLoadKey(*cell); - if (EqualFcn()(thisKey, key_in)) { + if (LookupEqualFcn()(thisKey, key_in)) { // Found an existing entry for our key, but we don't overwrite the // previous value. return SimpleRetT(idx, false); @@ -191,7 +208,6 @@ insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { } } - /* * erase -- * @@ -202,9 +218,10 @@ insertInternal(KeyT key_in, ArgTs&&... vCtorArgs) { * erased key will never be reused. If there's an associated value, we won't * touch it either. */ -template -size_t AtomicHashArray:: +template +size_t AtomicHashArray:: erase(KeyT key_in) { CHECK_NE(key_in, kEmptyKey_); CHECK_NE(key_in, kLockedKey_); @@ -250,11 +267,12 @@ erase(KeyT key_in) { } } -template -typename AtomicHashArray::SmartPtr -AtomicHashArray:: +template +typename AtomicHashArray::SmartPtr +AtomicHashArray:: create(size_t maxSize, const Config& c) { CHECK_LE(c.maxLoadFactor, 1.0); CHECK_GT(c.maxLoadFactor, 0.0); @@ -292,9 +310,10 @@ create(size_t maxSize, const Config& c) { return map; } -template -void AtomicHashArray:: +template +void AtomicHashArray:: destroy(AtomicHashArray* p) { assert(p); @@ -311,9 +330,10 @@ destroy(AtomicHashArray* p) { } // clear -- clears all keys and values in the map and resets all counters -template -void AtomicHashArray:: +template +void AtomicHashArray:: clear() { FOR_EACH_RANGE(i, 0, capacity_) { if (cells_[i].first != kEmptyKey_) { @@ -331,10 +351,11 @@ clear() { // Iterator implementation -template +template template -struct AtomicHashArray:: +struct AtomicHashArray:: aha_iterator : boost::iterator_facade, IterVal, diff --git a/folly/AtomicHashArray.h b/folly/AtomicHashArray.h index 71ef2c76..33c59247 100644 --- a/folly/AtomicHashArray.h +++ b/folly/AtomicHashArray.h @@ -62,18 +62,46 @@ struct AtomicHashArrayQuadraticProbeFcn } }; +// Enables specializing checkLegalKey without specializing its class. +namespace detail { +// Local copy of folly::gen::Identity, to avoid heavy dependencies. 
+class AHAIdentity { + public: + template + auto operator()(Value&& value) const -> + decltype(std::forward(value)) { + return std::forward(value); + } +}; + +template +inline void checkLegalKeyIfKeyTImpl(NotKeyT ignored, KeyT emptyKey, + KeyT lockedKey, KeyT erasedKey) { +} + +template +inline void checkLegalKeyIfKeyTImpl(KeyT key_in, KeyT emptyKey, + KeyT lockedKey, KeyT erasedKey) { + DCHECK_NE(key_in, emptyKey); + DCHECK_NE(key_in, lockedKey); + DCHECK_NE(key_in, erasedKey); +} +} // namespace detail + template , class EqualFcn = std::equal_to, class Allocator = std::allocator, - class ProbeFcn = AtomicHashArrayLinearProbeFcn> + class ProbeFcn = AtomicHashArrayLinearProbeFcn, + class KeyConvertFcn = detail::AHAIdentity> class AtomicHashMap; template , class EqualFcn = std::equal_to, class Allocator = std::allocator, - class ProbeFcn = AtomicHashArrayLinearProbeFcn> + class ProbeFcn = AtomicHashArrayLinearProbeFcn, + class KeyConvertFcn = detail::AHAIdentity> class AtomicHashArray : boost::noncopyable { static_assert((std::is_convertible::value || std::is_convertible::value || @@ -84,6 +112,9 @@ class AtomicHashArray : boost::noncopyable { public: typedef KeyT key_type; typedef ValueT mapped_type; + typedef HashFcn hasher; + typedef EqualFcn key_equal; + typedef KeyConvertFcn key_convert; typedef std::pair value_type; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; @@ -164,11 +195,37 @@ class AtomicHashArray : boost::noncopyable { // Cannot have pre-instantiated const Config instance because of SIOF. static SmartPtr create(size_t maxSize, const Config& c = Config()); - iterator find(KeyT k) { - return iterator(this, findInternal(k).idx); + /* + * find -- + * + * + * Returns the iterator to the element if found, otherwise end(). + * + * As an optional feature, the type of the key to look up (LookupKeyT) is + * allowed to be different from the type of keys actually stored (KeyT). + * + * This enables use cases where materializing the key is costly and usually + * redudant, e.g., canonicalizing/interning a set of strings and being able + * to look up by StringPiece. To use this feature, LookupHashFcn must take + * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first + * and second parameter, respectively. + * + * See folly/test/ArrayHashArrayTest.cpp for sample usage. + */ + template + iterator find(LookupKeyT k) { + return iterator(this, + findInternal(k).idx); } - const_iterator find(KeyT k) const { - return const_cast(this)->find(k); + + template + const_iterator find(LookupKeyT k) const { + return const_cast(this)-> + find(k); } /* @@ -194,10 +251,24 @@ class AtomicHashArray : boost::noncopyable { * * Same contract as insert(), but performs in-place construction * of the value type using the specified arguments. + * + * Also, like find(), this method optionally allows 'key_in' to have a type + * different from that stored in the table; see find(). If and only if no + * equal key is already present, this method converts 'key_in' to a key of + * type KeyT using the provided LookupKeyToKeyFcn. */ - template - std::pair emplace(KeyT key_in, ArgTs&&... vCtorArgs) { - SimpleRetT ret = insertInternal(key_in, std::forward(vCtorArgs)...); + template + std::pair emplace(LookupKeyT key_in, ArgTs&&... vCtorArgs) { + SimpleRetT ret = insertInternal( + key_in, + std::forward(vCtorArgs)...); return std::make_pair(iterator(this, ret.idx), ret.success); } @@ -276,10 +347,22 @@ friend class AtomicHashMap - SimpleRetT insertInternal(KeyT key, ArgTs&&... 
vCtorArgs); + template + SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs); - SimpleRetT findInternal(const KeyT key); + template + SimpleRetT findInternal(const LookupKeyT key); + + template + void checkLegalKeyIfKey(MaybeKeyT key) { + detail::checkLegalKeyIfKeyTImpl(key, kEmptyKey_, kLockedKey_, kErasedKey_); + } static std::atomic* cellKeyPtr(const value_type& r) { // We need some illegal casting here in order to actually store @@ -330,8 +413,9 @@ friend class AtomicHashMap + inline size_t keyToAnchorIdx(const LookupKeyT k) const { + const size_t hashVal = LookupHashFcn()(k); const size_t probe = hashVal & kAnchorMask_; return LIKELY(probe < capacity_) ? probe : hashVal % capacity_; } diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h index 61c23b14..6526351f 100644 --- a/folly/AtomicHashMap-inl.h +++ b/folly/AtomicHashMap-inl.h @@ -29,8 +29,10 @@ template -AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +AtomicHashMap:: AtomicHashMap(size_t finalSizeEst, const Config& config) : kGrowthFrac_(config.growthFactor < 0 ? 1.0 - config.maxLoadFactor : config.growthFactor) { @@ -50,13 +52,23 @@ template -template -std::pair::iterator, bool> -AtomicHashMap:: -emplace(key_type k, ArgTs&&... vCtorArgs) { - SimpleRetT ret = insertInternal(k, std::forward(vCtorArgs)...); + typename ProbeFcn, + typename KeyConvertFcn> +template +std::pair::iterator, bool> +AtomicHashMap:: +emplace(LookupKeyT k, ArgTs&&... vCtorArgs) { + SimpleRetT ret = insertInternal( + k, std::forward(vCtorArgs)...); SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed); return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)), ret.success); @@ -68,12 +80,19 @@ template -template -typename AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +template +typename AtomicHashMap:: SimpleRetT -AtomicHashMap:: -insertInternal(key_type key, ArgTs&&... vCtorArgs) { +AtomicHashMap:: +insertInternal(LookupKeyT key, ArgTs&&... vCtorArgs) { beginInsertInternal: auto nextMapIdx = // this maintains our state numMapsAllocated_.load(std::memory_order_acquire); @@ -81,7 +100,11 @@ insertInternal(key_type key, ArgTs&&... vCtorArgs) { FOR_EACH_RANGE(i, 0, nextMapIdx) { // insert in each map successively. If one succeeds, we're done! 
SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed); - ret = subMap->insertInternal(key, std::forward(vCtorArgs)...); + ret = subMap->template insertInternal( + key, std::forward(vCtorArgs)...); if (ret.idx == subMap->capacity_) { continue; //map is full, so try the next one } @@ -151,12 +174,16 @@ template -typename AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +template +typename AtomicHashMap:: iterator -AtomicHashMap::find( - KeyT k) { - SimpleRetT ret = findInternal(k); +AtomicHashMap::find( + LookupKeyT k) { + SimpleRetT ret = findInternal(k); if (!ret.success) { return end(); } @@ -169,12 +196,17 @@ template + typename ProbeFcn, + typename KeyConvertFcn> +template typename AtomicHashMap::const_iterator -AtomicHashMap:: -find(KeyT k) const { - return const_cast(this)->find(k); + HashFcn, EqualFcn, Allocator, ProbeFcn, KeyConvertFcn>::const_iterator +AtomicHashMap:: +find(LookupKeyT k) const { + return const_cast(this)->find(k); } // findInternal -- @@ -183,13 +215,20 @@ template -typename AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +template +typename AtomicHashMap:: SimpleRetT -AtomicHashMap:: - findInternal(const KeyT k) const { +AtomicHashMap:: + findInternal(const LookupKeyT k) const { SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed); - typename SubMap::SimpleRetT ret = primaryMap->findInternal(k); + typename SubMap::SimpleRetT ret = + primaryMap->template findInternal(k); if (LIKELY(ret.idx != primaryMap->capacity_)) { return SimpleRetT(0, ret.idx, ret.success); } @@ -197,7 +236,9 @@ AtomicHashMap:: FOR_EACH_RANGE(i, 1, numMaps) { // Check each map successively. If one succeeds, we're done! SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed); - ret = thisMap->findInternal(k); + ret = thisMap->template findInternal(k); if (LIKELY(ret.idx != thisMap->capacity_)) { return SimpleRetT(i, ret.idx, ret.success); } @@ -212,10 +253,13 @@ template -typename AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +typename AtomicHashMap:: SimpleRetT -AtomicHashMap:: +AtomicHashMap:: findAtInternal(uint32_t idx) const { uint32_t subMapIdx, subMapOffset; if (idx & kSecondaryMapBit_) { @@ -238,10 +282,13 @@ template -typename AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +typename AtomicHashMap:: size_type -AtomicHashMap:: +AtomicHashMap:: erase(const KeyT k) { int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); FOR_EACH_RANGE(i, 0, numMaps) { @@ -260,8 +307,10 @@ template -size_t AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap:: capacity() const { size_t totalCap(0); int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); @@ -278,8 +327,10 @@ template -size_t AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap:: spaceRemaining() const { size_t spaceRem(0); int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); @@ -300,8 +351,10 @@ template -void AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +void AtomicHashMap:: clear() { subMaps_[0].load(std::memory_order_relaxed)->clear(); int const numMaps = numMapsAllocated_ @@ -321,8 +374,10 @@ template -size_t AtomicHashMap:: + typename ProbeFcn, + typename KeyConvertFcn> +size_t AtomicHashMap:: size() const { size_t totalSize(0); int const numMaps = numMapsAllocated_.load(std::memory_order_acquire); @@ -355,9 +410,11 @@ template + typename ProbeFcn, + typename KeyConvertFcn> inline uint32_t -AtomicHashMap:: 
+AtomicHashMap:: encodeIndex(uint32_t subMap, uint32_t offset) { DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big if (subMap == 0) return offset; @@ -378,9 +435,11 @@ template + typename ProbeFcn, + typename KeyConvertFcn> template -struct AtomicHashMap:: +struct AtomicHashMap:: ahm_iterator : boost::iterator_facade, IterVal, boost::forward_traversal_tag> { diff --git a/folly/AtomicHashMap.h b/folly/AtomicHashMap.h index c6f06bd8..68e17e69 100644 --- a/folly/AtomicHashMap.h +++ b/folly/AtomicHashMap.h @@ -155,10 +155,11 @@ struct AtomicHashMapFullError : std::runtime_error { {} }; -template +template class AtomicHashMap : boost::noncopyable { -typedef AtomicHashArray +typedef AtomicHashArray SubMap; public: @@ -167,6 +168,7 @@ typedef AtomicHashArray typedef std::pair value_type; typedef HashFcn hasher; typedef EqualFcn key_equal; + typedef KeyConvertFcn key_convert; typedef value_type* pointer; typedef value_type& reference; typedef const value_type& const_reference; @@ -239,19 +241,44 @@ typedef AtomicHashArray * * Same contract as insert(), but performs in-place construction * of the value type using the specified arguments. + * + * Also, like find(), this method optionally allows 'key_in' to have a type + * different from that stored in the table; see find(). If and only if no + * equal key is already present, this method converts 'key_in' to a key of + * type KeyT using the provided LookupKeyToKeyFcn. */ - template - std::pair emplace(key_type k, ArgTs&&... vCtorArg); + template + std::pair emplace(LookupKeyT k, ArgTs&&... vCtorArg); /* * find -- * - * Returns an iterator into the map. + * Returns the iterator to the element if found, otherwise end(). + * + * As an optional feature, the type of the key to look up (LookupKeyT) is + * allowed to be different from the type of keys actually stored (KeyT). * - * If the key is not found, returns end(). + * This enables use cases where materializing the key is costly and usually + * redudant, e.g., canonicalizing/interning a set of strings and being able + * to look up by StringPiece. To use this feature, LookupHashFcn must take + * a LookupKeyT, and LookupEqualFcn must take KeyT and LookupKeyT as first + * and second parameter, respectively. + * + * See folly/test/ArrayHashMapTest.cpp for sample usage. */ - iterator find(key_type k); - const_iterator find(key_type k) const; + template + iterator find(LookupKeyT k); + + template + const_iterator find(LookupKeyT k) const; /* * erase -- @@ -403,10 +430,17 @@ typedef AtomicHashArray SimpleRetT() = default; }; - template - SimpleRetT insertInternal(KeyT key, ArgTs&&... value); - - SimpleRetT findInternal(const KeyT k) const; + template + SimpleRetT insertInternal(LookupKeyT key, ArgTs&&... value); + + template + SimpleRetT findInternal(const LookupKeyT k) const; SimpleRetT findAtInternal(uint32_t idx) const; diff --git a/folly/test/AtomicHashArrayTest.cpp b/folly/test/AtomicHashArrayTest.cpp index 1f83bc75..cbf68810 100644 --- a/folly/test/AtomicHashArrayTest.cpp +++ b/folly/test/AtomicHashArrayTest.cpp @@ -247,3 +247,91 @@ TEST(Aha, InsertErase_i64_str) { TEST(Aha, Create_cstr_i64) { auto obj = AtomicHashArray::create(12); } + +static bool legalKey(char* a); + +// Support two additional key lookup types (char and StringPiece) using +// one set of traits. 
+struct EqTraits { + bool operator()(char* a, char* b) { + return legalKey(a) && (strcmp(a, b) == 0); + } + bool operator()(char* a, const char& b) { + return legalKey(a) && (a[0] != '\0') && (a[0] == b); + } + bool operator()(char* a, const StringPiece b) { + return legalKey(a) && + (strlen(a) == b.size()) && (strncmp(a, b.begin(), b.size()) == 0); + } +}; + +struct HashTraits { + size_t operator()(char* a) { + size_t result = 0; + while (a[0] != 0) result += static_cast(*(a++)); + return result; + } + size_t operator()(const char& a) { + return static_cast(a); + } + size_t operator()(const StringPiece a) { + size_t result = 0; + for (const auto& ch : a) result += static_cast(ch); + return result; + } +}; + +// Creates malloc'ed null-terminated strings. +struct KeyConvertTraits { + char* operator()(const char& a) { + return strndup(&a, 1); + } + char* operator()(const StringPiece a) { + return strndup(a.begin(), a.size()); + } +}; + +typedef AtomicHashArray, AtomicHashArrayQuadraticProbeFcn, + KeyConvertTraits> + AHACstrInt; +AHACstrInt::Config cstrIntCfg; + +static bool legalKey(char* a) { + return a != cstrIntCfg.emptyKey && + a != cstrIntCfg.lockedKey && + a != cstrIntCfg.erasedKey; +} + +TEST(Aha, LookupAny) { + auto arr = AHACstrInt::create(12); + + arr->insert(std::make_pair(strdup("f"), 42)); + EXPECT_EQ(42, arr->find("f")->second); + { + // Look up a single char, successfully. + auto it = arr->find('f'); + EXPECT_EQ(42, it->second); + } + { + // Look up a single char, unsuccessfully. + auto it = arr->find('g'); + EXPECT_TRUE(it == arr->end()); + } + { + // Insert a new char key. + auto res = arr->emplace('h', static_cast(123)); + EXPECT_TRUE(res.second); + EXPECT_TRUE(res.first != arr->end()); + // Look up the string version. + EXPECT_EQ(123, arr->find("h")->second); + } + { + // Fail to emplace an existing key. 
+ auto res = arr->emplace('f', static_cast(123)); + EXPECT_FALSE(res.second); + EXPECT_TRUE(res.first != arr->end()); + } + + for (auto it : *arr) free(it.first); +} diff --git a/folly/test/AtomicHashMapTest.cpp b/folly/test/AtomicHashMapTest.cpp index 63523eda..51f0b102 100644 --- a/folly/test/AtomicHashMapTest.cpp +++ b/folly/test/AtomicHashMapTest.cpp @@ -29,6 +29,7 @@ using std::vector; using std::string; using folly::AtomicHashMap; using folly::AtomicHashArray; +using folly::StringPiece; // Tunables: DEFINE_double(targetLoadFactor, 0.75, "Target memory utilization fraction."); @@ -113,6 +114,69 @@ static int genVal(int key) { return key / 3; } +static bool legalKey(const char* a); + +struct EqTraits { + bool operator()(const char* a, const char* b) { + return legalKey(a) && (strcmp(a, b) == 0); + } + bool operator()(const char* a, const char& b) { + return legalKey(a) && (a[0] != '\0') && (a[0] == b); + } + bool operator()(const char* a, const StringPiece b) { + return legalKey(a) && + (strlen(a) == b.size()) && (strcmp(a, b.begin()) == 0); + } +}; + +struct HashTraits { + size_t operator()(const char* a) { + size_t result = 0; + while (a[0] != 0) result += static_cast(*(a++)); + return result; + } + size_t operator()(const char& a) { + return static_cast(a); + } + size_t operator()(const StringPiece a) { + size_t result = 0; + for (const auto& ch : a) result += static_cast(ch); + return result; + } +}; + +typedef AtomicHashMap AHMCstrInt; +AHMCstrInt::Config cstrIntCfg; + +static bool legalKey(const char* a) { + return a != cstrIntCfg.emptyKey && + a != cstrIntCfg.lockedKey && + a != cstrIntCfg.erasedKey; +} + +TEST(Ahm, BasicLookup) { + AHMCstrInt myMap(1024, cstrIntCfg); + EXPECT_TRUE(myMap.begin() == myMap.end()); + myMap.insert(std::make_pair("f", 42)); + EXPECT_EQ(42, myMap.find("f")->second); + { + // Look up a single char, successfully. + auto it = myMap.find('f'); + EXPECT_EQ(42, it->second); + } + { + // Look up a single char, unsuccessfully. + auto it = myMap.find('g'); + EXPECT_TRUE(it == myMap.end()); + } + { + // Look up a string piece, successfully. + const StringPiece piece("f"); + auto it = myMap.find(piece); + EXPECT_EQ(42, it->second); + } +} + TEST(Ahm, grow) { VLOG(1) << "Overhead: " << sizeof(AHArrayT) << " (array) " << sizeof(AHMapT) + sizeof(AHArrayT) << " (map/set) Bytes.";
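Example usage (a minimal sketch appended for illustration, not part of the diff): the string-interning pattern from the summary, modeled on the new TEST(Aha, LookupAny) and TEST(Ahm, BasicLookup) cases above. InternMap, InternHash, InternEq, InternConvert, internId and the toy hash are assumed names for this sketch only, not folly APIs; just AtomicHashMap, AtomicHashArrayQuadraticProbeFcn, StringPiece, and the new LookupKeyT/LookupHashFcn/LookupEqualFcn/LookupKeyToKeyFcn parameters come from this change.

// Sketch only (assumed names, not part of this diff): the table owns
// malloc'ed char* keys, callers probe with folly::StringPiece, and the key
// is only materialized (strndup'd) when emplace() actually inserts it.

#include <string.h>

#include <cstdint>
#include <memory>

#include <folly/AtomicHashMap.h>
#include <folly/Range.h>

using folly::AtomicHashMap;
using folly::AtomicHashArrayQuadraticProbeFcn;
using folly::StringPiece;

static bool legalKey(char* a);  // defined once the map's Config exists

struct InternEq {
  // Per the new contract: stored key (KeyT) first, lookup key second.
  bool operator()(char* a, char* b) const {
    return legalKey(a) && strcmp(a, b) == 0;
  }
  bool operator()(char* a, StringPiece b) const {
    return legalKey(a) && strlen(a) == b.size() &&
      memcmp(a, b.begin(), b.size()) == 0;
  }
};

struct InternHash {
  size_t operator()(char* a) const { return (*this)(StringPiece(a)); }
  size_t operator()(StringPiece a) const {
    size_t h = 0;
    for (char c : a) h = h * 131 + static_cast<unsigned char>(c);
    return h;
  }
};

struct InternConvert {
  // Runs only when emplace() claims a cell for a genuinely new key.
  char* operator()(StringPiece a) const {
    return strndup(a.begin(), a.size());
  }
};

typedef AtomicHashMap<char*, int64_t, InternHash, InternEq,
                      std::allocator<char>,
                      AtomicHashArrayQuadraticProbeFcn,
                      InternConvert> InternMap;
InternMap::Config internCfg;

static bool legalKey(char* a) {
  return a != internCfg.emptyKey &&
    a != internCfg.lockedKey &&
    a != internCfg.erasedKey;
}

// Returns the id for 's', interning it on first sight.  Neither the find()
// probe nor a hit on an existing key allocates; only a winning insert does.
int64_t internId(InternMap& map, StringPiece s, int64_t nextId) {
  auto it = map.find(s);              // heterogeneous lookup, no strndup
  if (it != map.end()) {
    return it->second;
  }
  auto res = map.emplace(s, nextId);  // converts via InternConvert iff new
  return res.first->second;
}

With the previous interface the caller had to materialize a char* before every lookup, or switch on a tagged union KeyT; the LookupKeyT/LookupHashFcn/LookupEqualFcn/LookupKeyToKeyFcn parameters remove that cost on the common hit path, which is what the simplified HHVM static string table relies on.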