diff --git a/folly/AtomicHashMap-inl.h b/folly/AtomicHashMap-inl.h
index f2738649..264064b7 100644
--- a/folly/AtomicHashMap-inl.h
+++ b/folly/AtomicHashMap-inl.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2012 Facebook, Inc.
+ * Copyright 2014 Facebook, Inc.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -18,20 +18,23 @@
 #error "This should only be included by AtomicHashMap.h"
 #endif

-#include "folly/detail/AtomicHashUtils.h"
+#include <folly/detail/AtomicHashUtils.h>

 namespace folly {

-template <typename KeyT, typename ValueT, typename HashFcn>
-const typename AtomicHashMap<KeyT, ValueT, HashFcn>::Config
-AtomicHashMap<KeyT, ValueT, HashFcn>::defaultConfig;
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+const typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::Config
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::defaultConfig;

 // AtomicHashMap constructor -- Atomic wrapper that allows growth
 // This class has a lot of overhead (184 Bytes) so only use for big maps
-template <typename KeyT, typename ValueT, typename HashFcn>
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 AtomicHashMap(size_t size, const Config& config)
-  : kGrowthFrac_(1.0 - config.maxLoadFactor) {
+  : kGrowthFrac_(config.growthFactor < 0 ?
+                 1.0 - config.maxLoadFactor : config.growthFactor) {
   CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
   subMaps_[0].store(SubMap::create(size, config).release(),
     std::memory_order_relaxed);
@@ -43,30 +46,45 @@ AtomicHashMap(size_t size, const Config& config)
 }

 // insert --
-template <typename KeyT, typename ValueT, typename HashFcn>
-std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn>::iterator,bool>
-AtomicHashMap<KeyT, ValueT, HashFcn>::
-insert(const value_type& r) {
-  SimpleRetT ret = insertInternal(r);
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
+                                 EqualFcn, Allocator>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+insert(key_type k, const mapped_type& v) {
+  SimpleRetT ret = insertInternal(k,v);
+  SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
+  return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
+                        ret.success);
+}
+
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+std::pair<typename AtomicHashMap<KeyT, ValueT, HashFcn,
+                                 EqualFcn, Allocator>::iterator, bool>
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+insert(key_type k, mapped_type&& v) {
+  SimpleRetT ret = insertInternal(k, std::move(v));
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
   return std::make_pair(iterator(this, ret.i, subMap->makeIter(ret.j)),
                         ret.success);
 }

 // insertInternal -- Allocates new sub maps as existing ones fill up.
-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn>::
-insertInternal(const value_type& r) {
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+template <typename T>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
+insertInternal(key_type key, T&& value) {
  beginInsertInternal:
   int nextMapIdx = // this maintains our state
     numMapsAllocated_.load(std::memory_order_acquire);
-  uint32_t idx = 0;
   typename SubMap::SimpleRetT ret;
   FOR_EACH_RANGE(i, 0, nextMapIdx) {
     // insert in each map successively. If one succeeds, we're done!
     SubMap* subMap = subMaps_[i].load(std::memory_order_relaxed);
-    ret = subMap->insertInternal(r);
+    ret = subMap->insertInternal(key, std::forward<T>(value));
     if (ret.idx == subMap->capacity_) {
       continue; //map is full, so try the next one
     }
@@ -121,7 +139,7 @@ insertInternal(const value_type& r) {
     // just did a spin wait with an acquire load on numMapsAllocated_.
     SubMap* loadedMap = subMaps_[nextMapIdx].load(std::memory_order_relaxed);
     DCHECK(loadedMap && loadedMap != (SubMap*)kLockedPtr_);
-    ret = loadedMap->insertInternal(r);
+    ret = loadedMap->insertInternal(key, std::forward<T>(value));
     if (ret.idx != loadedMap->capacity_) {
       return SimpleRetT(nextMapIdx, ret.idx, ret.success);
     }
@@ -131,29 +149,33 @@ insertInternal(const value_type& r) {
 }

 // find --
-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::iterator
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 find(KeyT k) {
   SimpleRetT ret = findInternal(k);
-  if (ret.i >= numMapsAllocated_.load(std::memory_order_acquire)) {
+  if (!ret.success) {
     return end();
   }
   SubMap* subMap = subMaps_[ret.i].load(std::memory_order_relaxed);
   return iterator(this, ret.i, subMap->makeIter(ret.j));
 }

-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::const_iterator
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT,
+                       HashFcn, EqualFcn, Allocator>::const_iterator
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 find(KeyT k) const {
   return const_cast<AtomicHashMap*>(this)->find(k);
 }

 // findInternal --
-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 findInternal(const KeyT k) const {
   SubMap* const primaryMap = subMaps_[0].load(std::memory_order_relaxed);
   typename SubMap::SimpleRetT ret = primaryMap->findInternal(k);
@@ -163,7 +185,7 @@ findInternal(const KeyT k) const {
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
   FOR_EACH_RANGE(i, 1, numMaps) {
     // Check each map successively. If one succeeds, we're done!
-    SubMap* thisMap = subMaps_[i].load(std::memory_order_release);
+    SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
     ret = thisMap->findInternal(k);
     if (LIKELY(ret.idx != thisMap->capacity_)) {
       return SimpleRetT(i, ret.idx, ret.success);
@@ -174,9 +196,10 @@ findInternal(const KeyT k) const {
 }

 // findAtInternal -- see encodeIndex() for details.
-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::SimpleRetT
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::SimpleRetT
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 findAtInternal(uint32_t idx) const {
   uint32_t subMapIdx, subMapOffset;
   if (idx & kSecondaryMapBit_) {
@@ -194,9 +217,10 @@ findAtInternal(uint32_t idx) const {
 }

 // erase --
-template <typename KeyT, typename ValueT, typename HashFcn>
-typename AtomicHashMap<KeyT, ValueT, HashFcn>::size_type
-AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+typename AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::size_type
+AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 erase(const KeyT k) {
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
   FOR_EACH_RANGE(i, 0, numMaps) {
@@ -210,8 +234,9 @@ erase(const KeyT k) {
 }

 // capacity -- summation of capacities of all submaps
-template <typename KeyT, typename ValueT, typename HashFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 capacity() const {
   size_t totalCap(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -223,8 +248,9 @@ capacity() const {

 // spaceRemaining --
 // number of new insertions until current submaps are all at max load
-template <typename KeyT, typename ValueT, typename HashFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 spaceRemaining() const {
   size_t spaceRem(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -240,8 +266,9 @@ spaceRemaining() const {

 // clear -- Wipes all keys and values from primary map and destroys
 // all secondary maps. Not thread safe.
-template <typename KeyT, typename ValueT, typename HashFcn>
-void AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+void AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 clear() {
   subMaps_[0].load(std::memory_order_relaxed)->clear();
   int const numMaps = numMapsAllocated_
@@ -256,8 +283,9 @@ clear() {
 }

 // size --
-template <typename KeyT, typename ValueT, typename HashFcn>
-size_t AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+size_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 size() const {
   size_t totalSize(0);
   int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
@@ -285,8 +313,9 @@ size() const {
 //  31              1
 //  27-30           which subMap
 //  0-26            subMap offset (index_ret input)
-template <typename KeyT, typename ValueT, typename HashFcn>
-inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn>::
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
+inline uint32_t AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::
 encodeIndex(uint32_t subMap, uint32_t offset) {
   DCHECK_EQ(offset & kSecondaryMapBit_, 0); // offset can't be too big
   if (subMap == 0) return offset;
@@ -302,9 +331,10 @@ encodeIndex(uint32_t subMap, uint32_t offset) {

 // Iterator implementation

-template <typename KeyT, typename ValueT, typename HashFcn>
+template <typename KeyT, typename ValueT,
+          typename HashFcn, typename EqualFcn, typename Allocator>
 template <class ContT, class IterVal, class SubIt>
-struct AtomicHashMap<KeyT, ValueT, HashFcn>::ahm_iterator
+struct AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn, Allocator>::ahm_iterator
     : boost::iterator_facade<ahm_iterator<ContT, IterVal, SubIt>,
                              IterVal,
                              boost::forward_traversal_tag>
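
Usage note (appended, not part of the diff): the change above replaces the old insert(const value_type&) with two overloads, insert(key_type, const mapped_type&) and insert(key_type, mapped_type&&), both forwarding through the templated insertInternal(). The sketch below shows how the new call sites look. It is an illustration only; it assumes a folly build from this era with the default HashFcn, EqualFcn, and Allocator template arguments, and the key and value choices are arbitrary.

    #include <folly/AtomicHashMap.h>

    #include <cstdint>
    #include <string>
    #include <utility>

    int main() {
      // Capacity is declared up front; when a submap reaches its maximum
      // load factor, insertInternal() above chains in a secondary submap
      // sized by kGrowthFrac_ rather than rehashing in place.
      folly::AtomicHashMap<int64_t, std::string> map(4096);

      // Note: the sentinel key values configured in Config (the
      // empty/locked/erased keys) are reserved and must not be inserted;
      // ordinary keys like these avoid them.
      std::string v = "hello";
      map.insert(10, v);             // lvalue overload: copies the mapped value
      map.insert(11, std::move(v));  // rvalue overload added by this diff

      // insert() returns std::pair<iterator, bool>; second is false when
      // the key already exists, and the stored value is left unchanged.
      bool inserted = map.insert(10, std::string("ignored")).second;
      (void)inserted;  // false here, since key 10 was inserted above

      auto it = map.find(11);
      if (it != map.end()) {
        // it->second is the moved-in "hello".
      }
      return 0;
    }

The rvalue overload matters because ValueT can be an arbitrary (non-atomic) type: moving avoids a copy of a potentially large mapped value into the hash array, which is the point of threading T&& through insertInternal() with std::forward.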