typedef typename base_class::stat stat; ///< Internal statistics
static CDS_CONSTEXPR bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false)
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
public:
typedef cds::urcu::gc<RCU> gc; ///< Garbage collector
+ typedef Traits traits; ///< List traits
#ifdef CDS_DOXYGEN_INVOKED
typedef Key key_type ; ///< Key type
typedef Value mapped_type ; ///< Type of value stored in the list
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
typedef typename base_class::stat stat; ///< Internal statistics
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::atomic_node_ptr head_type;
- //@endcond
-
- protected:
- //@cond
- template <typename K>
- static node_type * alloc_node(const K& key)
- {
- return cxx_allocator().New( key );
- }
-
- template <typename K, typename V>
- static node_type * alloc_node( const K& key, const V& val )
- {
- return cxx_allocator().New( key, val );
- }
-
- template <typename K, typename... Args>
- static node_type * alloc_node( K&& key, Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<K>(key), std::forward<Args>(args)... );
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
        struct node_disposer {
            void operator()( node_type * pNode )
            {
                free_node( pNode );
            }
        };
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_pHead;
- }
-
- head_type const& head() const
- {
- return base_class::m_pHead;
- }
//@endcond
protected:
}
//@}
- protected:
- //@cond
- iterator node_to_iterator( node_type * pNode )
- {
- if ( pNode )
- return iterator( *pNode );
- return end();
- }
- //@endcond
-
public:
/// Default constructor
/**
{
return base_class::find_at( refHead, key, cmp );
}
+
+ template <typename K>
+ static node_type * alloc_node( const K& key )
+ {
+ return cxx_allocator().New( key );
+ }
+
+ template <typename K, typename V>
+ static node_type * alloc_node( const K& key, const V& val )
+ {
+ return cxx_allocator().New( key, val );
+ }
+
+ template <typename K, typename... Args>
+ static node_type * alloc_node( K&& key, Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_pHead;
+ }
+
+ head_type const& head() const
+ {
+ return base_class::m_pHead;
+ }
+
+ iterator node_to_iterator( node_type * pNode )
+ {
+ if ( pNode )
+ return iterator( *pNode );
+ return end();
+ }
//@endcond
};
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelKVList<
+ gc
+ , key_type, mapped_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::atomic_node_ptr head_type;
+
+ struct node_disposer {
+ void operator()( node_type * pNode )
+ {
+ free_node( pNode );
+ }
+ };
+ typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
//@endcond
public:
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr;
- protected:
- //@cond
- template <typename K>
- static node_type * alloc_node(const K& key)
- {
- return cxx_allocator().New( key );
- }
-
- template <typename K, typename V>
- static node_type * alloc_node( const K& key, const V& val )
- {
- return cxx_allocator().New( key, val );
- }
-
- template <typename K, typename... Args>
- static node_type * alloc_node( K&& key, Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<K>(key), std::forward<Args>(args)...);
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
-
- struct node_disposer {
- void operator()( node_type * pNode )
- {
- free_node( pNode );
- }
- };
- typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_pHead;
- }
-
- head_type& head() const
- {
- return const_cast<head_type&>( base_class::m_pHead );
- }
- //@endcond
protected:
//@cond
return raw_ptr( base_class::get_at( refHead, val, cmp ));
}
+ template <typename K>
+ static node_type * alloc_node( const K& key )
+ {
+ return cxx_allocator().New( key );
+ }
+
+ template <typename K, typename V>
+ static node_type * alloc_node( const K& key, const V& val )
+ {
+ return cxx_allocator().New( key, val );
+ }
+
+ template <typename K, typename... Args>
+ static node_type * alloc_node( K&& key, Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<K>( key ), std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_pHead;
+ }
+
+ head_type& head() const
+ {
+ return const_cast<head_type&>(base_class::m_pHead);
+ }
//@endcond
};
typedef typename traits::allocator allocator; ///< Bucket table allocator
typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+ /// Guarded pointer - a result of \p get() and \p extract() functions
+ typedef typename ordered_list::guarded_ptr guarded_ptr;
+#endif
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
static CDS_CONSTEXPR const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required
-#ifdef CDS_DOXYGEN_INVOKED
- /// Wrapped internal statistics for \p ordered_list
- typedef implementatin_specific bucket_stat;
-#else
+ //@cond
typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
-#endif
-#ifdef CDS_DOXYGEN_INVOKED
- /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
- typedef modified_ordered_list internal_bucket_type;
-#else
typedef typename ordered_list::template rebind_traits<
cds::opt::item_counter< cds::atomicity::empty_item_counter >
, cds::opt::stat< typename bucket_stat::wrapped_stat >
>::type internal_bucket_type;
-#endif
- /// Guarded pointer - a result of \p get() and \p extract() functions
typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
-
- //@cond
- /// Bucket table allocator
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ typedef typename bucket_stat::stat stat;
//@endcond
protected:
//@cond
const size_t m_nHashBitmask;
- internal_bucket_type * m_Buckets; ///< bucket table
+ internal_bucket_type* m_Buckets; ///< bucket table
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H
{
public:
typedef cds::gc::nogc gc; ///< No garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list used as a bucket implementation
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
typedef Traits traits; ///< Map traits
- typedef typename bucket_type::key_type key_type; ///< key type
- typedef typename bucket_type::mapped_type mapped_type; ///< type of value to be stored in the map
- typedef typename bucket_type::value_type value_type; ///< Pair used as the some functor's argument
+ typedef typename ordered_list::key_type key_type; ///< key type
+ typedef typename ordered_list::mapped_type mapped_type; ///< type of value to be stored in the map
+    typedef typename ordered_list::value_type value_type; ///< Pair used as an argument of some functors
- typedef typename bucket_type::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
- typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
+
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+ // atomicity::empty_item_counter is not allowed as a item counter
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
//@cond
- typedef typename bucket_type::iterator bucket_iterator;
- typedef typename bucket_type::const_iterator bucket_const_iterator;
- //@endcond
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
- private:
+ /// Bucket table allocator
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ typedef typename internal_bucket_type::iterator bucket_iterator;
+ typedef typename internal_bucket_type::const_iterator bucket_const_iterator;
+ //@endcond
+
+ public:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename bucket_stat::stat stat;
//@endcond
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename K>
- size_t hash_value( K const & key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
-
- /// Returns the bucket (ordered list) for \p key
- template <typename K>
- bucket_type& bucket( K const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ internal_bucket_type* m_Buckets; ///< bucket table
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
//@cond
template <bool IsConst>
- class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst >
+ class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst >
{
- typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class;
+ typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class;
friend class MichaelHashMap;
protected:
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initialize the map
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
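+            // m_Buckets is raw storage obtained from the allocator in the initializer list above,
+            // so each bucket must be constructed in place here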
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash set and destroys it
~MichaelHashMap()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node with key and default value
template <typename K>
iterator insert( const K& key )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert( key );
if ( it != refBucket.end() ) {
template <typename K, typename V>
iterator insert( K const& key, V const& val )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert( key, val );
if ( it != refBucket.end() ) {
template <typename K, typename Func>
iterator insert_with( const K& key, Func func )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.insert_with( key, func );
if ( it != refBucket.end() ) {
template <typename K, typename... Args>
iterator emplace( K&& key, Args&&... args )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.emplace( std::forward<K>(key), std::forward<Args>(args)... );
if ( it != refBucket.end() ) {
template <typename K>
std::pair<iterator, bool> update( const K& key, bool bAllowInsert = true )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
std::pair<bucket_iterator, bool> ret = refBucket.update( key, bAllowInsert );
if ( ret.second )
template <typename K>
iterator contains( K const& key )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.contains( key );
if ( it != refBucket.end() )
template <typename K, typename Less>
iterator contains( K const& key, Less pred )
{
- bucket_type& refBucket = bucket( key );
+ internal_bucket_type& refBucket = bucket( key );
bucket_iterator it = refBucket.contains( key, pred );
if ( it != refBucket.end() )
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashMap cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename K>
+ size_t hash_value( K const & key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename K>
+ internal_bucket_type& bucket( K const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
+
+ private:
+ //@cond
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ }
+
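+        // construct_bucket() is dispatched on Stat::empty: a bucket whose statistics type is the empty
+        // (no-op) one is default-constructed; otherwise the bucket is constructed over the map-level m_Stat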
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::container
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H
{
public:
typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list used as a bucket implementation
+ typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation
typedef Traits traits; ///< Map traits
- typedef typename bucket_type::key_type key_type ; ///< key type
- typedef typename bucket_type::mapped_type mapped_type ; ///< value type
- typedef typename bucket_type::value_type value_type ; ///< key/value pair stored in the list
- typedef typename bucket_type::key_comparator key_comparator ; ///< key comparison functor
+ typedef typename ordered_list::key_type key_type; ///< key type
+ typedef typename ordered_list::mapped_type mapped_type; ///< value type
+ typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the list
+ typedef typename ordered_list::key_comparator key_comparator;///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+ typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ /// Type of \p get() member function return value
+ typedef typename ordered_list::raw_ptr raw_ptr;
+ typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
+#endif
/// Hash functor for \ref key_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- /// Bucket table allocator
- typedef cds::details::Allocator< bucket_type, typename traits::allocator > bucket_table_allocator;
-
- typedef typename bucket_type::rcu_lock rcu_lock; ///< RCU scoped lock
- typedef typename bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
/// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
- static CDS_CONSTEXPR const bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal;
- /// Type of \p get() member function return value
- typedef typename bucket_type::raw_ptr raw_ptr;
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
+
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+ // atomicity::empty_item_counter is not allowed as a item counter
+ static_assert(!std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- bucket_type * m_Buckets; ///< bucket table
+ //@cond
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
- private:
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type;
+
+ /// Bucket table allocator
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
+
+ public:
//@cond
- const size_t m_nHashBitmask;
+ typedef typename bucket_stat::stat stat;
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr;
+ typedef typename internal_bucket_type::raw_ptr raw_ptr;
+ typedef typename internal_bucket_type::rcu_lock rcu_lock;
//@endcond
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
-
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- bucket_type& bucket( Q const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
- template <typename Q>
- bucket_type const& bucket( Q const& key ) const
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ internal_bucket_type * m_Buckets; ///< bucket table
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
//@cond
template <bool IsConst>
- class iterator_type: private cds::intrusive::michael_set::details::iterator< bucket_type, IsConst >
+ class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst >
{
- typedef cds::intrusive::michael_set::details::iterator< bucket_type, IsConst > base_class;
+ typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class;
friend class MichaelHashMap;
protected:
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initializes the map
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash map
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
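+            // m_Buckets is raw storage obtained from the allocator in the initializer list above,
+            // so each bucket must be constructed in place here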
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash map and destroys it
~MichaelHashMap()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node with key and default value
        If the item is not found, the function returns an empty \p exempt_ptr.
        The function just excludes the key from the map and returns a pointer to the item found.
- Depends on \p bucket_type you should or should not lock RCU before calling of this function:
+        Depending on \p ordered_list, you should or should not lock RCU before calling this function:
            - for the map based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked
            - for the map based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked
See ordered list implementation for details.
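+        A minimal usage sketch (the map object \p theMap, its type alias \p map_type and the key value
+        are illustrative only; \p theMap is assumed to be based on \p MichaelKVList, so RCU is not locked here):
+        \code
+        map_type::exempt_ptr p = theMap.extract( 5 );
+        if ( p ) {
+            // use p->first / p->second here; the extracted node is reclaimed
+            // after the RCU grace period once p is destroyed
+        }
+        \endcode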
/** \anchor cds_nonintrusive_MichaelHashMap_rcu_get
        The function searches for the item with key equal to \p key and returns a pointer to the item found.
        If \p key is not found, it returns \p nullptr.
- Note the type of returned value depends on underlying \p bucket_type.
+    Note the type of the returned value depends on the underlying \p ordered_list.
        For details, see the documentation of the ordered list you use.
        Note that the compare functor should accept a parameter of type \p K that may differ from \p key_type.
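+        A hedged usage sketch (\p theMap, \p map_type and the key are illustrative; as with other RCU-based
+        containers, the returned pointer is assumed to be valid only inside the RCU critical section):
+        \code
+        {
+            map_type::rcu_lock l;                // enter RCU critical section
+            map_type::raw_ptr p = theMap.get( 5 );
+            if ( p ) {
+                // p may be dereferenced only while the RCU lock is held
+            }
+        }   // leaving the critical section: p must not be used after this point
+        \endcode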
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashMap cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( Q const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ template <typename Q>
+ internal_bucket_type const& bucket( Q const& key ) const
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ //@endcond
+ private:
+ //@cond
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ }
+
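+        // construct_bucket() is dispatched on Stat::empty: a bucket whose statistics type is the empty
+        // (no-op) one is default-constructed; otherwise the bucket is constructed over the map-level m_Stat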
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+ //@endcond
};
}} // namespace cds::container
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
"cds::atomicity::empty_item_counter is not allowed as a item counter");
-#ifdef CDS_DOXYGEN_INVOKED
- /// Wrapped internal statistics for \p ordered_list
- typedef implementatin_specific bucket_stat;
-#else
+ //@cond
typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
-#endif
-#ifdef CDS_DOXYGEN_INVOKED
- /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
- typedef modified_ordered_list internal_bucket_type;
-#else
typedef typename ordered_list::template rebind_traits<
cds::opt::item_counter< cds::atomicity::empty_item_counter >
, cds::opt::stat< typename bucket_stat::wrapped_stat >
>::type internal_bucket_type;
-#endif
- /// Guarded pointer - a result of \p get() and \p extract() functions
- typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
-
- //@cond
/// Bucket table allocator
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+
+ typedef typename bucket_stat::stat stat;
//@endcond
+ /// Guarded pointer - a result of \p get() and \p extract() functions
+ typedef typename internal_bucket_type::guarded_ptr guarded_ptr;
+
protected:
//@cond
size_t const m_nHashBitmask;
internal_bucket_type * m_Buckets; ///< bucket table
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H
typedef typename ordered_list::value_type value_type; ///< type of value stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename internal_bucket_type::const_iterator bucket_const_iterator;
//@endcond
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
protected:
//@cond
const size_t m_nHashBitmask;
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
internal_bucket_type* m_Buckets; ///< bucket table
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
- typedef typename ordered_list::stat stat; ///< Internal statistics
+#ifdef CDS_DOXYGEN_INVOKED
+ typedef typename ordered_list::stat stat; ///< Internal statistics
+ typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ typedef typename ordered_list::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
"atomicity::empty_item_counter is not allowed as a item counter");
-#ifdef CDS_DOXYGEN_INVOKED
- /// Wrapped internal statistics for \p ordered_list
- typedef implementatin_specific bucket_stat;
-
- /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
- typedef modified_ordered_list internal_bucket_type;
-#else
+ //@cond
typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
typedef typename ordered_list::template rebind_traits<
using base_class::insert_node;
using base_class::node_to_value;
};
-#endif
- typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
- typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr;
+ typedef typename internal_bucket_type::raw_ptr raw_ptr;
+ typedef typename bucket_stat::stat stat;
+ //@endcond
protected:
//@cond
item_counter m_ItemCounter; ///< Item counter
hash m_HashFunctor; ///< Hash functor
internal_bucket_type* m_Buckets; ///< bucket table
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
typedef typename ordered_list::value_type value_type ; ///< type of value to be stored in the set
typedef typename ordered_list::key_comparator key_comparator ; ///< key comparing functor
typedef typename ordered_list::disposer disposer ; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat ; ///< Internal statistics
+#endif
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
>::type internal_bucket_type;
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
- hash m_HashFunctor; ///< Hash functor
- size_t const m_nHashBitmask;
- internal_bucket_type* m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
+ protected:
+ //@cond
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the set
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \p value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
>::type internal_bucket_type;
typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
+ //@endcond
- hash m_HashFunctor; ///< Hash functor
- const size_t m_nHashBitmask;
- internal_bucket_type * m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ public:
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
+ protected:
+ //@cond
+ hash m_HashFunctor; ///< Hash functor
+ const size_t m_nHashBitmask;
+ internal_bucket_type * m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
protected:
typedef typename ordered_list::value_type value_type; ///< type of value stored in the list
typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
+#ifdef CDS_DOXYGEN_INVOKED
typedef typename ordered_list::stat stat; ///< Internal statistics
+#endif
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ //@cond
+ typedef typename bucket_stat::stat stat;
+ //@endcond
+
private:
//@cond
- hash m_HashFunctor; ///< Hash functor
- size_t const m_nHashBitmask;
- internal_bucket_type* m_Buckets; ///< bucket table
- item_counter m_ItemCounter; ///< Item counter
- typename bucket_stat::stat m_Stat; ///< Internal statistics
+ hash m_HashFunctor; ///< Hash functor
+ size_t const m_nHashBitmask;
+ internal_bucket_type* m_Buckets; ///< bucket table
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
//@endcond
public:
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelIterableSet_DHP, wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelIterableSet_HP, wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_DHP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_HP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_HP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelLazySet_NoGC, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_DHP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_HP, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, base_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, member_cmp )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( IntrusiveMichaelSet_NoGC, member_wrapped_stat )
set_type s( kSize, 2 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, base_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, member_cmp )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelLazySet, member_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, base_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, member_cmp )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( IntrusiveMichaelSet, member_wrapped_stat )
set_type s( TestFixture::kSize, 2 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableMap_DHP, wrapped_stat )
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableMap_HP, wrapped_stat )
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
map_type m( kSize, 2 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazyMap_DHP, wrapped_stat )
map_type m( kSize, 2 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
map_type m( kSize, 2 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazyMap_HP, wrapped_stat )
map_type m( kSize, 2 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_nogc.h"
typedef hash1 hash;
typedef cds::atomicity::item_counter item_counter;
};
- typedef cc::MichaelHashMap< gc_type, list_type, map_traits >map_type;
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ }
+
+ TEST_F( MichaelLazyMap_NoGC, stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ TEST_F( MichaelLazyMap_NoGC, wrapped_stat )
+ {
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
map_type m( kSize, 4 );
test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelMap_DHP, wrapped_stat )
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelMap_HP, wrapped_stat )
};
typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
- map_type s( kSize, 8 );
- test( s );
+ map_type m( kSize, 8 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
}
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "test_map_nogc.h"
test( m );
}
+ TEST_F( MichaelMap_NoGC, stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ TEST_F( MichaelMap_NoGC, wrapped_stat )
+ {
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef base_class::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< gc_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashMap< gc_type, list_type, map_traits > map_type;
+
+ map_type m( kSize, 4 );
+ test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MICHAEL_LAZY_RCU_H
#define CDSUNIT_MAP_TEST_MICHAEL_LAZY_RCU_H
this->test( m );
}
+ TYPED_TEST_P( MichaelLazyMap, stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ struct set_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+        typedef cc::MichaelHashMap< rcu_type, list_type, set_traits > map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ TYPED_TEST_P( MichaelLazyMap, wrapped_stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ struct set_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+        typedef cc::MichaelHashMap< rcu_type, list_type, set_traits > map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelLazyMap,
- compare, less, cmpmix, backoff, seq_cst, mutex
+ compare, less, cmpmix, backoff, seq_cst, mutex, stat, wrapped_stat
);
} // namespace
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_MAP_TEST_MICHAEL_MICHAEL_RCU_H
#define CDSUNIT_MAP_TEST_MICHAEL_MICHAEL_RCU_H
this->test( m );
}
+ TYPED_TEST_P( MichaelMap, stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+        typedef cc::MichaelHashMap< rcu_type, list_type, map_traits > map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
+
+ TYPED_TEST_P( MichaelMap, wrapped_stat )
+ {
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::key_type key_type;
+ typedef typename TestFixture::value_type value_type;
+
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelKVList< rcu_type, key_type, value_type, list_traits > list_type;
+
+ struct map_traits: public cc::michael_map::traits
+ {
+ typedef typename TestFixture::hash1 hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+        typedef cc::MichaelHashMap< rcu_type, list_type, map_traits > map_type;
+
+ map_type m( TestFixture::kSize, 4 );
+ this->test( m );
+ EXPECT_GE( m.statistics().m_nInsertSuccess, 0 );
+ }
REGISTER_TYPED_TEST_CASE_P( MichaelMap,
- compare, less, cmpmix, backoff, seq_cst
+ compare, less, cmpmix, backoff, seq_cst, stat, wrapped_stat
);
}
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableSet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelIterableSet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelLazySet_NoGC, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_DHP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_HP, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TEST_F( MichaelSet_NoGC, wrapped_stat )
set_type s( kSize, 4 );
test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
} // namespace
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( MichaelLazySet, wrapped_stat )
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
TYPED_TEST_P( MichaelSet, wrapped_stat )
set_type s( TestFixture::kSize, 4 );
this->test( s );
+ EXPECT_GE( s.statistics().m_nInsertSuccess, 0 );
}
// GCC 5: All test names should be written on single line, otherwise a runtime error will be encountered like as