-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
-#ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H
-#define __CDS_INTRUSIVE_SPLIT_LIST_RCU_H
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
+#define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
+
+#include <limits>
#include <cds/intrusive/details/split_list_base.h>
#include <cds/details/binary_functor_wrapper.h>
+#include <cds/details/type_padding.h>
namespace cds { namespace intrusive {
The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
the comparing functor for the type \p T and other features specific for the ordered list.
- \p Traits - set traits, default isd \p split_list::traits.
- Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
+ Instead of defining \p Traits struct you can use option-based syntax provided by \p split_list::make_traits metafunction.
- @note About reqired features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
+ @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
\par How to use
Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
protected:
//@cond
- typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
+ typedef split_list::details::rebind_list_traits<OrderedList, traits> ordered_list_adapter;
//@endcond
public:
# ifdef CDS_DOXYGEN_INVOKED
typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
# else
- typedef typename wrapped_ordered_list::result ordered_list;
+ typedef typename ordered_list_adapter::result ordered_list;
# endif
typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
typedef typename ordered_list::disposer disposer; ///< Node disposer functor
typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
/// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
+ typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal
typedef typename traits::item_counter item_counter; ///< Item counter type
typedef typename traits::back_off back_off; ///< back-off strategy for spinning
typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
typedef typename traits::stat stat; ///< Internal statistics
+ // GC and OrderedList::gc must be the same
+ static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+        // atomicity::empty_item_counter is not allowed as an item counter
+ static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
+ "cds::atomicity::empty_item_counter is not allowed as a item counter");
+
protected:
+ //@cond
typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
typedef split_list::node<list_node_type> node_type; ///< split-list node type
- typedef node_type dummy_node_type; ///< dummy node type
/// Split-list node traits
/**
This traits is intended for converting between underlying ordered list node type \ref list_node_type
and split-list node type \ref node_type
*/
- typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
+ typedef typename ordered_list_adapter::node_traits node_traits;
- //@cond
/// Bucket table implementation
typedef typename split_list::details::bucket_table_selector<
traits::dynamic_bucket_table
, gc
- , dummy_node_type
+ , typename ordered_list_adapter::aux_node
, opt::allocator< typename traits::allocator >
, opt::memory_model< memory_model >
+ , opt::free_list< typename traits::free_list >
>::type bucket_table;
+ typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type
+
//@endcond
protected:
class ordered_list_wrapper: public ordered_list
{
typedef ordered_list base_class;
- typedef typename base_class::auxiliary_head bucket_head_type;
+ typedef typename base_class::auxiliary_head bucket_head_type;
public:
- bool insert_at( dummy_node_type * pHead, value_type& val )
+ bool insert_at( aux_node_type * pHead, value_type& val )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Func>
- bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
+ bool insert_at( aux_node_type * pHead, value_type& val, Func f )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Func>
- std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
+ std::pair<bool, bool> update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
- return base_class::ensure_at( h, val, func );
+ return base_class::update_at( h, val, func, bAllowInsert );
}
- bool unlink_at( dummy_node_type * pHead, value_type& val )
+ bool unlink_at( aux_node_type * pHead, value_type& val )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare, typename Func>
- bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
+ bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare>
- bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
+ bool erase_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare>
- value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
+ value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare, typename Func>
- bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) const
+ bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare>
- bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp ) const
+ bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
}
template <typename Q, typename Compare>
- value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp ) const
+ raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
{
assert( pHead != nullptr );
bucket_head_type h(pHead);
return base_class::get_at( h, val, cmp );
}
- bool insert_aux_node( dummy_node_type * pNode )
+ bool insert_aux_node( aux_node_type * pNode )
{
return base_class::insert_aux_node( pNode );
}
- bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
+ bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
{
bucket_head_type h(pHead);
return base_class::insert_aux_node( h, pNode );
};
//@endcond
- protected:
- ordered_list_wrapper m_List; ///< Ordered list containing split-list items
- bucket_table m_Buckets; ///< bucket table
- atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- stat m_Stat; ///< Internal stattistics accumulator
-
- protected:
- //@cond
- typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
-
- dummy_node_type * alloc_dummy_node( size_t nHash )
- {
- m_Stat.onHeadNodeAllocated();
- return dummy_node_allocator().New( nHash );
- }
- void free_dummy_node( dummy_node_type * p )
- {
- dummy_node_allocator().Delete( p );
- m_Stat.onHeadNodeFreed();
- }
-
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key );
- }
-
- size_t bucket_no( size_t nHash ) const
- {
- return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
- }
-
- static size_t parent_bucket( size_t nBucket )
- {
- assert( nBucket > 0 );
- return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
- }
-
- dummy_node_type * init_bucket( size_t nBucket )
- {
- assert( nBucket > 0 );
- size_t nParent = parent_bucket( nBucket );
-
- dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
- if ( pParentBucket == nullptr ) {
- pParentBucket = init_bucket( nParent );
- m_Stat.onRecursiveInitBucket();
- }
-
- assert( pParentBucket != nullptr );
-
- // Allocate a dummy node for new bucket
- {
- dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
- if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
- m_Buckets.bucket( nBucket, pBucket );
- m_Stat.onNewBucket();
- return pBucket;
- }
- free_dummy_node( pBucket );
- }
-
- // Another thread set the bucket. Wait while it done
-
- // In this point, we must wait while nBucket is empty.
- // The compiler can decide that waiting loop can be "optimized" (stripped)
- // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
- //
- m_Stat.onBucketInitContenton();
- back_off bkoff;
- while ( true ) {
- dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
- if ( p != nullptr )
- return const_cast<dummy_node_type *>( p );
- bkoff();
- m_Stat.onBusyWaitBucketInit();
- }
- }
-
- dummy_node_type * get_bucket( size_t nHash )
- {
- size_t nBucket = bucket_no( nHash );
-
- dummy_node_type * pHead = m_Buckets.bucket( nBucket );
- if ( pHead == nullptr )
- pHead = init_bucket( nBucket );
-
- assert( pHead->is_dummy() );
-
- return pHead;
- }
-
- void init()
- {
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
-
- // atomicity::empty_item_counter is not allowed as a item counter
- static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
- "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
- // Initialize bucket 0
- dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
-
- // insert_aux_node cannot return false for empty list
- CDS_VERIFY( m_List.insert_aux_node( pNode ));
-
- m_Buckets.bucket( 0, pNode );
- }
-
- void inc_item_count()
- {
- size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
- if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
- {
- m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
- }
- }
-
- template <typename Q, typename Compare, typename Func>
- bool find_( Q& val, Compare cmp, Func f )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
- [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
- }
-
- template <typename Q, typename Compare>
- bool find_value( Q const& val, Compare cmp )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
- }
-
- template <typename Q, typename Compare>
- value_type * get_( Q const& val, Compare cmp )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- value_type * p = m_List.get_at( pHead, sv, cmp );
- m_Stat.onFind( p != nullptr );
- return p;
- }
-
- template <typename Q, typename Compare>
- value_type * extract_( Q const& val, Compare cmp )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- value_type * pNode = m_List.extract_at( pHead, sv, cmp );
- if ( pNode ) {
- --m_ItemCounter;
- m_Stat.onExtractSuccess();
- }
- else
- m_Stat.onExtractFailed();
- return pNode;
- }
-
- template <typename Q, typename Less>
- value_type * extract_with_( Q const& val, Less pred )
- {
- return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
- }
-
- template <typename Q, typename Compare>
- bool erase_( const Q& val, Compare cmp )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- if ( m_List.erase_at( pHead, sv, cmp ) ) {
- --m_ItemCounter;
- m_Stat.onEraseSuccess();
- return true;
- }
- m_Stat.onEraseFailed();
- return false;
- }
-
- template <typename Q, typename Compare, typename Func>
- bool erase_( Q const& val, Compare cmp, Func f )
- {
- size_t nHash = hash_value( val );
- split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
- dummy_node_type * pHead = get_bucket( nHash );
- assert( pHead != nullptr );
-
- if ( m_List.erase_at( pHead, sv, cmp, f )) {
- --m_ItemCounter;
- m_Stat.onEraseSuccess();
- return true;
- }
- m_Stat.onEraseFailed();
- return false;
- }
-
- //@endcond
-
public:
/// Initialize split-ordered list of default capacity
/**
*/
SplitListSet()
: m_nBucketCountLog2(1)
+ , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
{
init();
}
)
: m_Buckets( nItemCount, nLoadFactor )
, m_nBucketCountLog2(1)
+ , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
{
init();
}
+ /// Destroys split-list
+ ~SplitListSet()
+ {
+ m_List.clear();
+ gc::force_dispose();
+ }
+
public:
/// Inserts new node
/**
bool insert( value_type& val )
{
size_t nHash = hash_value( val );
- dummy_node_type * pHead = get_bucket( nHash );
+ aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+ node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
if ( m_List.insert_at( pHead, val )) {
inc_item_count();
bool insert( value_type& val, Func f )
{
size_t nHash = hash_value( val );
- dummy_node_type * pHead = get_bucket( nHash );
+ aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+ node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
if ( m_List.insert_at( pHead, val, f )) {
inc_item_count();
return false;
}
- /// Ensures that the \p val exists in the set
+ /// Updates the node
/**
The operation performs inserting or changing data with lock-free manner.
- If the item \p val is not found in the set, then \p val is inserted into the set.
+ If the item \p val is not found in the set, then \p val is inserted
+ iff \p bAllowInsert is \p true.
Otherwise, the functor \p func is called with item found.
The functor signature is:
\code
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - item of the set
- - \p val - argument \p val passed into the \p ensure function
+ - \p val - argument \p val passed into the \p update() function
If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
- refers to the same thing.
+            refer to the same object.
- The function makes RCU lock internally.
+ The functor may change non-key fields of the \p item.
+
+ The function applies RCU lock internally.
- Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
\p second is \p true if new item has been added or \p false if the item with \p key
- already is in the set.
+ already is in the list.
@warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
\ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
synchronization.
- */
+ */
template <typename Func>
- std::pair<bool, bool> ensure( value_type& val, Func func )
+ std::pair<bool, bool> update( value_type& val, Func func, bool bAllowInsert = true )
{
size_t nHash = hash_value( val );
- dummy_node_type * pHead = get_bucket( nHash );
+ aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+ node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
- std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
+ std::pair<bool, bool> bRet = m_List.update_at( pHead, val, func, bAllowInsert );
if ( bRet.first && bRet.second ) {
inc_item_count();
- m_Stat.onEnsureNew();
+ m_Stat.onUpdateNew();
}
else
- m_Stat.onEnsureExist();
+ m_Stat.onUpdateExist();
return bRet;
}
+ //@cond
+ template <typename Func>
+ CDS_DEPRECATED("ensure() is deprecated, use update()")
+ std::pair<bool, bool> ensure( value_type& val, Func func )
+ {
+ return update( val, func, true );
+ }
+ //@endcond
/// Unlinks the item \p val from the set
/**
bool unlink( value_type& val )
{
size_t nHash = hash_value( val );
- dummy_node_type * pHead = get_bucket( nHash );
+ aux_node_type * pHead = get_bucket( nHash );
assert( pHead != nullptr );
- if ( m_List.unlink_at( pHead, val ) ) {
+ if ( m_List.unlink_at( pHead, val )) {
--m_ItemCounter;
m_Stat.onEraseSuccess();
return true;
template <typename Q>
bool erase( Q const& key )
{
- return erase_( key, key_comparator() );
+ return erase_( key, key_comparator());
}
/// Deletes the item from the set using \p pred for searching
template <typename Q, typename Less>
bool erase_with( Q const& key, Less pred )
{
- return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
+ CDS_UNUSED( pred );
+ return erase_( key, typename ordered_list_adapter::template make_compare_from_less<Less>());
}
/// Deletes the item from the set
template <typename Q, typename Less, typename Func>
bool erase_with( Q const& key, Less pred, Func f )
{
- return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+ CDS_UNUSED( pred );
+ return erase_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
}
/// Extracts an item from the set
unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
- @note The function does NOT call RCU read-side lock or synchronization,
- and does NOT dispose the item found. It just excludes the item from the set
- and returns a pointer to item found.
- You should lock RCU before calling of the function, and you should synchronize RCU
- outside the RCU lock before reusing returned pointer.
+            Depending on \p bucket_type, you may or may not need to lock RCU before calling this function:
+ - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
+ - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
+ See ordered list implementation for details.
\code
typedef cds::urcu::gc< general_buffered<> > rcu;
// ...
rcu_splitlist_set::exempt_ptr p;
- {
- // first, we should lock RCU
- rcu_splitlist_set::rcu_lock lock;
-
- // Now, you can apply extract function
- // Note that you must not delete the item found inside the RCU lock
- p = theList.extract( 10 );
- if ( p ) {
- // do something with p
- ...
- }
+
+ // For MichaelList we should not lock RCU
+
+ // Now, you can apply extract function
+ // Note that you must not delete the item found inside the RCU lock
+ p = theList.extract( 10 );
+ if ( p ) {
+ // do something with p
+ ...
}
// We may safely release p here
template <typename Q>
exempt_ptr extract( Q const& key )
{
- return exempt_ptr(extract_( key, key_comparator() ));
+ return exempt_ptr(extract_( key, key_comparator()));
}
/// Extracts an item from the set using \p pred for searching
/**
- The function is an analog of \ref cds_intrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
- but \p pred is used for key compare.
+ The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the set.
*/
{
return find_( key, key_comparator(), f );
}
+ //@cond
+ template <typename Q, typename Func>
+ bool find( Q const& key, Func f )
+ {
+ return find_( key, key_comparator(), f );
+ }
+ //@endcond
/// Finds the key \p key with \p pred predicate for comparing
/**
template <typename Q, typename Less, typename Func>
bool find_with( Q& key, Less pred, Func f )
{
- return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+ CDS_UNUSED( pred );
+ return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
}
+ //@cond
+ template <typename Q, typename Less, typename Func>
+ bool find_with( Q const& key, Less pred, Func f )
+ {
+ CDS_UNUSED( pred );
+ return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
+ }
+ //@endcond
-
- /// Finds the key \p key
- /** \anchor cds_intrusive_SplitListSet_rcu_find_val
+ /// Checks whether the set contains \p key
+ /**
The function searches the item with key equal to \p key
- and returns \p true if \p key found or \p false otherwise.
+ and returns \p true if it is found, and \p false otherwise.
+
+ Note the hash functor specified for class \p Traits template parameter
+ should accept a parameter of type \p Q that can be not the same as \p value_type.
+            Otherwise, you may use the \p contains( Q const&, Less pred ) function with an explicit predicate for key comparison.
*/
template <typename Q>
+ bool contains( Q const& key )
+ {
+ return find_value( key, key_comparator());
+ }
+ //@cond
+ template <typename Q>
+ CDS_DEPRECATED("deprecated, use contains()")
bool find( Q const& key )
{
- return find_value( key, key_comparator() );
+ return contains( key );
}
+ //@endcond
- /// Finds the key \p key with \p pred predicate for comparing
+ /// Checks whether the set contains \p key using \p pred predicate for searching
/**
- The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)"
- but \p cmp is used for key compare.
- \p Less has the interface like \p std::less.
- \p pred must imply the same element order as the comparator used for building the set.
+ The function is an analog of <tt>contains( key )</tt> but \p pred is used for key comparing.
+ \p Less functor has the interface like \p std::less.
+ \p Less must imply the same element order as the comparator used for building the list.
*/
template <typename Q, typename Less>
+ bool contains( Q const& key, Less pred )
+ {
+ CDS_UNUSED( pred );
+ return find_value( key, typename ordered_list_adapter::template make_compare_from_less<Less>());
+ }
+ //@cond
+ template <typename Q, typename Less>
+ CDS_DEPRECATED("deprecated, use contains()")
bool find_with( Q const& key, Less pred )
{
- return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
+ return contains( key, pred );
}
+ //@endcond
/// Finds the key \p key and return the item found
/** \anchor cds_intrusive_SplitListSet_rcu_get
RCU should be locked before call of this function.
Returned item is valid only while RCU is locked:
\code
- cds::intrusive::SplitListSet< your_template_parameters > theSet;
+ typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
+ set_class theSet;
// ...
+ typename set_class::raw_ptr rp;
{
// Lock RCU
hash_set::rcu_lock lock;
- foo * pVal = theSet.get( 5 );
- if ( pVal ) {
- // Deal with pVal
+ rp = theSet.get( 5 );
+ if ( rp ) {
+ // Deal with rp
//...
}
// Unlock RCU by rcu_lock destructor
- // pVal can be retired by disposer at any time after RCU has been unlocked
+ // rp can be retired by disposer at any time after RCU has been unlocked
}
\endcode
*/
template <typename Q>
- value_type * get( Q const& key )
+ raw_ptr get( Q const& key )
{
- return get_( key, key_comparator() );
+ return get_( key, key_comparator());
}
/// Finds the key \p key and return the item found
\p pred must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
- value_type * get_with( Q const& key, Less pred )
+ raw_ptr get_with( Q const& key, Less pred )
{
- return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
+ CDS_UNUSED( pred );
+ return get_( key, typename ordered_list_adapter::template make_compare_from_less<Less>());
}
void clear()
{
iterator it = begin();
- while ( it != end() ) {
+ while ( it != end()) {
iterator i(it);
++i;
unlink( *it );
return m_Stat;
}
+ /// Returns internal statistics for \p OrderedList
+ typename OrderedList::stat const& list_statistics() const
+ {
+ return m_List.statistics();
+ }
+
protected:
//@cond
template <bool IsConst>
{}
};
//@endcond
+
public:
+ ///@name Forward iterators (thread-safe under RCU lock)
+ //@{
/// Forward iterator
/**
The forward iterator for a split-list has some features:
- it has no post-increment operator
- it depends on iterator of underlying \p OrderedList
- - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data.
- - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
- deleting operations it is no guarantee that you iterate all item in the split-list.
- Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container
- for debug purpose only.
+ You may safely use iterators in multi-threaded environment only under RCU lock.
+ Otherwise, a crash is possible if another thread deletes the element the iterator points to.
*/
typedef iterator_type<false> iterator;
+
/// Const forward iterator
/**
For iterator's features and requirements see \ref iterator
*/
iterator begin()
{
- return iterator( m_List.begin(), m_List.end() );
+ return iterator( m_List.begin(), m_List.end());
}
/// Returns an iterator that addresses the location succeeding the last element in a split-list
*/
iterator end()
{
- return iterator( m_List.end(), m_List.end() );
+ return iterator( m_List.end(), m_List.end());
}
/// Returns a forward const iterator addressing the first element in a split-list
/// Returns a forward const iterator addressing the first element in a split-list
const_iterator cbegin() const
{
- return const_iterator( m_List.cbegin(), m_List.cend() );
+ return const_iterator( m_List.cbegin(), m_List.cend());
}
/// Returns a const iterator that addresses the location succeeding the last element in a split-list
const_iterator cend() const
{
- return const_iterator( m_List.cend(), m_List.cend() );
+ return const_iterator( m_List.cend(), m_List.cend());
+ }
+ //@}
+
+ protected:
+ //@cond
+ /// Allocates an auxiliary (dummy) node from the bucket table and stamps it with \p nHash
+ /**
+ May return \p nullptr when the bucket table's aux-node pool is exhausted;
+ callers (see \p init_bucket) must be prepared to retry or wait.
+ */
+ aux_node_type * alloc_aux_node( size_t nHash )
+ {
+ m_Stat.onHeadNodeAllocated();
+ aux_node_type* p = m_Buckets.alloc_aux_node();
+ if ( p )
+ p->m_nHash = nHash;
+ return p;
+ }
+
+ /// Returns auxiliary node \p p to the bucket table's pool and updates statistics
+ void free_aux_node( aux_node_type * p )
+ {
+ m_Buckets.free_aux_node( p );
+ m_Stat.onHeadNodeFreed();
+ }
+
+ /// Calculates hash value of \p key
+ /**
+ Delegates to the stored hash functor \p m_HashFunctor;
+ the result is the "raw" hash, before split-order bit reversal.
+ */
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key );
+ }
+
+ /// Maps raw hash \p nHash to a bucket index
+ /// Keeps the low log2(current bucket count) bits of the hash (bucket count is a power of 2)
+ size_t bucket_no( size_t nHash ) const
+ {
+ return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
+ }
+
+ /// Returns the parent bucket of \p nBucket per the split-ordered-list rule:
+ /// the index with its most-significant set bit cleared
+ static size_t parent_bucket( size_t nBucket )
+ {
+ assert( nBucket > 0 );
+ return nBucket & ~( 1 << bitop::MSBnz( nBucket ));
+ }
+
+ /// Lazily initializes bucket \p nBucket; returns the bucket's aux (dummy) node, never \p nullptr
+ /**
+ First ensures the parent bucket exists (recursing up the split-order
+ parent chain if needed), then allocates a dummy node and links it into
+ the ordered list after the parent's dummy node. If the allocation pool
+ is exhausted, spins with \p back_off until either a free aux node
+ appears or a competing thread publishes the bucket. If linking fails,
+ a competing thread has already inserted the bucket's dummy node, so the
+ local one is freed and we busy-wait until that thread publishes it in
+ the bucket table.
+ */
+ aux_node_type * init_bucket( size_t const nBucket )
+ {
+ assert( nBucket > 0 );
+ size_t nParent = parent_bucket( nBucket );
+
+ aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
+ if ( pParentBucket == nullptr ) {
+ pParentBucket = init_bucket( nParent );
+ m_Stat.onRecursiveInitBucket();
+ }
+
+ assert( pParentBucket != nullptr );
+
+ // Allocate an aux node for new bucket
+ aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+ back_off bkoff;
+ for ( ;; pBucket = m_Buckets.bucket( nBucket )) {
+ if ( pBucket )
+ return pBucket;
+
+ pBucket = alloc_aux_node( split_list::dummy_hash<bit_reversal>( nBucket ));
+ if ( pBucket ) {
+ if ( m_List.insert_aux_node( pParentBucket, pBucket )) {
+ m_Buckets.bucket( nBucket, pBucket );
+ m_Stat.onNewBucket();
+ return pBucket;
+ }
+
+ // Another thread set the bucket. Wait while it done
+ free_aux_node( pBucket );
+ m_Stat.onBucketInitContenton();
+ break;
+ }
+
+ // There are no free buckets. It means that the bucket table is full
+ // Wait while another thread set the bucket or a free bucket will be available
+ m_Stat.onBucketsExhausted();
+ bkoff();
+ }
+
+ // Another thread set the bucket. Wait while it done
+ for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) {
+ bkoff();
+ m_Stat.onBusyWaitBucketInit();
+ }
+
+ return pBucket;
+ }
+
+ /// Returns the aux (dummy) head node of the bucket for raw hash \p nHash,
+ /// initializing the bucket on first use
+ aux_node_type * get_bucket( size_t nHash )
+ {
+ size_t nBucket = bucket_no( nHash );
+
+ aux_node_type * pHead = m_Buckets.bucket( nBucket );
+ if ( pHead == nullptr )
+ pHead = init_bucket( nBucket );
+
+ assert( pHead->is_dummy());
+
+ return pHead;
+ }
+
+ /// One-time construction helper: creates bucket 0, the root of the split-order parent chain
+ void init()
+ {
+ // Initialize bucket 0
+ // Note: 0 is used directly because dummy_hash<bit_reversal>(0) == 0,
+ // as the commented-out call documents
+ aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash<bit_reversal>(0)*/ );
+
+ // insert_aux_node cannot return false for empty list
+ CDS_VERIFY( m_List.insert_aux_node( pNode ));
+
+ m_Buckets.bucket( 0, pNode );
+ }
+
+ /// Item-count threshold that triggers a bucket-table grow: bucket count times load factor
+ static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
+ {
+ return nBucketCount * nLoadFactor;
+ }
+
+ /// Increments the item counter and doubles the bucket count when the load factor is exceeded
+ /**
+ Both CAS operations may fail under contention; that is benign - failure
+ means a competing thread has already performed the same resize step
+ (see the early return when \p m_nMaxItemCount was already advanced).
+ Once the bucket table reaches its fixed capacity, \p m_nMaxItemCount is
+ saturated to \p SIZE_MAX so this slow path is never entered again.
+ */
+ void inc_item_count()
+ {
+ size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
+ if ( ++m_ItemCounter <= nMaxCount )
+ return;
+
+ size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
+ const size_t nBucketCount = static_cast<size_t>(1) << sz;
+ if ( nBucketCount < m_Buckets.capacity()) {
+ // we may grow the bucket table
+ const size_t nLoadFactor = m_Buckets.load_factor();
+ if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
+ return; // someone already have updated m_nBucketCountLog2, so stop here
+
+ m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
+ memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+ m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+ }
+ else
+ m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
+ }
+
+ /// Finds item equal to \p val (per \p cmp) and applies functor \p f to it
+ /**
+ Builds a search value carrying the split-order (bit-reversed) hash,
+ then searches the ordered list starting from the bucket's dummy head.
+ \p f receives the stored item and the original search key, not the
+ wrapper \p search_value_type. Returns \p true if the item was found.
+ */
+ template <typename Q, typename Compare, typename Func>
+ bool find_( Q& val, Compare cmp, Func f )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
+ [&f](value_type& item, split_list::details::search_value_type<Q>& v){ f(item, v.val ); }));
+ }
+
+ /// Checks whether an item equal to \p val (per \p cmp) is present; no functor applied
+ template <typename Q, typename Compare>
+ bool find_value( Q const& val, Compare cmp )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
}
+ /// Returns a \p raw_ptr to the item equal to \p val, or an empty \p raw_ptr if not found
+ /**
+ NOTE(review): in this RCU specialization the caller presumably must hold
+ the RCU read-side lock while dereferencing the result - confirm against
+ the public \p get() documentation.
+ */
+ template <typename Q, typename Compare>
+ raw_ptr get_( Q const& val, Compare cmp )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ raw_ptr p = m_List.get_at( pHead, sv, cmp );
+ m_Stat.onFind( !!p );
+ return p;
+ }
+
+ /// Unlinks the item equal to \p val from the set and returns a pointer to it
+ /**
+ Returns \p nullptr if no matching item exists. On success the item
+ counter is decremented; the node itself is not reclaimed here (deferred
+ reclamation is the ordered list's responsibility).
+ */
+ template <typename Q, typename Compare>
+ value_type * extract_( Q const& val, Compare cmp )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ value_type * pNode = m_List.extract_at( pHead, sv, cmp );
+ if ( pNode ) {
+ --m_ItemCounter;
+ m_Stat.onExtractSuccess();
+ }
+ else
+ m_Stat.onExtractFailed();
+ return pNode;
+ }
+
+ /// \p extract_() variant taking a \p Less predicate; \p pred is used only to
+ /// build the comparator type and is never called directly
+ template <typename Q, typename Less>
+ value_type * extract_with_( Q const& val, Less pred )
+ {
+ CDS_UNUSED( pred );
+ return extract_( val, typename ordered_list_adapter::template make_compare_from_less<Less>());
+ }
+
+ /// Erases the item equal to \p val (per \p cmp); returns \p true on success
+ /// Decrements the item counter only when the underlying list actually removed a node
+ template <typename Q, typename Compare>
+ bool erase_( const Q& val, Compare cmp )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ if ( m_List.erase_at( pHead, sv, cmp )) {
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return true;
+ }
+ m_Stat.onEraseFailed();
+ return false;
+ }
+
+ /// Erases the item equal to \p val (per \p cmp), applying functor \p f to the
+ /// item before unlinking; returns \p true on success
+ template <typename Q, typename Compare, typename Func>
+ bool erase_( Q const& val, Compare cmp, Func f )
+ {
+ size_t nHash = hash_value( val );
+ split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+ aux_node_type * pHead = get_bucket( nHash );
+ assert( pHead != nullptr );
+
+ if ( m_List.erase_at( pHead, sv, cmp, f )) {
+ --m_ItemCounter;
+ m_Stat.onEraseSuccess();
+ return true;
+ }
+ m_Stat.onEraseFailed();
+ return false;
+ }
+ //@endcond
+
+ protected:
+ //@cond
+ // Inter-member padding requested via traits::padding; presumably intended to
+ // keep the bucket table and the ordered list on separate cache lines to
+ // reduce false sharing - confirm against split_list::traits documentation.
+ static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value;
+
+ typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table;
+ padded_bucket_table m_Buckets; ///< bucket table
+
+ typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list;
+ padded_ordered_list m_List; ///< Ordered list containing split-list items
+
+ atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
+ atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
+ hash m_HashFunctor; ///< Hash functor
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics accumulator
+ //@endcond
};
}} // namespace cds::intrusive
-#endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H
+#endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H