typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions requires external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef LazyList<
+ gc
+ , value_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::node_type head_type;
- //@endcond
-
- public:
- using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; ///< pointer to extracted node
- /// Type of \p get() member function return value
- typedef value_type * raw_ptr;
-
- protected:
- //@cond
- static value_type& node_to_value( node_type& n )
- {
- return n.m_Value;
- }
-
- static value_type const& node_to_value( node_type const& n )
- {
- return n.m_Value;
- }
-
- template <typename Q>
- static node_type * alloc_node( Q const& v )
- {
- return cxx_allocator().New( v );
- }
-
- template <typename... Args>
- static node_type * alloc_node( Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<Args>(args)... );
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
struct node_disposer {
    void operator()( node_type * pNode )
    {
        free_node( pNode );
    }
};
typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_Head;
- }
-
- head_type& head() const
- {
- return const_cast<head_type&>( base_class::m_Head );
- }
-
- head_type& tail()
- {
- return base_class::m_Tail;
- }
-
- head_type const& tail() const
- {
- return base_class::m_Tail;
- }
//@endcond
+ public:
+ using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; ///< pointer to extracted node
+ /// Type of \p get() member function return value
+ typedef value_type * raw_ptr;
+
protected:
- //@cond
+ //@cond
template <bool IsConst>
class iterator_type: protected base_class::template iterator_type<IsConst>
{
Returns \p true if inserting is successful, \p false otherwise.
*/
template <typename Q>
- bool insert( Q const& val )
+ bool insert( Q&& val )
{
- return insert_at( head(), val );
+ return insert_at( head(), std::forward<Q>( val ));
}
/// Inserts new node
The function takes the RCU lock internally.
*/
template <typename Q, typename Func>
- bool insert( Q const& key, Func func )
+ bool insert( Q&& key, Func func )
{
- return insert_at( head(), key, func );
+ return insert_at( head(), std::forward<Q>( key ), func );
}
/// Inserts data of type \p value_type constructed from \p args
}
template <typename Q>
- bool insert_at( head_type& refHead, Q const& val )
+ bool insert_at( head_type& refHead, Q&& val )
{
- return insert_node_at( refHead, alloc_node( val ));
+ return insert_node_at( refHead, alloc_node( std::forward<Q>( val )));
}
template <typename... Args>
}
template <typename Q, typename Func>
- bool insert_at( head_type& refHead, Q const& key, Func f )
+ bool insert_at( head_type& refHead, Q&& key, Func f )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( key )));
if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node_to_value(node) ); } )) {
pNode.release();
return pNode ? &pNode->m_Value : nullptr;
}
+ static value_type& node_to_value( node_type& n )
+ {
+ return n.m_Value;
+ }
+
+ static value_type const& node_to_value( node_type const& n )
+ {
+ return n.m_Value;
+ }
+
+ template <typename Q>
+ static node_type * alloc_node( Q&& v )
+ {
+ return cxx_allocator().New( std::forward<Q>( v ));
+ }
+
+ template <typename... Args>
+ static node_type * alloc_node( Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_Head;
+ }
+
+ head_type& head() const
+ {
+ return const_cast<head_type&>(base_class::m_Head);
+ }
+
+ head_type& tail()
+ {
+ return base_class::m_Tail;
+ }
+
+ head_type const& tail() const
+ {
+ return base_class::m_Tail;
+ }
//@endcond
};
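
The rebind_traits / select_stat_wrapper members added above are the hooks that MichaelHashSet and split-list use to re-instantiate the bucket list with overridden options. A minimal sketch of the mechanism, not part of the patch; my_traits and bucket_list are illustrative names, and an initialized general_buffered RCU is assumed:

    #include <functional>
    #include <cds/urcu/general_buffered.h>
    #include <cds/container/lazy_list_rcu.h>

    namespace cc = cds::container;
    typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_gc;

    struct my_traits: public cc::lazy_list::traits {
        typedef std::less<int> less;    // order int items by operator<
    };
    typedef cc::LazyList< rcu_gc, int, my_traits > list_type;

    // What the set does internally for its buckets: the same list type, but
    // with the item counter disabled (the set keeps its own item counter).
    typedef list_type::rebind_traits<
        cds::opt::item_counter< cds::atomicity::empty_item_counter >
    >::type bucket_list;
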
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions does not require external locking
+ //@cond
+ // Rebind traits (split-list support)
+ template <typename... Options>
+ struct rebind_traits {
+ typedef MichaelList<
+ gc
+ , value_type
+ , typename cds::opt::make_options< traits, Options...>::type
+ > type;
+ };
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >;
+ //@endcond
+
protected:
//@cond
typedef typename base_class::value_type node_type;
typedef typename maker::intrusive_traits::compare intrusive_key_comparator;
typedef typename base_class::atomic_node_ptr head_type;
- //@endcond
- public:
- using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; ///< pointer to extracted node
+ struct node_disposer {
+ void operator()( node_type * pNode )
+ {
+ free_node( pNode );
+ }
+ };
+ typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
+ //@endcond
private:
//@cond
//@endcond
public:
+ /// Pointer to extracted node
+ using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >;
+
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr;
protected:
//@cond
- static value_type& node_to_value( node_type& n )
- {
- return n.m_Value;
- }
- static value_type const& node_to_value( node_type const& n )
- {
- return n.m_Value;
- }
-
- template <typename Q>
- static node_type * alloc_node( Q const& v )
- {
- return cxx_allocator().New( v );
- }
-
- template <typename... Args>
- static node_type * alloc_node( Args&&... args )
- {
- return cxx_allocator().MoveNew( std::forward<Args>(args)... );
- }
-
- static void free_node( node_type * pNode )
- {
- cxx_allocator().Delete( pNode );
- }
-
- struct node_disposer {
- void operator()( node_type * pNode )
- {
- free_node( pNode );
- }
- };
- typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr;
-
- head_type& head()
- {
- return base_class::m_pHead;
- }
-
- head_type& head() const
- {
- return const_cast<head_type&>( base_class::m_pHead );
- }
- //@endcond
-
- protected:
- //@cond
template <bool IsConst>
class iterator_type: protected base_class::template iterator_type<IsConst>
{
Returns \p true if inserting is successful, \p false otherwise.
*/
template <typename Q>
- bool insert( Q const& val )
+ bool insert( Q&& val )
{
- return insert_at( head(), val );
+ return insert_at( head(), std::forward<Q>( val ));
}
/// Inserts new node
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename Q, typename Func>
- bool insert( Q const& key, Func func )
+ bool insert( Q&& key, Func func )
{
- return insert_at( head(), key, func );
+ return insert_at( head(), std::forward<Q>( key ), func );
}
/// Updates data by \p key
}
template <typename Q>
- bool insert_at( head_type& refHead, Q const& val )
+ bool insert_at( head_type& refHead, Q&& val )
{
- return insert_node_at( refHead, alloc_node( val ));
+ return insert_node_at( refHead, alloc_node( std::forward<Q>( val )));
}
template <typename Q, typename Func>
- bool insert_at( head_type& refHead, Q const& key, Func f )
+ bool insert_at( head_type& refHead, Q&& key, Func f )
{
- scoped_node_ptr pNode( alloc_node( key ));
+ scoped_node_ptr pNode( alloc_node( std::forward<Q>( key )));
if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { f( node_to_value(node) ); } )) {
pNode.release();
return raw_ptr( base_class::get_at( refHead, val, cmp ));
}
+ static value_type& node_to_value( node_type& n )
+ {
+ return n.m_Value;
+ }
+ static value_type const& node_to_value( node_type const& n )
+ {
+ return n.m_Value;
+ }
+
+ template <typename Q>
+ static node_type * alloc_node( Q&& v )
+ {
+ return cxx_allocator().New( std::forward<Q>( v ));
+ }
+
+ template <typename... Args>
+ static node_type * alloc_node( Args&&... args )
+ {
+ return cxx_allocator().MoveNew( std::forward<Args>( args )... );
+ }
+
+ static void free_node( node_type * pNode )
+ {
+ cxx_allocator().Delete( pNode );
+ }
+
+ head_type& head()
+ {
+ return base_class::m_pHead;
+ }
+
+ head_type& head() const
+ {
+ return const_cast<head_type&>(base_class::m_pHead);
+ }
//@endcond
};
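
A note on the Q const& to Q&& changes in both lists: insert() now perfectly forwards its argument into alloc_node(), so an rvalue is moved into the freshly allocated node rather than copied. A hedged usage sketch, assuming l is a MichaelList (or LazyList) of std::string:

    #include <string>
    #include <utility>

    std::string big( 1024, 'x' );
    l.insert( big );               // lvalue: copied into the node, as before
    l.insert( std::move( big ));   // rvalue: payload moved, no copy
    l.insert( "temporary" );       // temporary forwarded straight to the node
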
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_CONTAINER_MICHAEL_SET_RCU_H
{
public:
typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector
- typedef OrderedList bucket_type; ///< type of ordered list to be used as a bucket implementation
+ typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket implementation
typedef Traits traits; ///< Set traits
- typedef typename bucket_type::value_type value_type; ///< type of value to be stored in the list
- typedef typename bucket_type::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list
+ typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor
+ typedef typename ordered_list::stat stat; ///< Internal statistics
/// Hash functor for \ref value_type and all its derivatives that you use
typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
- typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::item_counter item_counter; ///< Item counter type
+ typedef typename traits::allocator allocator; ///< Bucket table allocator
- typedef typename bucket_type::rcu_lock rcu_lock; ///< RCU scoped lock
- typedef typename bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
- typedef typename bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
+ typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
/// Group of \p extract_xxx functions requires external locking if the underlying ordered list requires it
- static CDS_CONSTEXPR const bool c_bExtractLockExternal = bucket_type::c_bExtractLockExternal;
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
- protected:
- //@cond
- class internal_bucket_type: public bucket_type
+ // GC and OrderedList::gc must be the same
+ static_assert(std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
+
+ static_assert(!std::is_same<item_counter, atomicity::empty_item_counter>::value,
+ "atomicity::empty_item_counter is not allowed as an item counter");
+
+#ifdef CDS_DOXYGEN_INVOKED
+ /// Wrapped internal statistics for \p ordered_list
+ typedef implementation_specific bucket_stat;
+
+ /// Internal bucket type - rebind \p ordered_list with empty item counter and wrapped internal statistics
+ typedef modified_ordered_list internal_bucket_type;
+#else
+ typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat;
+
+ typedef typename ordered_list::template rebind_traits<
+ cds::opt::item_counter< cds::atomicity::empty_item_counter >
+ , cds::opt::stat< typename bucket_stat::wrapped_stat >
+ >::type internal_bucket_type_;
+
+ class internal_bucket_type: public internal_bucket_type_
{
- typedef bucket_type base_class;
+ typedef internal_bucket_type_ base_class;
public:
+ using base_class::base_class;
using base_class::node_type;
using base_class::alloc_node;
using base_class::insert_node;
using base_class::node_to_value;
};
+#endif
- /// Bucket table allocator
- typedef cds::details::Allocator< internal_bucket_type, typename traits::allocator > bucket_table_allocator;
-
- //@endcond
-
- protected:
- item_counter m_ItemCounter; ///< Item counter
- hash m_HashFunctor; ///< Hash functor
- internal_bucket_type * m_Buckets; ///< bucket table
-
- private:
- //@cond
- const size_t m_nHashBitmask;
- //@endcond
+ typedef typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node
+ typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives
protected:
//@cond
- /// Calculates hash value of \p key
- template <typename Q>
- size_t hash_value( Q const& key ) const
- {
- return m_HashFunctor( key ) & m_nHashBitmask;
- }
+ /// Bucket table allocator
+ typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator;
- /// Returns the bucket (ordered list) for \p key
- template <typename Q>
- internal_bucket_type& bucket( Q const& key )
- {
- return m_Buckets[ hash_value( key ) ];
- }
- template <typename Q>
- internal_bucket_type const& bucket( Q const& key ) const
- {
- return m_Buckets[ hash_value( key ) ];
- }
+ const size_t m_nHashBitmask;
+ item_counter m_ItemCounter; ///< Item counter
+ hash m_HashFunctor; ///< Hash functor
+ internal_bucket_type* m_Buckets; ///< bucket table
+ typename bucket_stat::stat m_Stat; ///< Internal statistics
//@endcond
+
public:
///@name Forward iterators (thread-safe under RCU lock)
//@{
};
\endcode
*/
- typedef michael_set::details::iterator< bucket_type, false > iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, false > iterator;
/// Const forward iterator
- typedef michael_set::details::iterator< bucket_type, true > const_iterator;
+ typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator;
/// Returns a forward iterator addressing the first element in a set
/**
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
- }
- //@endcond
-
public:
/// Initialize hash set
/**
size_t nMaxItemCount, ///< estimation of max item count in the hash set
size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket
) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor ))
+ , m_Buckets( bucket_table_allocator().allocate( bucket_count()))
{
- // GC and OrderedList::gc must be the same
- static_assert( std::is_same<gc, typename bucket_type::gc>::value, "GC and OrderedList::gc must be the same");
-
- static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
- "atomicity::empty_item_counter is not allowed as a item counter");
-
- m_Buckets = bucket_table_allocator().NewArray( bucket_count() );
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ construct_bucket<bucket_stat>( it );
}
/// Clears hash set and destroys it
~MichaelHashSet()
{
clear();
- bucket_table_allocator().Delete( m_Buckets, bucket_count() );
+
+ for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it )
+ it->~internal_bucket_type();
+ bucket_table_allocator().deallocate( m_Buckets, bucket_count() );
}
/// Inserts new node
Returns \p true if \p val is inserted into the set, \p false otherwise.
*/
template <typename Q>
- bool insert( Q const& val )
+ bool insert( Q&& val )
{
- const bool bRet = bucket( val ).insert( val );
+ const bool bRet = bucket( val ).insert( std::forward<Q>( val ));
if ( bRet )
++m_ItemCounter;
return bRet;
synchronization.
*/
template <typename Q, typename Func>
- bool insert( Q const& val, Func f )
+ bool insert( Q&& val, Func f )
{
- const bool bRet = bucket( val ).insert( val, f );
+ const bool bRet = bucket( val ).insert( std::forward<Q>( val ), f );
if ( bRet )
++m_ItemCounter;
return bRet;
}
-
/// Updates the element
/**
The operation performs inserting or changing data in a lock-free manner.
synchronization.
*/
template <typename Q, typename Func>
- std::pair<bool, bool> update( const Q& val, Func func, bool bAllowInsert = true )
+ std::pair<bool, bool> update( Q const& val, Func func, bool bAllowInsert = true )
{
std::pair<bool, bool> bRet = bucket( val ).update( val, func, bAllowInsert );
if ( bRet.second )
If the item with the key equal to \p key is not found, the function returns an empty \p exempt_ptr.
The function just excludes the item from the set and returns a pointer to item found.
- Depends on \p bucket_type you should or should not lock RCU before calling of this function:
+ Depending on \p ordered_list, you should or should not lock RCU before calling this function:
- for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked
- for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked
See ordered list implementation for details.
/** \anchor cds_nonintrusive_MichaelHashSet_rcu_get
The function searches the item with key equal to \p key and returns the pointer to item found.
If \p key is not found it returns \p nullptr.
- Note the type of returned value depends on underlying \p bucket_type.
+ Note that the type of the returned value depends on the underlying \p ordered_list.
For details, see documentation of ordered list you use.
Note that the compare functor should accept a parameter of type \p Q that may differ from \p value_type.
return m_ItemCounter;
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
/// Returns the size of hash table
/**
Since \p %MichaelHashSet cannot dynamically extend the hash table size,
{
return m_nHashBitmask + 1;
}
+
+ protected:
+ //@cond
+ /// Calculates hash value of \p key
+ template <typename Q>
+ size_t hash_value( Q const& key ) const
+ {
+ return m_HashFunctor( key ) & m_nHashBitmask;
+ }
+
+ /// Returns the bucket (ordered list) for \p key
+ template <typename Q>
+ internal_bucket_type& bucket( Q const& key )
+ {
+ return m_Buckets[hash_value( key )];
+ }
+ template <typename Q>
+ internal_bucket_type const& bucket( Q const& key ) const
+ {
+ return m_Buckets[hash_value( key )];
+ }
+
+ template <typename Stat>
+ typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type;
+ }
+
+ template <typename Stat>
+ typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bucket )
+ {
+ new (bucket) internal_bucket_type( m_Stat );
+ }
+
+ const_iterator get_const_begin() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count() );
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<internal_bucket_type const&>(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count() );
+ }
+ //@endcond
};
}} // namespace cds::container
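
Putting the statistics plumbing together: the set rebinds its bucket list with an empty item counter and a wrapped stat, shares one stat object across all buckets via construct_bucket(), and exposes the aggregate through statistics(). A hedged sketch mirroring the unit tests below; rcu_gc and cc are as in the earlier sketch, and hash_int stands in for any user-provided hash functor:

    struct list_traits: public cc::michael_list::traits {
        typedef std::less<int>           less;
        typedef cc::michael_list::stat<> stat;    // enable list-level statistics
    };
    typedef cc::MichaelList< rcu_gc, int, list_traits > list_type;

    struct set_traits: public cc::michael_set::traits {
        typedef hash_int                     hash;          // assumption: user hash functor
        typedef cds::atomicity::item_counter item_counter;  // must not be empty_item_counter
    };
    typedef cc::MichaelHashSet< rcu_gc, list_type, set_traits > set_type;

    set_type s( 1000, 4 );           // ~1000 expected items, load factor 4
    s.insert( 42 );
    auto const& st = s.statistics(); // counters aggregated over all buckets
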
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_DETAILS_ALLOCATOR_H
#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H
#define CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H
-#include <mutex> // unique_lock
+#include <mutex> // unique_lock
#include <cds/intrusive/details/lazy_list_base.h>
#include <cds/urcu/details/check_deadlock.h>
#include <cds/details/binary_functor_wrapper.h>
using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >;
//@endcond
- protected:
- //@cond
- typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer
- typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support)
- //@endcond
-
protected:
node_type m_Head; ///< List head (dummy node)
node_type m_Tail; ///< List tail (dummy node)
mutable stat m_Stat; ///< Internal statistics
//@cond
+ typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer
+ typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support)
/// Position pointer for item search
struct position {
typedef std::unique_lock< position > scoped_position_lock;
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> deadlock_policy;
- //@endcond
-
- protected:
- //@cond
- static void clear_links( node_type * pNode )
- {
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
- }
struct clear_and_dispose {
    void operator()( value_type * p )
    {
        assert( p != nullptr );
        clear_links( node_traits::to_node_ptr( p ));
        disposer()( p );
    }
};
-
- static void dispose_node( node_type * pNode )
- {
- assert( pNode );
- assert( !gc::is_locked());
-
- gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ));
- }
-
- static void link_node( node_type * pNode, node_type * pPred, node_type * pCur )
- {
- assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur );
- link_checker::is_empty( pNode );
-
- pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_relaxed );
- pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release );
- }
-
- void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead )
- {
- assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur );
- assert( pCur != &m_Tail );
-
- node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr();
- pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ); // logical deletion + back-link for search
- pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release); // physically deleting
- }
-
//@endcond
public:
}
//@}
- private:
- //@cond
- const_iterator get_const_begin() const
- {
- const_iterator it( const_cast<node_type *>( &m_Head ));
- ++it ; // skip dummy head
- return it;
- }
- const_iterator get_const_end() const
- {
- return const_iterator( const_cast<node_type *>( &m_Tail ));
- }
- //@endcond
-
public:
/// Default constructor initializes empty list
LazyList()
protected:
//@cond
+ static void clear_links( node_type * pNode )
+ {
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ }
+
+ static void dispose_node( node_type * pNode )
+ {
+ assert( pNode );
+ assert( !gc::is_locked() );
+
+ gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ) );
+ }
+
+ static void link_node( node_type * pNode, node_type * pPred, node_type * pCur )
+ {
+ assert( pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur );
+ link_checker::is_empty( pNode );
+
+ pNode->m_pNext.store( marked_node_ptr( pCur ), memory_model::memory_order_relaxed );
+ pPred->m_pNext.store( marked_node_ptr( pNode ), memory_model::memory_order_release );
+ }
+
+ void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead )
+ {
+ assert( pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur );
+ assert( pCur != &m_Tail );
+
+ node_type * pNext = pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr();
+ pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ); // logical deletion + back-link for search
+ pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release ); // physical removal
+ }
+
// split-list support
bool insert_aux_node( node_type * pNode )
{
return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
//@endcond
+
+ private:
+ //@cond
+ const_iterator get_const_begin() const
+ {
+ const_iterator it( const_cast<node_type *>(&m_Head) );
+ ++it; // skip dummy head
+ return it;
+ }
+ const_iterator get_const_end() const
+ {
+ return const_iterator( const_cast<node_type *>(&m_Tail) );
+ }
+ //@endcond
};
}} // namespace cds::intrusive
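
The unlink_node() above relies on the marked-pointer idiom: the least significant bit of a next-pointer flags logical deletion, and the victim's next pointer is redirected to the head (with the mark set) so that a concurrent search landing on the dead node restarts from the beginning. A standalone sketch of the idiom, independent of libcds:

    #include <cstdint>

    // Minimal marked pointer: the low bit of the address doubles as a
    // "logically deleted" flag (nodes are at least 2-byte aligned).
    template <typename Node>
    class marked_ptr {
        std::uintptr_t m_bits;
    public:
        explicit marked_ptr( Node* p = nullptr, int mark = 0 )
            : m_bits( reinterpret_cast<std::uintptr_t>( p ) | static_cast<std::uintptr_t>( mark & 1 ))
        {}
        Node* ptr()  const { return reinterpret_cast<Node*>( m_bits & ~std::uintptr_t( 1 )); }
        int   mark() const { return static_cast<int>( m_bits & 1 ); }
    };
    // unlink_node() stores the equivalent of marked_ptr( pHead, 1 ) into the
    // victim's next pointer: mark = logically deleted, ptr = back-link for searches.
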
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
- static void clear_links( node_type * pNode )
- {
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
- pNode->m_pDelChain = nullptr;
- }
-
struct clear_and_dispose {
    void operator()( value_type * p )
    {
        assert( p != nullptr );
        clear_links( node_traits::to_node_ptr( p ));
        disposer()( p );
    }
};
- static void dispose_node( node_type * pNode )
- {
- assert( pNode );
- assert( !gc::is_locked() );
-
- gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ) );
- }
-
- static void dispose_chain( node_type * pChain )
- {
- if ( pChain ) {
- assert( !gc::is_locked() );
-
- auto f = [&pChain]() -> cds::urcu::retired_ptr {
- node_type * p = pChain;
- if ( p ) {
- pChain = p->m_pDelChain;
- return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
- }
- return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr));
- };
- gc::batch_retire(std::ref(f));
- }
- }
-
/// Position pointer for item search
struct position {
atomic_node_ptr * pPrev ; ///< Previous node
    ~position()
    {
        dispose_chain( pDelChain );
    }
};
-
//@endcond
public:
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
- protected:
- //@cond
-
- bool link_node( node_type * pNode, position& pos )
- {
- assert( pNode != nullptr );
- link_checker::is_empty( pNode );
-
- marked_node_ptr p( pos.pCur );
- pNode->m_pNext.store( p, memory_model::memory_order_release );
- if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )))
- return true;
-
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
- return false;
- }
-
- static void link_to_remove_chain( position& pos, node_type * pDel )
- {
- assert( pDel->m_pDelChain == nullptr );
-
- pDel->m_pDelChain = pos.pDelChain;
- pos.pDelChain = pDel;
- }
-
- bool unlink_node( position& pos, erase_node_mask nMask )
- {
- assert(gc::is_locked() );
-
- // Mark the node (logical deletion)
- marked_node_ptr next(pos.pNext, 0);
-
- if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
-
- // Try physical removal - fast path
- marked_node_ptr cur(pos.pCur);
- if ( cds_likely( pos.pPrev->compare_exchange_strong(cur, marked_node_ptr(pos.pNext), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
- if ( nMask == erase_mask )
- link_to_remove_chain( pos, pos.pCur );
- }
- else {
- // Slow path
- search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator() );
- }
- return true;
- }
- return false;
- }
- //@endcond
-
protected:
//@cond
template <bool IsConst>
{
return m_Stat;
}
+
protected:
//@cond
+ static void clear_links( node_type * pNode )
+ {
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
+ pNode->m_pDelChain = nullptr;
+ }
+
+ static void dispose_node( node_type * pNode )
+ {
+ assert( pNode );
+ assert( !gc::is_locked() );
+
+ gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ) );
+ }
+
+ static void dispose_chain( node_type * pChain )
+ {
+ if ( pChain ) {
+ assert( !gc::is_locked() );
+
+ auto f = [&pChain]() -> cds::urcu::retired_ptr {
+ node_type * p = pChain;
+ if ( p ) {
+ pChain = p->m_pDelChain;
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ) );
+ }
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr) );
+ };
+ gc::batch_retire( std::ref( f ) );
+ }
+ }
+
+ bool link_node( node_type * pNode, position& pos )
+ {
+ assert( pNode != nullptr );
+ link_checker::is_empty( pNode );
+
+ marked_node_ptr p( pos.pCur );
+ pNode->m_pNext.store( p, memory_model::memory_order_release );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) )
+ return true;
+
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ return false;
+ }
+
+ static void link_to_remove_chain( position& pos, node_type * pDel )
+ {
+ assert( pDel->m_pDelChain == nullptr );
+
+ pDel->m_pDelChain = pos.pDelChain;
+ pos.pDelChain = pDel;
+ }
+
+ bool unlink_node( position& pos, erase_node_mask nMask )
+ {
+ assert( gc::is_locked() );
+
+ // Mark the node (logical deletion)
+ marked_node_ptr next( pos.pNext, 0 );
+
+ if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) ) {
+
+ // Try physical removal - fast path
+ marked_node_ptr cur( pos.pCur );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) ) {
+ if ( nMask == erase_mask )
+ link_to_remove_chain( pos, pos.pCur );
+ }
+ else {
+ // Slow path
+ search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator() );
+ }
+ return true;
+ }
+ return false;
+ }
+
// split-list support
bool insert_aux_node( node_type * pNode )
{
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
<PreprocessorDefinitions>_ENABLE_ATOMIC_ALIGNMENT_FIX;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
<AdditionalIncludeDirectories>$(SolutionDir)..\..\..;$(GTEST_ROOT)/include;$(SolutionDir)..\..\..\test\include;$(BOOST_PATH);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
<AdditionalOptions>/bigobj %(AdditionalOptions)</AdditionalOptions>\r
+ <DisableSpecificWarnings>4503</DisableSpecificWarnings>\r
</ClCompile>\r
<Link>\r
<SubSystem>Console</SubSystem>\r
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_MICHAEL_LAZY_RCU_H
#define CDSUNIT_SET_TEST_MICHAEL_LAZY_RCU_H
this->test( s );
}
+TYPED_TEST_P( MichaelLazySet, stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item int_item;
+
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::stat<> stat;
+ };
+ typedef cc::LazyList< rcu_type, int_item, list_traits > list_type;
+
+ struct set_traits: public cc::michael_set::traits
+ {
+ typedef typename TestFixture::hash_int hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashSet< rcu_type, list_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 4 );
+ this->test( s );
+}
+
+TYPED_TEST_P( MichaelLazySet, wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item int_item;
+
+ struct list_traits: public cc::lazy_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::lazy_list::wrapped_stat<> stat;
+ };
+ typedef cc::LazyList< rcu_type, int_item, list_traits > list_type;
+
+ struct set_traits: public cc::michael_set::traits
+ {
+ typedef typename TestFixture::hash_int hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashSet< rcu_type, list_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 4 );
+ this->test( s );
+}
// GCC 5: all test names should be written on a single line, otherwise a runtime error will be encountered, such as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelLazySet,
- compare, less, cmpmix, item_counting, backoff, seq_cst, mutex
+ compare, less, cmpmix, item_counting, backoff, seq_cst, mutex, stat, wrapped_stat
);
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSUNIT_SET_TEST_MICHAEL_MICHAEL_RCU_H
#define CDSUNIT_SET_TEST_MICHAEL_MICHAEL_RCU_H
this->test( s );
}
+TYPED_TEST_P( MichaelSet, stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item int_item;
+
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::stat<> stat;
+ };
+ typedef cc::MichaelList< rcu_type, int_item, list_traits > list_type;
+
+ struct set_traits: public cc::michael_set::traits
+ {
+ typedef typename TestFixture::hash_int hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashSet< rcu_type, list_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 4 );
+ this->test( s );
+}
+
+TYPED_TEST_P( MichaelSet, wrapped_stat )
+{
+ typedef typename TestFixture::rcu_type rcu_type;
+ typedef typename TestFixture::int_item int_item;
+
+ struct list_traits: public cc::michael_list::traits
+ {
+ typedef typename TestFixture::less less;
+ typedef cds::backoff::pause back_off;
+ typedef cc::michael_list::wrapped_stat<> stat;
+ };
+ typedef cc::MichaelList< rcu_type, int_item, list_traits > list_type;
+
+ struct set_traits: public cc::michael_set::traits
+ {
+ typedef typename TestFixture::hash_int hash;
+ typedef cds::atomicity::item_counter item_counter;
+ };
+ typedef cc::MichaelHashSet< rcu_type, list_type, set_traits > set_type;
+
+ set_type s( TestFixture::kSize, 4 );
+ this->test( s );
+}
// GCC 5: all test names should be written on a single line, otherwise a runtime error will be encountered, such as
// "No test named <test_name> can be found in this test case"
REGISTER_TYPED_TEST_CASE_P( MichaelSet,
- compare, less, cmpmix, item_counting, backoff, seq_cst
+ compare, less, cmpmix, item_counting, backoff, seq_cst, stat, wrapped_stat
);
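
Once a stat-enabled set has been exercised, the counters are read back through the statistics() accessor added in this patch. A hedged snippet, valid inside a typed test body like the ones above, assuming the chosen stat type exposes readable event counters:

    set_type s( TestFixture::kSize, 4 );
    this->test( s );

    // statistics() returns the bucket_stat::stat shared by all buckets.
    auto const& st = s.statistics();
    (void) st;    // e.g. check insert/find counters exposed by the stat type
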