/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef cds::urcu::gc<RCU> gc; ///< RCU schema
- typedef typename traits::back_off back_off; ///< back-off strategy
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
- typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef cds::urcu::gc<RCU> gc; ///< RCU schema
+ typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef typename traits::stat stat; ///< Internal statistics
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ // Re-exports michael_list's stat-wrapper metafunction so that containers
+ // built on this list (e.g. split-list) can select/wrap the Stat type uniformly.
+ template <typename Stat>
+ using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >;
//@endcond
protected:
- typedef typename node_type::marked_ptr marked_node_ptr ; ///< Marked node pointer
- typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic node pointer
- typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_ptr marked_node_ptr; ///< Marked node pointer
+ typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer
+ typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
- atomic_node_ptr m_pHead ; ///< Head pointer
- item_counter m_ItemCounter ; ///< Item counter
+ atomic_node_ptr m_pHead; ///< Head pointer
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
protected:
//@cond
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
- static void clear_links( node_type * pNode )
- {
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
- pNode->m_pDelChain = nullptr;
- }
-
// Functor passed to gc::retire_ptr / make_retired_ptr for deferred disposal.
// NOTE(review): the operator() body appears elided in this excerpt — upstream it
// presumably clears the node's links and invokes the user disposer; confirm
// against the full header before relying on this being a no-op.
struct clear_and_dispose {
void operator()( value_type * p )
{
}
};
- static void dispose_node( node_type * pNode )
- {
- assert( pNode );
- assert( !gc::is_locked() );
-
- gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ) );
- }
-
- static void dispose_chain( node_type * pChain )
- {
- if ( pChain ) {
- assert( !gc::is_locked() );
-
- auto f = [&pChain]() -> cds::urcu::retired_ptr {
- node_type * p = pChain;
- if ( p ) {
- pChain = p->m_pDelChain;
- return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
- }
- return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr));
- };
- gc::batch_retire(std::ref(f));
- }
- }
-
/// Position pointer for item search
struct position {
atomic_node_ptr * pPrev ; ///< Previous node
dispose_chain( pDelChain );
}
};
-
//@endcond
public:
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
- protected:
- //@cond
-
- bool link_node( node_type * pNode, position& pos )
- {
- assert( pNode != nullptr );
- link_checker::is_empty( pNode );
-
- marked_node_ptr p( pos.pCur );
- pNode->m_pNext.store( p, memory_model::memory_order_release );
- return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
- }
-
- static void link_to_remove_chain( position& pos, node_type * pDel )
- {
- assert( pDel->m_pDelChain == nullptr );
-
- pDel->m_pDelChain = pos.pDelChain;
- pos.pDelChain = pDel;
- }
-
- bool unlink_node( position& pos, erase_node_mask nMask )
- {
- assert(gc::is_locked() );
-
- // Mark the node (logical deletion)
- marked_node_ptr next(pos.pNext, 0);
-
- if ( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
-
- // Try physical removal - fast path
- marked_node_ptr cur(pos.pCur);
- if ( pos.pPrev->compare_exchange_strong(cur, marked_node_ptr(pos.pNext), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
- if ( nMask == erase_mask )
- link_to_remove_chain( pos, pos.pCur );
- }
- else {
- // Slow path
- search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator() );
- }
- return true;
- }
- return false;
- }
- //@endcond
-
protected:
//@cond
template <bool IsConst>
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
+ //@cond
+ // Constructs the list sharing an externally-owned statistics object; used when
+ // \p stat is michael_list::wrapped_stat<Stat>, i.e. a wrapping container owns
+ // the real counters and this list only references them.
+ // NOTE(review): `std::enable_if<...>` without `::type` (or `std::enable_if_t`)
+ // names a type for ANY condition, so this default argument never SFINAEs —
+ // the constraint is documentation-only. Consider `std::enable_if_t<...>`.
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : m_pHead( nullptr )
+ , m_Stat( st )
+ {}
+ //@endcond
+
/// Destroy list
~MichaelList()
{
The functor may change non-key fields of the \p item; however, \p func must guarantee
that during changing no any other modifications could be made on this item by concurrent threads.
- Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
+ Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
\p second is \p true if new item has been added or \p false if the item with \p key
already is in the list.
template <typename Q>
bool erase( Q const& key )
{
- return erase_at( m_pHead, key, key_comparator() );
+ return erase_at( m_pHead, key, key_comparator());
}
/// Deletes the item from the list using \p pred predicate for searching
bool erase_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() );
+ return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Deletes the item from the list
rcu_michael_list::exempt_ptr p1;
// The RCU should NOT be locked when extract() is called!
- assert( !rcu::is_locked() );
+ assert( !rcu::is_locked());
// You can call extract() function
p1 = theList.extract( 10 );
template <typename Q>
exempt_ptr extract( Q const& key )
{
- return exempt_ptr( extract_at( m_pHead, key, key_comparator() ));
+ return exempt_ptr( extract_at( m_pHead, key, key_comparator()));
}
/// Extracts an item from the list using \p pred predicate for searching
exempt_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() ));
+ return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>()));
}
/// Find the key \p val
template <typename Q>
bool contains( Q const& key )
{
- return find_at( m_pHead, key, key_comparator() );
+ return find_at( m_pHead, key, key_comparator());
}
//@cond
template <typename Q>
bool contains( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
//@cond
template <typename Q, typename Less>
RCU \p synchronize method can be called.
Note that depending on RCU type used the \ref disposer invocation can be deferred.
- The function can throw \p cds::urcu::rcu_deadlock exception if an deadlock is encountered and
+ The function can throw \p cds::urcu::rcu_deadlock exception if a deadlock is encountered and
deadlock checking policy is \p opt::v::rcu_throw_deadlock.
*/
void clear()
{
- if( !empty() ) {
+ if( !empty()) {
check_deadlock_policy::check();
marked_node_ptr pHead;
{
rcu_lock l;
pHead = m_pHead.load(memory_model::memory_order_acquire);
- if ( !pHead.ptr() )
+ if ( !pHead.ptr())
break;
- marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed) );
- if ( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed ))
+ marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed));
+ if ( cds_unlikely( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed )))
continue;
- if ( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+ if ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed )))
continue;
}
--m_ItemCounter;
- dispose_node( pHead.ptr() );
+ dispose_node( pHead.ptr());
}
}
}
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ /// (the \p m_Stat object updated by the insert/erase/find/update paths)
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
+ // Resets the node to a link-free state: publishes a null next-pointer
+ // (release store) and detaches the node from the deferred-removal chain.
+ static void clear_links( node_type * pNode )
+ {
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
+ pNode->m_pDelChain = nullptr;
+ }
+
+ // Retires a single node's value through the RCU GC using clear_and_dispose.
+ // Must be called OUTSIDE the RCU read-side critical section (asserted below),
+ // because retirement may trigger synchronization/reclamation.
+ static void dispose_node( node_type * pNode )
+ {
+ assert( pNode );
+ assert( !gc::is_locked());
+
+ gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ));
+ }
+
+ // Retires an entire chain of removed nodes (linked via m_pDelChain) in one
+ // batch. The lambda acts as a generator: each call pops the chain head and
+ // yields it as a retired_ptr; a null retired_ptr signals end-of-sequence to
+ // gc::batch_retire. Must be called outside the RCU lock (asserted).
+ static void dispose_chain( node_type * pChain )
+ {
+ if ( pChain ) {
+ assert( !gc::is_locked());
+
+ auto f = [&pChain]() -> cds::urcu::retired_ptr {
+ node_type * p = pChain;
+ if ( p ) {
+ pChain = p->m_pDelChain;
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
+ }
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr));
+ };
+ // std::ref: batch_retire must consume the SAME generator (and its pChain
+ // cursor) across calls, not a copy.
+ gc::batch_retire( std::ref( f ));
+ }
+ }
+
+ // Attempts to splice pNode into the list between pos.pPrev and pos.pCur.
+ // Returns true on success; on CAS failure (a concurrent writer changed
+ // pPrev) the node's next field is rolled back to null so the node stays
+ // "empty" for the caller's retry, and false is returned.
+ bool link_node( node_type * pNode, position& pos )
+ {
+ assert( pNode != nullptr );
+ link_checker::is_empty( pNode );
+
+ marked_node_ptr p( pos.pCur );
+ pNode->m_pNext.store( p, memory_model::memory_order_release );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ return true;
+
+ // Undo the speculative next-pointer so link_checker::is_empty() holds again.
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ return false;
+ }
+
+ // Pushes a physically-unlinked node onto the position's deferred-removal
+ // chain (LIFO via m_pDelChain); the chain is later retired in bulk by
+ // dispose_chain(). The node must not already be on a chain (asserted).
+ static void link_to_remove_chain( position& pos, node_type * pDel )
+ {
+ assert( pDel->m_pDelChain == nullptr );
+
+ pDel->m_pDelChain = pos.pDelChain;
+ pos.pDelChain = pDel;
+ }
+
+ // Two-phase removal of pos.pCur under the RCU lock (asserted).
+ // Phase 1 (logical): CAS nMask into the mark bits of pCur->m_pNext; the CAS
+ // expects an UNMARKED next pointer, so it fails (return false) if another
+ // thread already logically deleted the node — caller should re-search/retry.
+ // Phase 2 (physical): try to swing pPrev past pCur. On the fast path, an
+ // erase (nMask == erase_mask) defers the node to the removal chain; an
+ // extract presumably leaves reclamation to the caller via exempt_ptr —
+ // confirm against extract_at(). If the fast path CAS fails, re-searching
+ // lets search() helping-unlink the marked node (slow path).
+ bool unlink_node( position& pos, erase_node_mask nMask )
+ {
+ assert( gc::is_locked());
+
+ // Mark the node (logical deletion)
+ marked_node_ptr next( pos.pNext, 0 );
+
+ if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+
+ // Try physical removal - fast path
+ marked_node_ptr cur( pos.pCur );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
+ if ( nMask == erase_mask )
+ link_to_remove_chain( pos, pos.pCur );
+ }
+ else {
+ // Slow path
+ search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator());
+ }
+ return true;
+ }
+ return false;
+ }
+
// split-list support
bool insert_aux_node( node_type * pNode )
{
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
// We assume that comparator can correctly distinguish between aux and regular node.
- return insert_at( refHead, *node_traits::to_value_ptr( pNode ) );
+ return insert_at( refHead, *node_traits::to_value_ptr( pNode ));
}
bool insert_at( atomic_node_ptr& refHead, value_type& val )
{
rcu_lock l;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator()))
+ if ( search( refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
f( val );
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
for (;;) {
{
rcu_lock l;
- if ( !search( refHead, val, pos, key_comparator() ) || node_traits::to_value_ptr( *pos.pCur ) != &val )
+ if ( !search( refHead, val, pos, key_comparator()) || node_traits::to_value_ptr( *pos.pCur ) != &val ) {
+ m_Stat.onEraseFailed();
return false;
+ }
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
for (;;) {
{
rcu_lock l;
- if ( !search( pos.refHead, val, pos, cmp ) )
+ if ( !search( pos.refHead, val, pos, cmp )) {
+ m_Stat.onEraseFailed();
return false;
+ }
+
// store pCur since it may be changed by unlink_node() slow path
pDel = pos.pCur;
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
assert( pDel );
- f( *node_traits::to_value_ptr( pDel ) );
+ f( *node_traits::to_value_ptr( pDel ));
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
{
position pos( refHead );
back_off bkoff;
- assert( !gc::is_locked() ) ; // RCU must not be locked!!!
+ assert( !gc::is_locked()) ; // RCU must not be locked!!!
node_type * pExtracted;
{
rcu_lock l;
for (;;) {
- if ( !search( refHead, val, pos, cmp ) )
+ if ( !search( refHead, val, pos, cmp )) {
+ m_Stat.onEraseFailed();
return nullptr;
+ }
+
// store pCur since it may be changed by unlink_node() slow path
pExtracted = pos.pCur;
if ( !unlink_node( pos, extract_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
--m_ItemCounter;
value_type * pRet = node_traits::to_value_ptr( pExtracted );
assert( pExtracted->m_pDelChain == nullptr );
+ m_Stat.onEraseSuccess();
return pRet;
}
}
{
rcu_lock l;
- if ( search( refHead, val, pos, cmp ) ) {
+ if ( search( refHead, val, pos, cmp )) {
assert( pos.pCur != nullptr );
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
- return false;
- }
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare>
raw_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
// RCU should be locked!
- assert(gc::is_locked() );
+ assert(gc::is_locked());
position pos( refHead );
- if ( search( refHead, val, pos, cmp ))
+ if ( search( refHead, val, pos, cmp )) {
+ m_Stat.onFindSuccess();
return raw_ptr( node_traits::to_value_ptr( pos.pCur ), raw_ptr_disposer( pos ));
+ }
+
+ m_Stat.onFindFailed();
return raw_ptr( raw_ptr_disposer( pos ));
}
//@endcond
bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp )
{
// RCU lock should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
atomic_node_ptr * pPrev;
marked_node_ptr pNext;
pNext = nullptr;
while ( true ) {
- if ( !pCur.ptr() ) {
+ if ( !pCur.ptr()) {
pos.pPrev = pPrev;
pos.pCur = nullptr;
pos.pNext = nullptr;
pNext = pCur->m_pNext.load(memory_model::memory_order_acquire);
- if ( pPrev->load(memory_model::memory_order_acquire) != pCur
- || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire))
+ if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur
+ || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire )))
{
bkoff();
goto try_again;
}
- if ( pNext.bits() ) {
+ if ( pNext.bits()) {
// pCur is marked as deleted. Try to unlink it from the list
- if ( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( cds_likely( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
if ( pNext.bits() == erase_mask )
- link_to_remove_chain( pos, pCur.ptr() );
+ link_to_remove_chain( pos, pCur.ptr());
+ m_Stat.onHelpingSuccess();
}
+ m_Stat.onHelpingFailed();
goto try_again;
}
assert( pCur.ptr() != nullptr );
- int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
pos.pCur = pCur.ptr();
bool insert_at_locked( position& pos, value_type& val )
{
// RCU lock should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
while ( true ) {
- if ( search( pos.refHead, val, pos, key_comparator() ) )
+ if ( search( pos.refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
std::pair<iterator, bool> update_at_locked( position& pos, value_type& val, Func func, bool bInsert )
{
// RCU should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
while ( true ) {
- if ( search( pos.refHead, val, pos, key_comparator() ) ) {
- assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 );
+ if ( search( pos.refHead, val, pos, key_comparator())) {
+ assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
- if ( !bInsert )
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
++m_ItemCounter;
func( true, val , val );
+ m_Stat.onUpdateNew();
return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
// clear the next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onUpdateRetry();
}
}
}
template <typename Q, typename Compare>
const_iterator find_at_locked( position& pos, Q const& val, Compare cmp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
- if ( search( pos.refHead, val, pos, cmp ) ) {
+ if ( search( pos.refHead, val, pos, cmp )) {
assert( pos.pCur != nullptr );
+ m_Stat.onFindSuccess();
return const_iterator( pos.pCur );
}
+
+ m_Stat.onFindFailed();
return cend();
}
//@endcond