-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H
#define CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits
typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker
- typedef cds::urcu::gc<RCU> gc; ///< RCU schema
- typedef typename traits::back_off back_off; ///< back-off strategy
- typedef typename traits::item_counter item_counter; ///< Item counting policy used
- typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
- typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef cds::urcu::gc<RCU> gc; ///< RCU schema
+ typedef typename traits::back_off back_off; ///< back-off strategy
+ typedef typename traits::item_counter item_counter; ///< Item counting policy used
+ typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
+ typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
+ typedef typename traits::stat stat; ///< Internal statistics
typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock
static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking
, typename cds::opt::make_options< traits, Options...>::type
> type;
};
+
+ // Stat selector
+ template <typename Stat>
+ using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >;
//@endcond
protected:
- typedef typename node_type::marked_ptr marked_node_ptr ; ///< Marked node pointer
- typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic node pointer
- typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support)
+ typedef typename node_type::marked_ptr marked_node_ptr; ///< Marked node pointer
+ typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer
+ typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support)
- atomic_node_ptr m_pHead ; ///< Head pointer
- item_counter m_ItemCounter ; ///< Item counter
+ atomic_node_ptr m_pHead; ///< Head pointer
+ item_counter m_ItemCounter; ///< Item counter
+ stat m_Stat; ///< Internal statistics
protected:
//@cond
typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
- static void clear_links( node_type * pNode )
- {
- pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
- pNode->m_pDelChain = nullptr;
- }
-
struct clear_and_dispose {
void operator()( value_type * p )
{
}
};
- static void dispose_node( node_type * pNode )
- {
- assert( pNode );
- assert( !gc::is_locked() );
-
- gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ) );
- }
-
- static void dispose_chain( node_type * pChain )
- {
- if ( pChain ) {
- assert( !gc::is_locked() );
-
- auto f = [&pChain]() -> cds::urcu::retired_ptr {
- node_type * p = pChain;
- if ( p ) {
- pChain = p->m_pDelChain;
- return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
- }
- return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr));
- };
- gc::batch_retire(std::ref(f));
- }
- }
-
/// Position pointer for item search
struct position {
atomic_node_ptr * pPrev ; ///< Previous node
dispose_chain( pDelChain );
}
};
-
//@endcond
public:
/// Result of \p get(), \p get_with() functions - pointer to the node found
typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
- protected:
- //@cond
-
- bool link_node( node_type * pNode, position& pos )
- {
- assert( pNode != nullptr );
- link_checker::is_empty( pNode );
-
- marked_node_ptr p( pos.pCur );
- pNode->m_pNext.store( p, memory_model::memory_order_release );
- return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
- }
-
- static void link_to_remove_chain( position& pos, node_type * pDel )
- {
- assert( pDel->m_pDelChain == nullptr );
-
- pDel->m_pDelChain = pos.pDelChain;
- pos.pDelChain = pDel;
- }
-
- bool unlink_node( position& pos, erase_node_mask nMask )
- {
- assert(gc::is_locked() );
-
- // Mark the node (logical deletion)
- marked_node_ptr next(pos.pNext, 0);
-
- if ( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
-
- // Try physical removal - fast path
- marked_node_ptr cur(pos.pCur);
- if ( pos.pPrev->compare_exchange_strong(cur, marked_node_ptr(pos.pNext), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
- if ( nMask == erase_mask )
- link_to_remove_chain( pos, pos.pCur );
- }
- else {
- // Slow path
- search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator() );
- }
- return true;
- }
- return false;
- }
- //@endcond
-
protected:
//@cond
template <bool IsConst>
//@endcond
public:
+ ///@name Forward iterators (thread-safe only under RCU lock)
+ //@{
/// Forward iterator
+ /**
+ You may safely use iterators in a multi-threaded environment only under RCU lock.
+ Otherwise, a crash is possible if another thread deletes the item the iterator points to.
+ */
typedef iterator_type<false> iterator;
+
/// Const forward iterator
typedef iterator_type<true> const_iterator;
{
return const_iterator();
}
+ //@}
public:
/// Default constructor initializes empty list
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
}
+ //@cond
+ template <typename Stat, typename = std::enable_if<std::is_same<stat, michael_list::wrapped_stat<Stat>>::value >>
+ explicit MichaelList( Stat& st )
+ : m_pHead( nullptr )
+ , m_Stat( st )
+ {}
+ //@endcond
+
/// Destroy list
~MichaelList()
{
The functor may change non-key fields of the \p item; however, \p func must guarantee
that during changing no any other modifications could be made on this item by concurrent threads.
- Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
+ Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
\p second is \p true if new item has been added or \p false if the item with \p key
already is in the list.
{
return update( val, func, true );
}
+ //@endcond
/// Unlinks the item \p val from the list
/**
The function searches the item \p val in the list and unlink it from the list
if it is found and it is equal to \p val.
- Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
- and deletes the item found. \p unlink finds an item by key and deletes it
+ Difference between \p erase() and \p %unlink() functions: \p %erase() finds <i>a key</i>
+ and deletes the item found. \p %unlink() finds an item by key and deletes it
only if \p val is an item of that list, i.e. the pointer to the item found
is equal to <tt> &val </tt>.
RCU \p synchronize method can be called.
Note that depending on RCU type used the \ref disposer call can be deferred.
+ \p disposer specified in \p Traits is called for unlinked item.
+
The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and
deadlock checking policy is opt::v::rcu_throw_deadlock.
*/
}
/// Deletes the item from the list
- /** \anchor cds_intrusive_MichaelList_rcu_erase_val
+ /**
The function searches an item with key equal to \p key in the list,
unlinks it from the list, and returns \p true.
If the item with the key equal to \p key is not found the function return \p false.
RCU \p synchronize method can be called.
Note that depending on RCU type used the \ref disposer call can be deferred.
+ \p disposer specified in \p Traits is called for deleted item.
+
The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and
the deadlock checking policy is \p opt::v::rcu_throw_deadlock.
*/
template <typename Q>
bool erase( Q const& key )
{
- return erase_at( m_pHead, key, key_comparator() );
+ return erase_at( m_pHead, key, key_comparator());
}
/// Deletes the item from the list using \p pred predicate for searching
/**
- The function is an analog of \ref cds_intrusive_MichaelList_rcu_erase_val "erase(Q const&)"
+ The function is an analog of \p erase(Q const&)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q, typename Less>
bool erase_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() );
+ return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
/// Deletes the item from the list
- /** \anchor cds_intrusive_MichaelList_rcu_erase_func
+ /**
The function searches an item with key equal to \p key in the list,
call \p func functor with item found, unlinks it from the list, and returns \p true.
The \p Func interface is
RCU \p synchronize method can be called.
Note that depending on RCU type used the \ref disposer call can be deferred.
+ \p disposer specified in \p Traits is called for deleted item.
+
The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and
the deadlock checking policy is \p opt::v::rcu_throw_deadlock.
*/
/// Deletes the item from the list using \p pred predicate for searching
/**
- The function is an analog of \ref cds_intrusive_MichaelList_rcu_erase_func "erase(Q const&, Func)"
+ The function is an analog of \p erase(Q const&, Func)
but \p pred is used for key comparing.
\p Less functor has the interface like \p std::less.
\p pred must imply the same element order as the comparator used for building the list.
+
+ \p disposer specified in \p Traits is called for deleted item.
*/
template <typename Q, typename Less, typename Func>
bool erase_with( Q const& key, Less pred, Func func )
/// Extracts an item from the list
/**
- @anchor cds_intrusive_MichaelList_rcu_extract
The function searches an item with key equal to \p key in the list,
unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
If \p key is not found the function returns an empty \p exempt_ptr.
@note The function does NOT dispose the item found. It just unlinks the item from the list
and returns a pointer to item found.
You shouldn't lock RCU for current thread before calling this function, and you should manually release
- \p dest exempt pointer outside the RCU lock before reusing it.
+ the returned exempt pointer before reusing it.
\code
#include <cds/urcu/general_buffered.h>
rcu_michael_list::exempt_ptr p1;
// The RCU should NOT be locked when extract() is called!
- assert( !rcu::is_locked() );
+ assert( !rcu::is_locked());
// You can call extract() function
p1 = theList.extract( 10 );
template <typename Q>
exempt_ptr extract( Q const& key )
{
- return exempt_ptr( extract_at( m_pHead, key, key_comparator() ));
+ return exempt_ptr( extract_at( m_pHead, key, key_comparator()));
}
/// Extracts an item from the list using \p pred predicate for searching
exempt_ptr extract_with( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() ));
+ return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>()));
}
/// Find the key \p val
- /** \anchor cds_intrusive_MichaelList_rcu_find_func
+ /**
The function searches the item with key equal to \p key
and calls the functor \p f for item found.
The interface of \p Func functor is:
template <typename Q>
bool contains( Q const& key )
{
- return find_at( m_pHead, key, key_comparator() );
+ return find_at( m_pHead, key, key_comparator());
}
//@cond
template <typename Q>
bool contains( Q const& key, Less pred )
{
CDS_UNUSED( pred );
- return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>() );
+ return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less<Less>());
}
//@cond
template <typename Q, typename Less>
RCU \p synchronize method can be called.
Note that depending on RCU type used the \ref disposer invocation can be deferred.
- The function can throw \p cds::urcu::rcu_deadlock exception if an deadlock is encountered and
+ The function can throw \p cds::urcu::rcu_deadlock exception if a deadlock is encountered and
deadlock checking policy is \p opt::v::rcu_throw_deadlock.
*/
void clear()
{
- if( !empty() ) {
+ if( !empty()) {
check_deadlock_policy::check();
marked_node_ptr pHead;
{
rcu_lock l;
pHead = m_pHead.load(memory_model::memory_order_acquire);
- if ( !pHead.ptr() )
+ if ( !pHead.ptr())
break;
- marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed) );
- if ( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed ))
+ marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed));
+ if ( cds_unlikely( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed )))
continue;
- if ( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+ if ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed )))
continue;
}
--m_ItemCounter;
- dispose_node( pHead.ptr() );
+ dispose_node( pHead.ptr());
}
}
}
return m_ItemCounter.value();
}
+ /// Returns const reference to internal statistics
+ stat const& statistics() const
+ {
+ return m_Stat;
+ }
+
protected:
//@cond
+ static void clear_links( node_type * pNode )
+ {
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release );
+ pNode->m_pDelChain = nullptr;
+ }
+
+ static void dispose_node( node_type * pNode )
+ {
+ assert( pNode );
+ assert( !gc::is_locked());
+
+ gc::template retire_ptr<clear_and_dispose>( node_traits::to_value_ptr( *pNode ));
+ }
+
+ static void dispose_chain( node_type * pChain )
+ {
+ if ( pChain ) {
+ assert( !gc::is_locked());
+
+ auto f = [&pChain]() -> cds::urcu::retired_ptr {
+ node_type * p = pChain;
+ if ( p ) {
+ pChain = p->m_pDelChain;
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( node_traits::to_value_ptr( p ));
+ }
+ return cds::urcu::make_retired_ptr<clear_and_dispose>( static_cast<value_type *>(nullptr));
+ };
+ gc::batch_retire( std::ref( f ));
+ }
+ }
+
+ bool link_node( node_type * pNode, position& pos )
+ {
+ assert( pNode != nullptr );
+ link_checker::is_empty( pNode );
+
+ marked_node_ptr p( pos.pCur );
+ pNode->m_pNext.store( p, memory_model::memory_order_release );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )))
+ return true;
+
+ pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ return false;
+ }
+
+ static void link_to_remove_chain( position& pos, node_type * pDel )
+ {
+ assert( pDel->m_pDelChain == nullptr );
+
+ pDel->m_pDelChain = pos.pDelChain;
+ pos.pDelChain = pDel;
+ }
+
+ bool unlink_node( position& pos, erase_node_mask nMask )
+ {
+ assert( gc::is_locked());
+
+ // Mark the node (logical deletion)
+ marked_node_ptr next( pos.pNext, 0 );
+
+ if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ))) {
+
+ // Try physical removal - fast path
+ marked_node_ptr cur( pos.pCur );
+ if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
+ if ( nMask == erase_mask )
+ link_to_remove_chain( pos, pos.pCur );
+ }
+ else {
+ // Slow path
+ search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator());
+ }
+ return true;
+ }
+ return false;
+ }
+
// split-list support
bool insert_aux_node( node_type * pNode )
{
// Hack: convert node_type to value_type.
// In principle, auxiliary node can be non-reducible to value_type
// We assume that comparator can correctly distinguish between aux and regular node.
- return insert_at( refHead, *node_traits::to_value_ptr( pNode ) );
+ return insert_at( refHead, *node_traits::to_value_ptr( pNode ));
}
bool insert_at( atomic_node_ptr& refHead, value_type& val )
template <typename Func>
bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f )
{
- link_checker::is_empty( node_traits::to_node_ptr( val ) );
position pos( refHead );
{
rcu_lock l;
while ( true ) {
- if ( search( refHead, val, pos, key_comparator()))
+ if ( search( refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
f( val );
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
for (;;) {
{
rcu_lock l;
- if ( !search( refHead, val, pos, key_comparator() ) || node_traits::to_value_ptr( *pos.pCur ) != &val )
+ if ( !search( refHead, val, pos, key_comparator()) || node_traits::to_value_ptr( *pos.pCur ) != &val ) {
+ m_Stat.onEraseFailed();
return false;
+ }
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
back_off bkoff;
check_deadlock_policy::check();
+ node_type * pDel;
for (;;) {
{
rcu_lock l;
- if ( !search( pos.refHead, val, pos, cmp ) )
+ if ( !search( pos.refHead, val, pos, cmp )) {
+ m_Stat.onEraseFailed();
return false;
+ }
+
+ // store pCur since it may be changed by unlink_node() slow path
+ pDel = pos.pCur;
if ( !unlink_node( pos, erase_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
}
-
- f( *node_traits::to_value_ptr( *pos.pCur ) );
+ assert( pDel );
+ f( *node_traits::to_value_ptr( pDel ));
--m_ItemCounter;
+ m_Stat.onEraseSuccess();
return true;
}
}
{
position pos( refHead );
back_off bkoff;
- assert( !gc::is_locked() ) ; // RCU must not be locked!!!
+ assert( !gc::is_locked()) ; // RCU must not be locked!!!
node_type * pExtracted;
{
rcu_lock l;
for (;;) {
- if ( !search( refHead, val, pos, cmp ) )
+ if ( !search( refHead, val, pos, cmp )) {
+ m_Stat.onEraseFailed();
return nullptr;
+ }
+
// store pCur since it may be changed by unlink_node() slow path
pExtracted = pos.pCur;
if ( !unlink_node( pos, extract_mask )) {
bkoff();
+ m_Stat.onEraseRetry();
continue;
}
--m_ItemCounter;
value_type * pRet = node_traits::to_value_ptr( pExtracted );
assert( pExtracted->m_pDelChain == nullptr );
+ m_Stat.onEraseSuccess();
return pRet;
}
}
{
rcu_lock l;
- if ( search( refHead, val, pos, cmp ) ) {
+ if ( search( refHead, val, pos, cmp )) {
assert( pos.pCur != nullptr );
f( *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onFindSuccess();
return true;
}
- return false;
- }
+ }
+
+ m_Stat.onFindFailed();
+ return false;
}
template <typename Q, typename Compare>
raw_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp )
{
// RCU should be locked!
- assert(gc::is_locked() );
+ assert(gc::is_locked());
position pos( refHead );
- if ( search( refHead, val, pos, cmp ))
+ if ( search( refHead, val, pos, cmp )) {
+ m_Stat.onFindSuccess();
return raw_ptr( node_traits::to_value_ptr( pos.pCur ), raw_ptr_disposer( pos ));
+ }
+
+ m_Stat.onFindFailed();
return raw_ptr( raw_ptr_disposer( pos ));
}
//@endcond
bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp )
{
// RCU lock should be locked!!!
- assert( gc::is_locked() );
+ assert( gc::is_locked());
atomic_node_ptr * pPrev;
marked_node_ptr pNext;
pNext = nullptr;
while ( true ) {
- if ( !pCur.ptr() ) {
+ if ( !pCur.ptr()) {
pos.pPrev = pPrev;
pos.pCur = nullptr;
pos.pNext = nullptr;
pNext = pCur->m_pNext.load(memory_model::memory_order_acquire);
- if ( pPrev->load(memory_model::memory_order_acquire) != pCur
- || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire))
+ if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur
+ || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire )))
{
bkoff();
goto try_again;
}
- if ( pNext.bits() ) {
+ if ( pNext.bits()) {
// pCur is marked as deleted. Try to unlink it from the list
- if ( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
+ if ( cds_likely( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) {
if ( pNext.bits() == erase_mask )
- link_to_remove_chain( pos, pCur.ptr() );
+ link_to_remove_chain( pos, pCur.ptr());
+ m_Stat.onHelpingSuccess();
}
+ m_Stat.onHelpingFailed();
goto try_again;
}
assert( pCur.ptr() != nullptr );
- int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
+ int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
if ( nCmp >= 0 ) {
pos.pPrev = pPrev;
pos.pCur = pCur.ptr();
bool insert_at_locked( position& pos, value_type& val )
{
// RCU lock should be locked!!!
- assert( gc::is_locked() );
- link_checker::is_empty( node_traits::to_node_ptr( val ) );
+ assert( gc::is_locked());
while ( true ) {
- if ( search( pos.refHead, val, pos, key_comparator() ) )
+ if ( search( pos.refHead, val, pos, key_comparator())) {
+ m_Stat.onInsertFailed();
return false;
+ }
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
++m_ItemCounter;
+ m_Stat.onInsertSuccess();
return true;
}
// clear next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onInsertRetry();
}
}
template <typename Func>
std::pair<iterator, bool> update_at_locked( position& pos, value_type& val, Func func, bool bInsert )
{
- // RCU lock should be locked!!!
- assert( gc::is_locked() );
+ // RCU should be locked!!!
+ assert( gc::is_locked());
while ( true ) {
- if ( search( pos.refHead, val, pos, key_comparator() ) ) {
- assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur ) ) == 0 );
+ if ( search( pos.refHead, val, pos, key_comparator())) {
+ assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 );
func( false, *node_traits::to_value_ptr( *pos.pCur ), val );
+ m_Stat.onUpdateExisting();
return std::make_pair( iterator( pos.pCur ), false );
}
else {
- if ( !bInsert )
+ if ( !bInsert ) {
+ m_Stat.onUpdateFailed();
return std::make_pair( end(), false );
+ }
- link_checker::is_empty( node_traits::to_node_ptr( val ) );
-
- if ( link_node( node_traits::to_node_ptr( val ), pos ) ) {
+ if ( link_node( node_traits::to_node_ptr( val ), pos )) {
++m_ItemCounter;
func( true, val , val );
+ m_Stat.onUpdateNew();
return std::make_pair( iterator( node_traits::to_node_ptr( val )), true );
}
// clear the next field
node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed );
+ m_Stat.onUpdateRetry();
}
}
}
template <typename Q, typename Compare>
const_iterator find_at_locked( position& pos, Q const& val, Compare cmp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
- if ( search( pos.refHead, val, pos, cmp ) ) {
+ if ( search( pos.refHead, val, pos, cmp )) {
assert( pos.pCur != nullptr );
+ m_Stat.onFindSuccess();
return const_iterator( pos.pCur );
}
+
+ m_Stat.onFindFailed();
return cend();
}
//@endcond