-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
#define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
assert( nLevel < height() );
assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
- return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
+# ifdef CDS_THREAD_SANITIZER_ENABLED
+ // TSan false positive: m_arrNext is read-only array
+ CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+ atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+ CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+ return r;
+# else
+ return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+# endif
}
/// Access to element of next pointer array (const version)
assert( nLevel < height() );
assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
- return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
+# ifdef CDS_THREAD_SANITIZER_ENABLED
+ // TSan false positive: m_arrNext is read-only array
+ CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
+            atomic_marked_ptr const& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+ CDS_TSAN_ANNOTATE_IGNORE_READS_END;
+ return r;
+# else
+ return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
+# endif
}
/// Access to element of next pointer array (same as \ref next function)
protected:
void next()
{
- // RCU should be locked before iterating!!!
- assert( gc::is_locked() );
-
back_off bkoff;
for (;;) {
iterator( node_type& refHead )
: m_pNode( nullptr )
{
- // RCU should be locked before iterating!!!
- assert( gc::is_locked() );
-
back_off bkoff;
for (;;) {
public:
iterator()
: m_pNode( nullptr )
- {
- // RCU should be locked before iterating!!!
- assert( gc::is_locked() );
- }
+ {}
iterator( iterator const& s)
: m_pNode( s.m_pNode )
- {
- // RCU should be locked before iterating!!!
- assert( gc::is_locked() );
- }
+ {}
value_type * operator ->() const
{
//@cond
static unsigned int const c_nMinHeight = 5;
- typedef cds::intrusive::skip_list::implementation_tag implementation_tag;
//@endcond
protected:
for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
while ( true ) {
- pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
if ( pCur.bits() ) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
- pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
goto retry;
if ( pSucc.bits() ) {
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
+# ifdef _DEBUG
+ if ( nLevel == 0 )
+ pCur->m_bUnlinked = true;
+# endif
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
-# ifdef _DEBUG
- pCur->m_bUnlinked = true;
-# endif
-
if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment since RCU is locked
// Link deleted nodes to a chain to free later
for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
- pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
// pCur.bits() means that pPred is logically deleted
// head cannot be deleted
assert( pCur.bits() == 0 );
if ( pCur.ptr() ) {
// pSucc contains deletion mark for pCur
- pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
goto retry;
if ( pSucc.bits() ) {
// pCur is marked, i.e. logically deleted.
+# ifdef _DEBUG
+ if ( nLevel == 0 )
+ pCur->m_bUnlinked = true;
+# endif
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
-# ifdef _DEBUG
- pCur->m_bUnlinked = true;
-# endif
-
if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment since RCU is locked
// Link deleted nodes to a chain to free later
for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
while ( true ) {
- pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
if ( pCur.bits() ) {
// pCur.bits() means that pPred is logically deleted
goto retry;
}
// pSucc contains deletion mark for pCur
- pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
+ pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
- if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
+ if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
goto retry;
if ( pSucc.bits() ) {
// pCur is marked, i.e. logically deleted.
+# ifdef _DEBUG
+ if ( nLevel == 0 )
+ pCur->m_bUnlinked = true;
+# endif
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
-# ifdef _DEBUG
- pCur->m_bUnlinked = true;
-# endif
-
if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment since RCU is locked
// Link deleted nodes to a chain to free later
}
else
m_Stat.onFastExtract();
-
return true;
}
+ m_Stat.onEraseRetry();
}
}
}
public:
- /// Iterator type
+ ///@name Forward iterators (thread-safe under RCU lock)
+ //@{
+ /// Forward iterator
+ /**
+ The forward iterator has some features:
+ - it has no post-increment operator
+ - it depends on iterator of underlying \p OrderedList
+
+            You may safely use iterators in a multi-threaded environment only under RCU lock.
+ Otherwise, a crash is possible if another thread deletes the element the iterator points to.
+ */
typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
/// Const iterator type
{
return const_iterator();
}
+ //@}
public:
/// Inserts new node
return bRet;
}
- /// Ensures that the \p val exists in the set
+ /// Updates the node
/**
The operation performs inserting or changing data with lock-free manner.
- If the item \p val is not found in the set, then \p val is inserted into the set.
+ If the item \p val is not found in the set, then \p val is inserted into the set
+ iff \p bInsert is \p true.
Otherwise, the functor \p func is called with item found.
The functor signature is:
\code
with arguments:
- \p bNew - \p true if the item has been inserted, \p false otherwise
- \p item - item of the set
- - \p val - argument \p val passed into the \p %ensure() function
+ - \p val - argument \p val passed into the \p %update() function
If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
refer to the same thing.
RCU \p synchronize method can be called. RCU should not be locked.
- Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+            Returns std::pair<bool, bool> where \p first is \p true if the operation is successful,
+ i.e. the node has been inserted or updated,
\p second is \p true if new item has been added or \p false if the item with \p key
- already is in the set.
+ already exists.
@warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
*/
template <typename Func>
- std::pair<bool, bool> ensure( value_type& val, Func func )
+ std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
{
check_deadlock_policy::check();
scp.release();
func( false, *node_traits::to_value_ptr(pos.pCur), val );
- m_Stat.onEnsureExist();
+ m_Stat.onUpdateExist();
+ break;
+ }
+
+ if ( !bInsert ) {
+ scp.release();
+ bRet.first = false;
break;
}
++m_ItemCounter;
scp.release();
m_Stat.onAddNode( nHeight );
- m_Stat.onEnsureNew();
+ m_Stat.onUpdateNew();
bRet.second = true;
break;
}
return bRet;
}
+ //@cond
+ template <typename Func>
+ CDS_DEPRECATED("ensure() is deprecated, use update()")
+ std::pair<bool, bool> ensure( value_type& val, Func func )
+ {
+ return update( val, func, true );
+ }
+ //@endcond
/// Unlinks the item \p val from the set
/**
}
//@endcond
- /// Finds \p key
- /** @anchor cds_intrusive_SkipListSet_rcu_find_val
+ /// Checks whether the set contains \p key
+ /**
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.
The function applies RCU lock internally.
*/
template <typename Q>
- bool find( Q const& key )
+ bool contains( Q const& key )
{
return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
}
+ //@cond
+ template <typename Q>
+ CDS_DEPRECATED("deprecated, use contains()")
+ bool find( Q const& key )
+ {
+ return contains( key );
+ }
+ //@endcond
- /// Finds \p key with comparing functor \p pred
+ /// Checks whether the set contains \p key using \p pred predicate for searching
/**
- The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
- but \p pred is used for key compare.
+            The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparison.
\p Less functor has the interface like \p std::less.
- \p pred must imply the same element order as the comparator used for building the set.
+ \p Less must imply the same element order as the comparator used for building the set.
*/
template <typename Q, typename Less>
- bool find_with( Q const& key, Less pred )
+ bool contains( Q const& key, Less pred )
{
CDS_UNUSED( pred );
return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
}
+ //@cond
+ template <typename Q, typename Less>
+ CDS_DEPRECATED("deprecated, use contains()")
+ bool find_with( Q const& key, Less pred )
+ {
+ return contains( key, pred );
+ }
+ //@endcond
/// Finds \p key and return the item found
/** \anchor cds_intrusive_SkipListSet_rcu_get
CDS_UNUSED( pred );
assert( gc::is_locked());
- value_type * pFound;
+ value_type * pFound = nullptr;
position pos;
if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
[&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))