/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
-
+
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H
/// Constructs leaf (bInternal == false) or internal (bInternal == true) node
explicit base_node( bool bInternal )
- : basic_node( bInternal ? internal : 0 )
+ : basic_node( bInternal )
, m_pNextRetired( nullptr )
{}
};
the priority value plus some uniformly distributed random value.
@attention Recall the tree is <b>unbalanced</b>. The complexity of operations is <tt>O(log N)</tt>
- for uniformly distributed random keys, but in worst case the complexity is <tt>O(N)</tt>.
+ for uniformly distributed random keys, but in the worst case the complexity is <tt>O(N)</tt>.
@note In the current implementation we do not use helping technique described in the original paper.
Instead of helping, when a thread encounters a concurrent operation it just spins waiting for
protected:
//@cond
- static void free_leaf_node( value_type * p )
+ static void free_leaf_node( value_type* p )
{
disposer()( p );
}
+ static void free_leaf_node_void( void* p )
+ {
+ free_leaf_node( reinterpret_cast<value_type*>( p ));
+ }
internal_node * alloc_internal_node() const
{
return pNode;
}
- static void free_internal_node( internal_node * pNode )
+ static void free_internal_node( internal_node* pNode )
{
cxx_node_allocator().Delete( pNode );
}
+ static void free_internal_node_void( void* pNode )
+ {
+ free_internal_node( reinterpret_cast<internal_node*>( pNode ));
+ }
struct internal_node_deleter {
void operator()( internal_node * p) const
return cxx_update_desc_allocator().New();
}
- static void free_update_desc( update_desc * pDesc )
+ static void free_update_desc( update_desc* pDesc )
{
cxx_update_desc_allocator().Delete( pDesc );
}
+ static void free_update_desc_void( void* pDesc )
+ {
+ free_update_desc( reinterpret_cast<update_desc*>( pDesc ));
+ }
class retired_list
{
cds::urcu::retired_ptr operator *()
{
if ( m_pUpdate ) {
- return cds::urcu::retired_ptr( reinterpret_cast<void *>( m_pUpdate ),
- reinterpret_cast<cds::urcu::free_retired_ptr_func>( free_update_desc ));
+ return cds::urcu::retired_ptr( reinterpret_cast<void *>( m_pUpdate ), free_update_desc_void );
}
if ( m_pNode ) {
if ( m_pNode->is_leaf()) {
return cds::urcu::retired_ptr( reinterpret_cast<void *>( node_traits::to_value_ptr( static_cast<leaf_node *>( m_pNode ))),
- reinterpret_cast< cds::urcu::free_retired_ptr_func>( free_leaf_node ));
+ free_leaf_node_void );
}
else {
return cds::urcu::retired_ptr( reinterpret_cast<void *>( static_cast<internal_node *>( m_pNode )),
- reinterpret_cast<cds::urcu::free_retired_ptr_func>( free_internal_node ));
+ free_internal_node_void );
}
}
- return cds::urcu::retired_ptr( nullptr,
- reinterpret_cast<cds::urcu::free_retired_ptr_func>( free_update_desc ));
+ return cds::urcu::retired_ptr( nullptr, free_update_desc_void );
}
void operator ++()
RCU \p synchronize method can be called. RCU should not be locked.
- Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
+ Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
i.e. the node has been inserted or updated,
\p second is \p true if new item has been added or \p false if the item with \p key
already exists.
func( false, *node_traits::to_value_ptr( res.pLeaf ), val );
if ( pNewInternal.get())
m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node
- m_Stat.onEnsureExist();
+ m_Stat.onUpdateExist();
return std::make_pair( true, false );
}
help( res.updParent, updRetire );
bkoff();
- m_Stat.onEnsureRetry();
+ m_Stat.onUpdateRetry();
}
}
++m_ItemCounter;
- m_Stat.onEnsureNew();
+ m_Stat.onUpdateNew();
return std::make_pair( true, true );
}
unlinks it from the tree, and returns \p true.
If the item with key equal to \p key is not found the function returns \p false.
- Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q
+ Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q
that can be not the same as \p value_type.
RCU \p synchronize method can be called. RCU should not be locked.
If the item with key equal to \p key is not found the function returns \p false.
- Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q
+ Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q
that can be not the same as \p value_type.
RCU \p synchronize method can be called. RCU should not be locked.
tree_node * pLeaf = static_cast<tree_node *>( pOp->iInfo.pLeaf );
if ( pOp->iInfo.bRightLeaf ) {
pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
else {
pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
update_ptr cur( pOp, update_desc::IFlag );
update_ptr updGP( res.updGrandParent.ptr());
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
if ( help_delete( pOp, updRetire )) {
// res.pLeaf is not deleted yet since RCU is blocked
update_ptr updGP( res.updGrandParent.ptr());
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
if ( help_delete( pOp, updRetire )) {
pResult = node_traits::to_value_ptr( res.pLeaf );
update_ptr updGP( res.updGrandParent.ptr());
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
if ( help_delete( pOp, updRetire )) {
pResult = node_traits::to_value_ptr( res.pLeaf );
update_ptr updGP( res.updGrandParent.ptr());
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
if ( help_delete( pOp, updRetire )) {
pResult = node_traits::to_value_ptr( res.pLeaf );
pNewInternal->infinite_key( 1 );
}
pNewInternal->m_pLeft.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_relaxed );
- pNewInternal->m_pRight.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_release );
+ pNewInternal->m_pRight.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_relaxed );
}
else {
assert( !res.pLeaf->is_internal());
key_extractor()( pNewInternal->m_Key, val );
pNewInternal->m_pLeft.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_relaxed );
- pNewInternal->m_pRight.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_release );
+ pNewInternal->m_pRight.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_relaxed );
assert( !res.pLeaf->infinite_key());
}
update_ptr updCur( res.updParent.ptr());
if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ),
- memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
{
// do insert
help_insert( pOp );