/** @ingroup cds_nonintrusive_map
@ingroup cds_nonintrusive_tree
@headerfile cds/container/bronson_avltree_map_rcu.h
+ @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of a copy of the value.
/// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
+ /// Group of \p extract_xxx functions does not require external locking
+ static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
+
# ifdef CDS_DOXYGEN_INVOKED
/// Returned pointer to \p mapped_type of extracted node
typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
// Deallocates \p pNode directly, bypassing the disposer.
// Preconditions: the node must no longer hold a value, and its
// sync-monitor injection must report itself free.
static void free_node( node_type * pNode )
{
    assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
    assert( pNode->m_SyncMonitorInjection.check_free());
    cxx_allocator().Delete( pNode );
}
disposer()(pVal);
}
- static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
+ static node_type * child( node_type * pNode, int nDir, atomics::memory_order order = memory_model::memory_order_relaxed )
{
return pNode->child( nDir ).load( order );
}
- static node_type * parent( node_type * pNode, atomics::memory_order order )
+ static node_type * parent( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
{
- return pNode->m_pParent.load( order );
+ return pNode->parent( order );
}
- // RCU safe disposer
+ // RCU safe disposer
class rcu_disposer
{
node_type * m_pRetiredList; ///< head of retired node list
pNode->m_pNextRemoved = m_pRetiredList;
m_pRetiredList = pNode;
}
-
+
void dispose_value( mapped_type pVal )
{
assert( m_pRetiredValue == nullptr );
m_pRetiredValue = pVal;
}
-
+
private:
struct internal_disposer
{
void clean()
{
assert( !gc::is_locked() );
-
+
// TODO: use RCU::batch_retire
// Dispose nodes
[pVal]( node_type * pNode ) -> mapped_type
{
assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
+ CDS_UNUSED( pNode );
return pVal;
- },
+ },
update_flags::allow_insert
) == update_flags::result_inserted;
}
\p second is \p true if new node has been added or \p false if the node with \p key
already exists.
*/
- template <typename K, typename Func>
+ template <typename K>
std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
{
int result = do_update( key, key_comparator(),
- [pVal]( node_type * ) -> mapped_type
+ [pVal]( node_type * ) -> mapped_type
{
return pVal;
},
- update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
+ update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
);
return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
+ //@cond
+ template <typename K>
+ std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
+ {
+ return update( key, pVal, true );
+ }
+
+ //@endcond
+
/// Delete \p key from the map
/**
RCU \p synchronize() method can be called. RCU should not be locked.
bool erase_with( K const& key, Less pred )
{
CDS_UNUSED( pred );
- return do_remove(
- key,
+ return do_remove(
+ key,
cds::opt::details::make_comparator_from_less<Less>(),
[]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
);
The functor \p Func interface:
\code
struct extractor {
- void operator()(mapped_type& item) { ... }
+ void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
};
\endcode
template <typename K, typename Func>
bool erase( K const& key, Func f )
{
- return do_remove(
- key,
- key_comparator(),
- [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ return do_remove(
+ key,
+ key_comparator(),
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
- disp.dispose_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
bool erase_with( K const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return do_remove(
- key,
+ return do_remove(
+ key,
cds::opt::details::make_comparator_from_less<Less>(),
- [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
+ [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
assert( pVal );
- f( *pVal );
- disp.dispose_value(pVal);
+ f( key, *pVal );
+ disp.dispose_value(pVal);
return true;
}
);
template <typename K, typename Func>
bool find( K const& key, Func f )
{
- return do_find( key, key_comparator(),
+ return do_find( key, key_comparator(),
[&f]( node_type * pNode ) -> bool {
assert( pNode != nullptr );
mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
bool find_with( K const& key, Less pred, Func f )
{
CDS_UNUSED( pred );
- return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
+ return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
[&f]( node_type * pNode ) -> bool {
assert( pNode != nullptr );
mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
return true;
}
return false;
- }
+ }
);
}
return m_stat;
}
+ /// Returns reference to \p sync_monitor object
+ sync_monitor& monitor()
+ {
+ return m_Monitor;
+ }
+ //@cond
+ sync_monitor const& monitor() const
+ {
+ return m_Monitor;
+ }
+ //@endcond
+
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
void operator()( size_t nLevel, size_t hLeft, size_t hRight );
};
\endcode
- where
+ where
- \p nLevel - the level where the violation is found
- \p hLeft - the height of left subtree
- \p hRight - the height of right subtree
template <typename Func>
bool check_consistency( Func f ) const
{
- node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
+ node_type * pChild = child( m_pRoot, right_child );
if ( pChild ) {
size_t nErrors = 0;
do_check_consistency( pChild, 1, f, nErrors );
size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
{
if ( pNode ) {
- size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
- size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
+ key_comparator cmp;
+ node_type * pLeft = child( pNode, left_child );
+ node_type * pRight = child( pNode, right_child );
+ if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
+ ++nErrors;
+ if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
+ ++nErrors;
+
+ size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
+ size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
if ( hLeft >= hRight ) {
if ( hLeft - hRight > 1 ) {
find_result result;
{
rcu_lock l;
- result = try_find( key, cmp, f, m_pRoot, 1, 0 );
+ result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
}
assert( result != find_result::retry );
return result == find_result::found;
{
rcu_lock l;
- int result = update_flags::failed;
- do {
+ while ( true ) {
+ int result;
+
// get right child of root
node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
if ( pChild ) {
- version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveRootWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
}
+ else
+ result = update_flags::retry;
}
- } while ( result == update_flags::retry );
+ else
+ return;
+
+ if ( result == update_flags::retry )
+ m_stat.onRemoveRetry();
+ }
}
}
);
return pExtracted;
}
-
//@endcond
private:
//@cond
+ static int height( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
+ {
+ assert( pNode );
+ return pNode->m_nHeight.load( order );
+ }
+ static void set_height( node_type * pNode, int h, atomics::memory_order order = memory_model::memory_order_relaxed )
+ {
+ assert( pNode );
+ pNode->m_nHeight.store( h, order );
+ }
+ static int height_null( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
+ {
+ return pNode ? height( pNode, order ) : 0;
+ }
+
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
{
assert( pNode );
while ( true ) {
- node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
+ node_type * pChild = child( pNode, nDir );
if ( !pChild ) {
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
- m_stat.onFindRetry();
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return find_result::retry;
- }
m_stat.onFindFailed();
return find_result::not_found;
m_stat.onFindWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
- m_stat.onFindRetry();
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return find_result::retry;
- }
}
- else if ( nChildVersion != node_type::unlinked ) {
-
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
- m_stat.onFindRetry();
+ else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
+ {
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
return find_result::retry;
- }
find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
if ( found != find_result::retry )
return found;
}
+
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ return find_result::retry;
+
+ m_stat.onFindRetry();
}
}
{
assert( gc::is_locked() );
- int result;
- do {
+ while ( true ) {
+ int result;
+
// get right child of root
node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
if ( pChild ) {
- version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onUpdateRootWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
result = update_flags::retry;
}
- else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
- result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
- }
- }
+ else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
+ result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
+ else
+ result = update_flags::retry;
+ }
else {
// the tree is empty
if ( nFlags & update_flags::allow_insert ) {
{
node_scoped_lock l( m_Monitor, *m_pRoot );
if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
- result = result == update_flags::retry;
+ result = update_flags::retry;
continue;
}
pNew->m_pValue.store( pVal, memory_model::memory_order_release );
m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
- m_pRoot->height( 2, memory_model::memory_order_relaxed );
+ set_height( m_pRoot, 2 );
}
++m_ItemCounter;
return update_flags::failed;
}
- } while ( result == update_flags::retry );
- return result;
+
+ if ( result == update_flags::retry )
+ m_stat.onUpdateRetry();
+ else
+ return result;
+ }
}
template <typename K, typename Compare, typename Func>
{
assert( gc::is_locked() );
- int result;
- do {
+ while ( true ) {
+ int result;
+
// get right child of root
node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
if ( pChild ) {
- version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveRootWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
else
return false;
- } while ( result == update_flags::retry );
- return result == update_flags::result_removed;
+ if ( result == update_flags::retry )
+ m_stat.onRemoveRetry();
+ else
+ return result == update_flags::result_removed;
+ }
}
template <typename K, typename Compare, typename Func>
- int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+ int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
- CDS_UNUSED( pParent );
int nCmp = cmp( key, pNode->m_key );
if ( nCmp == 0 ) {
- if ( nFlags & update_flags::allow_update ) {
- return try_update_node( funcUpdate, pNode, disp );
- }
+ if ( nFlags & update_flags::allow_update )
+ return try_update_node( funcUpdate, pNode, nVersion, disp );
return update_flags::failed;
}
- int result;
- do {
- node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
- m_stat.onUpdateRetry();
+ while ( true ) {
+ int result;
+ node_type * pChild = child( pNode, nCmp );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
return update_flags::retry;
- }
if ( pChild == nullptr ) {
// insert new node
}
else {
// update child
- result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onUpdateWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// retry
+ result = update_flags::retry;
}
- else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
+ else if ( pChild == child( pNode, nCmp )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
- m_stat.onUpdateRetry();
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return update_flags::retry;
- }
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// child, so just prior to the node nVersion validation both traversals were definitely okay.
// This means that we are no longer vulnerable to node shrinks, and we don't need
// to validate node version any more.
- result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
+ result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
- } while ( result == update_flags::retry );
- return result;
+
+ if ( result == update_flags::retry ) {
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ return update_flags::retry;
+ m_stat.onUpdateRetry();
+ }
+ else
+ return result;
+ }
}
template <typename K, typename Compare, typename Func>
if ( nCmp == 0 )
return try_remove_node( pParent, pNode, nVersion, func, disp );
- int result;
- do {
- node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
- m_stat.onRemoveRetry();
+ while ( true ) {
+ int result;
+
+ node_type * pChild = child( pNode, nCmp );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
return update_flags::retry;
- }
- if ( pChild == nullptr ) {
+ if ( pChild == nullptr )
return update_flags::failed;
- }
else {
// update child
result = update_flags::retry;
m_stat.onRemoveWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// retry
+ result = update_flags::retry;
}
- else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
+ else if ( pChild == child( pNode, nCmp )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
- m_stat.onRemoveRetry();
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return update_flags::retry;
- }
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// to validate node version any more.
result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
- } while ( result == update_flags::retry );
- return result;
+
+ if ( result == update_flags::retry ) {
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ return update_flags::retry;
+ m_stat.onRemoveRetry();
+ }
+ else
+ return result;
+ }
}
template <typename Func>
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
- int result;
- do {
- node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
- m_stat.onRemoveRetry();
+ while ( true ) {
+ int result;
+ node_type * pChild = child( pNode, nDir );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
return update_flags::retry;
- }
if ( pChild == nullptr ) {
// Found min/max
return try_remove_node( pParent, pNode, nVersion, func, disp );
}
else {
- result = update_flags::retry;
+ //result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// retry
+ result = update_flags::retry;
}
- else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
+ else if ( pChild == child( pNode, nDir )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
- m_stat.onRemoveRetry();
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
return update_flags::retry;
- }
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// to validate node version any more.
result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
}
+ else
+ result = update_flags::retry;
}
- } while ( result == update_flags::retry );
- return result;
+
+ if ( result == update_flags::retry ) {
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ return update_flags::retry;
+ m_stat.onRemoveRetry();
+ }
+ else
+ return result;
+ }
}
template <typename K, typename Func>
if ( c_bRelaxedInsert ) {
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
- || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
+ || child( pNode, nDir ) != nullptr )
{
m_stat.onInsertRetry();
return update_flags::retry;
assert( pNode != nullptr );
node_scoped_lock l( m_Monitor, *pNode );
- if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
- || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
+ || child( pNode, nDir ) != nullptr )
{
if ( c_bRelaxedInsert ) {
+ mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
+ pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ free_value( pVal );
free_node( pNew );
m_stat.onRelaxedInsertFailed();
}
++m_ItemCounter;
m_stat.onInsertSuccess();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onInsertRebalanceRequired();
+ }
return update_flags::result_inserted;
}
template <typename Func>
- int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
+ int try_update_node( Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
mapped_type pOld;
assert( pNode != nullptr );
{
node_scoped_lock l( m_Monitor, *pNode );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
+ return update_flags::retry;
+
if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
m_stat.onUpdateUnlinked();
return update_flags::retry;
if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
return update_flags::failed;
- if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
- || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
- {
+ if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
node_type * pDamaged;
mapped_type pOld;
{
node_scoped_lock lp( m_Monitor, *pParent );
- if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
+ if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode ) != pParent )
return update_flags::retry;
{
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( memory_model::memory_order_relaxed );
- if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
- && pOld
+ if ( !( pNode->version( memory_model::memory_order_acquire ) == nVersion
+ && pOld
&& try_unlink_locked( pParent, pNode, disp )))
{
return update_flags::retry;
else
m_stat.onExtractValue();
- fix_height_and_rebalance( pDamaged, disp );
+ if ( pDamaged ) {
+ fix_height_and_rebalance( pDamaged, disp );
+ m_stat.onRemoveRebalanceRequired();
+ }
return update_flags::result_removed;
}
else {
{
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( atomics::memory_order_relaxed );
- if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
+ if ( pNode->version( atomics::memory_order_acquire ) == nVersion && pOld ) {
pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
result = update_flags::result_removed;
}
// pParent and pNode must be locked
assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
- node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
- node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
+ node_type * pParentLeft = child( pParent, left_child );
+ node_type * pParentRight = child( pParent, right_child );
if ( pNode != pParentLeft && pNode != pParentRight ) {
// node is no longer a child of parent
return false;
}
assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
- assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
+ assert( pParent == parent( pNode ));
- node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
- node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child );
+ node_type * pRight = child( pNode, right_child );
if ( pLeft != nullptr && pRight != nullptr ) {
// splicing is no longer possible
return false;
pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
if ( pSplice )
- pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
+ pSplice->parent( pParent, memory_model::memory_order_relaxed );
// Mark the node as unlinked
pNode->version( node_type::unlinked, memory_model::memory_order_release );
//@cond
int estimate_node_condition( node_type * pNode )
{
- node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
- node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child );
+ node_type * pRight = child( pNode, right_child );
if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
return unlink_required;
- int h = pNode->height( memory_model::memory_order_relaxed );
- int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
- int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
+ int h = height( pNode );
+ int hL = height_null( pLeft );
+ int hR = height_null( pRight );
int hNew = 1 + std::max( hL, hR );
int nBalance = hL - hR;
case nothing_required:
return nullptr;
default:
- pNode->height( h, memory_model::memory_order_relaxed );
- return parent( pNode, memory_model::memory_order_relaxed );
+ set_height( pNode, h );
+ return parent( pNode );
}
}
void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
{
- while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
+ while ( pNode && parent( pNode )) {
int nCond = estimate_node_condition( pNode );
if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
return;
if ( nCond != unlink_required && nCond != rebalance_required )
pNode = fix_height( pNode );
else {
- node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
+ node_type * pParent = parent( pNode );
assert( pParent != nullptr );
{
node_scoped_lock lp( m_Monitor, *pParent );
- if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
- && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
- {
+ if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode ) == pParent ) {
node_scoped_lock ln( m_Monitor, *pNode );
pNode = rebalance_locked( pParent, pNode, disp );
}
{
// pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
- assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
- || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+ assert( parent( pNode ) == pParent );
- node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
- node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
+ node_type * pLeft = child( pNode, left_child );
+ node_type * pRight = child( pNode, right_child );
if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
if ( try_unlink_locked( pParent, pNode, disp ))
}
}
- int h = pNode->height( memory_model::memory_order_relaxed );
- int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
- int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
+ assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
+
+ int h = height( pNode );
+ int hL = height_null( pLeft );
+ int hR = height_null( pRight );
int hNew = 1 + std::max( hL, hR );
int balance = hL - hR;
else if ( balance < -1 )
return rebalance_to_left_locked( pParent, pNode, pRight, hL );
else if ( hNew != h ) {
- pNode->height( hNew, memory_model::memory_order_relaxed );
+ set_height( pNode, hNew );
// pParent is already locked
return fix_height_locked( pParent );
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
{
- assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
- || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+ assert( parent( pNode ) == pParent );
+ assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
// pParent and pNode is locked yet
// pNode->pLeft is too large, we will rotate-right.
if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
return pNode; // retry for pNode
- int hL = pLeft->height( memory_model::memory_order_relaxed );
+ int hL = height( pLeft );
if ( hL - hR <= 1 )
return pNode; // retry
- node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
- int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
- node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
- int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
+ node_type * pLRight = child( pLeft, right_child );
+ int hLR = height_null( pLRight );
+ node_type * pLLeft = child( pLeft, left_child );
+ int hLL = height_null( pLLeft );
if ( hLL > hLR ) {
// rotate right
if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
return pNode; // retry
- hLR = pLRight->height( memory_model::memory_order_relaxed );
+ hLR = height( pLRight );
if ( hLL > hLR )
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
- node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
- int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
+ int hLRL = height_null( child( pLRight, left_child ));
int balance = hLL - hLRL;
if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
// nParent.child.left won't be damaged after a double rotation
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
{
- assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
- || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+ assert( parent( pNode ) == pParent );
+ assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
// pParent and pNode is locked yet
{
if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
return pNode; // retry for pNode
- int hR = pRight->height( memory_model::memory_order_relaxed );
+ int hR = height( pRight );
if ( hL - hR >= -1 )
return pNode; // retry
- node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
- int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
- node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
- int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
+ node_type * pRLeft = child( pRight, left_child );
+ int hRL = height_null( pRLeft );
+ node_type * pRRight = child( pRight, right_child );
+ int hRR = height_null( pRRight );
if ( hRR > hRL )
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
return pNode; // retry
- hRL = pRLeft->height( memory_model::memory_order_relaxed );
+ hRL = height( pRLeft );
if ( hRR >= hRL )
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
- node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
- int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
+ node_type * pRLRight = child( pRLeft, right_child );
+ int hRLR = height_null( pRLRight );
int balance = hRR - hRLR;
if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
static void begin_change( node_type * pNode, version_type version )
{
+ assert(pNode->version(memory_model::memory_order_acquire) == version );
+ assert( (version & node_type::shrinking) == 0 );
pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
}
static void end_change( node_type * pNode, version_type version )
node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
- node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ node_type * pParentLeft = child( pParent, left_child );
begin_change( pNode, nodeVersion );
pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
if ( pLRight != nullptr )
- pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
+ pLRight->parent( pNode, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
- pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
+ pNode->parent( pLeft, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pParentLeft == pNode )
pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
}
- pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
+ pLeft->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights links
int hNode = 1 + std::max( hLR, hR );
- pNode->height( hNode, memory_model::memory_order_relaxed );
- pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
+ set_height( pNode, hNode );
+ set_height( pLeft, 1 + std::max( hLL, hNode ));
end_change( pNode, nodeVersion );
m_stat.onRotateRight();
int nodeBalance = hLR - hR;
if ( nodeBalance < -1 || nodeBalance > 1 ) {
// we need another rotation at pNode
+ m_stat.onRotateAfterRightRotation();
return pNode;
}
// extra-routing node damage
if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
// we need to remove pNode and then repair
+ m_stat.onRemoveAfterRightRotation();
return pNode;
}
// we've already fixed the height at pLeft, do we need a rotation here?
int leftBalance = hLL - hNode;
- if ( leftBalance < -1 || leftBalance > 1 )
+ if ( leftBalance < -1 || leftBalance > 1 ) {
+ m_stat.onRotateAfterRightRotation();
return pLeft;
+ }
// pLeft might also have routing node damage (if pLeft.left was null)
- if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
+ if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_relaxed) ) {
+ m_stat.onDamageAfterRightRotation();
return pLeft;
+ }
// try to fix the parent height while we've still got the lock
return fix_height_locked( pParent );
node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
- node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ node_type * pParentLeft = child( pParent, left_child );
begin_change( pNode, nodeVersion );
// fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
if ( pRLeft != nullptr )
- pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
+ pRLeft->parent( pNode, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
+ pNode->parent( pRight, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pParentLeft == pNode )
pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
}
- pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
+ pRight->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hL, hRL );
- pNode->height( hNode, memory_model::memory_order_relaxed );
- pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
+ set_height( pNode, hNode );
+ set_height( pRight, 1 + std::max( hNode, hRR ));
end_change( pNode, nodeVersion );
m_stat.onRotateLeft();
int nodeBalance = hRL - hL;
- if ( nodeBalance < -1 || nodeBalance > 1 )
+ if ( nodeBalance < -1 || nodeBalance > 1 ) {
+ m_stat.onRotateAfterLeftRotation();
return pNode;
+ }
- if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
+ if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
+ m_stat.onRemoveAfterLeftRotation();
return pNode;
+ }
int rightBalance = hRR - hNode;
- if ( rightBalance < -1 || rightBalance > 1 )
+ if ( rightBalance < -1 || rightBalance > 1 ) {
+ m_stat.onRotateAfterLeftRotation();
return pRight;
+ }
- if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
+ if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_relaxed) ) {
+ m_stat.onDamageAfterLeftRotation();
return pRight;
+ }
return fix_height_locked( pParent );
}
node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
- version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
- node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
- node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
- node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
- int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
+ node_type * pPL = child( pParent, left_child );
+ node_type * pLRL = child( pLRight, left_child );
+ node_type * pLRR = child( pLRight, right_child );
+ int hLRR = height_null( pLRR );
begin_change( pNode, nodeVersion );
begin_change( pLeft, leftVersion );
// fix up pNode links, careful about the order!
pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
if ( pLRR != nullptr )
- pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
+ pLRR->parent( pNode, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
if ( pLRL != nullptr )
- pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
+ pLRL->parent( pLeft, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
- pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
+ pLeft->parent( pLRight, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
+
pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
- pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
+ pNode->parent( pLRight, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pPL == pNode )
pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
else {
- assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+ assert( child( pParent, right_child ) == pNode );
pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
}
- pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
+ pLRight->parent( pParent, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hLRR, hR );
- pNode->height( hNode, memory_model::memory_order_relaxed );
+ set_height( pNode, hNode );
int hLeft = 1 + std::max( hLL, hLRL );
- pLeft->height( hLeft, memory_model::memory_order_relaxed );
- pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
+ set_height( pLeft, hLeft );
+ set_height( pLRight, 1 + std::max( hLeft, hNode ));
end_change( pNode, nodeVersion );
end_change( pLeft, leftVersion );
int nodeBalance = hLRR - hR;
if ( nodeBalance < -1 || nodeBalance > 1 ) {
// we need another rotation at pNode
+ m_stat.onRotateAfterRLRotation();
return pNode;
}
// pNode might also be damaged by being an unnecessary routing node
if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
// repair involves splicing out pNode and maybe more rotations
+ m_stat.onRemoveAfterRLRotation();
return pNode;
}
// we've already fixed the height at pLRight, do we need a rotation here?
int balanceLR = hLeft - hNode;
- if ( balanceLR < -1 || balanceLR > 1 )
+ if ( balanceLR < -1 || balanceLR > 1 ) {
+ m_stat.onRotateAfterRLRotation();
return pLRight;
+ }
// try to fix the parent height while we've still got the lock
return fix_height_locked( pParent );
node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
- version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
- node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
- node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
- node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
- int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
+ node_type * pPL = child( pParent, left_child );
+ node_type * pRLL = child( pRLeft, left_child );
+ node_type * pRLR = child( pRLeft, right_child );
+ int hRLL = height_null( pRLL );
begin_change( pNode, nodeVersion );
begin_change( pRight, rightVersion );
// fix up pNode links, careful about the order!
pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
if ( pRLL != nullptr )
- pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
+ pRLL->parent( pNode, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
if ( pRLR != nullptr )
- pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
+ pRLR->parent( pRight, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
- pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
+ pRight->parent( pRLeft, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
+
pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
+ pNode->parent( pRLeft, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pPL == pNode )
pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
}
- pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
+ pRLeft->parent( pParent, memory_model::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hL, hRLL );
- pNode->height( hNode, memory_model::memory_order_relaxed );
+ set_height( pNode, hNode );
int hRight = 1 + std::max( hRLR, hRR );
- pRight->height( hRight, memory_model::memory_order_relaxed );
- pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
+ set_height( pRight, hRight );
+ set_height( pRLeft, 1 + std::max( hNode, hRight ));
end_change( pNode, nodeVersion );
end_change( pRight, rightVersion );
assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
int nodeBalance = hRLL - hL;
- if ( nodeBalance < -1 || nodeBalance > 1 )
+ if ( nodeBalance < -1 || nodeBalance > 1 ) {
+ m_stat.onRotateAfterLRRotation();
return pNode;
- if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
+ }
+
+ if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
+ m_stat.onRemoveAfterLRRotation();
return pNode;
+ }
int balRL = hRight - hNode;
- if ( balRL < -1 || balRL > 1 )
+ if ( balRL < -1 || balRL > 1 ) {
+ m_stat.onRotateAfterLRRotation();
return pRLeft;
+ }
return fix_height_locked( pParent );
}