disposer()(pVal);
}
- static node_type * child( node_type * pNode, int nDir, atomics::memory_order order = memory_model::memory_order_relaxed )
+ static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
{
- return pNode->child( nDir ).load( order );
+ return pNode->child( nDir, order );
}
- static node_type * parent( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
+ static node_type * parent( node_type * pNode, atomics::memory_order order )
{
return pNode->parent( order );
}
return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
}
- //@cond
- template <typename K>
- std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
- {
- return update( key, pVal, true );
- }
-
//@endcond
/// Delete \p key from the map
);
}
- /// Find the key \p key
+ /// Checks whether the map contains \p key
/**
The function searches the item with key equal to \p key
and returns \p true if it is found, and \p false otherwise.
The function applies RCU lock internally.
*/
template <typename K>
- bool find( K const& key )
+ bool contains( K const& key )
{
return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
}
- /// Finds the key \p val using \p pred predicate for searching
+ /// Checks whether the map contains \p key using \p pred predicate for searching
/**
- The function is an analog of \p find(K const&)
- but \p pred is used for key comparing.
+ The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparison.
\p Less functor has the interface like \p std::less.
- \p Less must imply the same element order as the comparator used for building the map.
+ \p Less must imply the same element order as the comparator used for building the map.
*/
template <typename K, typename Less>
- bool find_with( K const& key, Less pred )
+ bool contains( K const& key, Less pred )
{
CDS_UNUSED( pred );
return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
template <typename Func>
bool check_consistency( Func f ) const
{
- node_type * pChild = child( m_pRoot, right_child );
+ node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
if ( pChild ) {
size_t nErrors = 0;
do_check_consistency( pChild, 1, f, nErrors );
{
if ( pNode ) {
key_comparator cmp;
- node_type * pLeft = child( pNode, left_child );
- node_type * pRight = child( pNode, right_child );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
++nErrors;
if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveRootWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
result = update_flags::retry;
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
if ( result == update_flags::retry )
m_stat.onRemoveRetry();
+ else {
+ m_stat.onExtract( result == update_flags::result_removed );
+ return;
+ }
}
}
}
key_comparator(),
[&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
);
+ m_stat.onExtract( pExtracted != nullptr );
return pExtracted;
}
cds::opt::details::make_comparator_from_less<Less>(),
[&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
);
+ m_stat.onExtract( pExtracted != nullptr );
return pExtracted;
}
//@endcond
private:
//@cond
- static int height( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
+ static int height( node_type * pNode, atomics::memory_order order )
{
assert( pNode );
return pNode->m_nHeight.load( order );
}
- static void set_height( node_type * pNode, int h, atomics::memory_order order = memory_model::memory_order_relaxed )
+ static void set_height( node_type * pNode, int h, atomics::memory_order order )
{
assert( pNode );
pNode->m_nHeight.store( h, order );
}
- static int height_null( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
+ static int height_null( node_type * pNode, atomics::memory_order order )
{
return pNode ? height( pNode, order ) : 0;
}
+ static CDS_CONSTEXPR int const c_stackSize = 64;
+
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
{
assert( gc::is_locked() );
assert( pNode );
- while ( true ) {
- node_type * pChild = child( pNode, nDir );
- if ( !pChild ) {
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return find_result::retry;
+ struct stack_record
+ {
+ node_type * pNode;
+ version_type nVersion;
+ int nDir;
+ };
- m_stat.onFindFailed();
- return find_result::not_found;
- }
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+ stack[0].nDir = nDir;
- int nCmp = cmp( key, pChild->m_key );
- if ( nCmp == 0 ) {
- if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
- // key found
- node_scoped_lock l( m_Monitor, *pChild );
- if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
- if ( f( pChild ) ) {
- m_stat.onFindSuccess();
- return find_result::found;
- }
+ while ( pos >= 0 ) {
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+ nDir = stack[pos].nDir;
+
+ while ( true ) {
+ node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
+ if ( !pChild ) {
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
}
+ m_stat.onFindFailed();
+ return find_result::not_found;
}
- m_stat.onFindFailed();
- return find_result::not_found;
- }
+ int nCmp = cmp( key, pChild->m_key );
+ if ( nCmp == 0 ) {
+ if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
+ // key found
+ node_scoped_lock l( m_Monitor, *pChild );
+ if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
+ if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
+ if ( f( pChild ) ) {
+ m_stat.onFindSuccess();
+ return find_result::found;
+ }
+ }
+ }
+ else {
+ m_stat.onFindRetry();
+ continue;
+ }
+ }
+ m_stat.onFindFailed();
+ return find_result::not_found;
+ }
+ else {
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onFindWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
- version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
- if ( nChildVersion & node_type::shrinking ) {
- m_stat.onFindWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+ }
+ else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
+ {
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return find_result::retry;
- }
- else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
- {
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
- return find_result::retry;
+ ++pos;
+ assert(pos < c_stackSize);
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ stack[pos].nDir = nCmp;
+ break; // child iteration
+ }
- find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
- if ( found != find_result::retry )
- return found;
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+ }
+ m_stat.onFindRetry();
}
-
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return find_result::retry;
-
- m_stat.onFindRetry();
}
+ return find_result::retry;
}
template <typename K, typename Compare, typename Func>
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onUpdateRootWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
result = update_flags::retry;
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
assert( pVal != nullptr );
pNew->m_pValue.store( pVal, memory_model::memory_order_release );
- m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
- set_height( m_pRoot, 2 );
+ m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
+ set_height( m_pRoot, 2, memory_model::memory_order_release );
}
++m_ItemCounter;
return update_flags::failed;
}
- if ( result == update_flags::retry )
- m_stat.onUpdateRetry();
- else
+ if ( result != update_flags::retry )
return result;
}
}
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveRootWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
result = update_flags::retry;
}
else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
if ( result == update_flags::retry )
m_stat.onRemoveRetry();
- else
+ else {
+ m_stat.onRemove( result == update_flags::result_removed );
return result == update_flags::result_removed;
+ }
}
}
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
- int nCmp = cmp( key, pNode->m_key );
- if ( nCmp == 0 )
- return try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
+ struct stack_record
+ {
+ node_type * pNode;
+ version_type nVersion;
+ };
- while ( true ) {
- int result;
- node_type * pChild = child( pNode, nCmp );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
- return update_flags::retry;
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
- if ( pChild == nullptr ) {
- // insert new node
- if ( nFlags & update_flags::allow_insert )
- result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
- else
- result = update_flags::failed;
- }
- else {
- // update child
- version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
- if ( nChildVersion & node_type::shrinking ) {
- m_stat.onUpdateWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
- // retry
- result = update_flags::retry;
- }
- else if ( pChild == child( pNode, nCmp )) {
- // this second read is important, because it is protected by nChildVersion
+ while ( pos >= 0 ) {
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
- // validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
+ int nCmp = cmp( key, pNode->m_key );
+ if ( nCmp == 0 ) {
+ int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onUpdateRetry();
+ continue;
+ }
- // At this point we know that the traversal our parent took to get to node is still valid.
- // The recursive implementation will validate the traversal from node to
- // child, so just prior to the node nVersion validation both traversals were definitely okay.
- // This means that we are no longer vulnerable to node shrinks, and we don't need
- // to validate node version any more.
- result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
+ while ( true ) {
+ node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onUpdateRetry();
+ break;
}
- else
- result = update_flags::retry;
- }
- if ( result == update_flags::retry ) {
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
- m_stat.onUpdateRetry();
+ if ( pChild == nullptr ) {
+ // insert new node
+ if ( nFlags & update_flags::allow_insert ) {
+ int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onUpdateRetry();
+ break;
+ }
+ else
+ return update_flags::failed;
+ }
+ else {
+ // update child
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onUpdateWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
+ // retry
+ }
+ else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
+ // this second read is important, because it is protected by nChildVersion
+
+ // validate the read that our caller took to get to node
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ --pos;
+ m_stat.onUpdateRetry();
+ break; // retry
+ }
+ // At this point we know that the traversal our parent took to get to node is still valid.
+ // The recursive implementation will validate the traversal from node to
+ // child, so just prior to the node nVersion validation both traversals were definitely okay.
+ // This means that we are no longer vulnerable to node shrinks, and we don't need
+ // to validate node version any more.
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ assert( nChildVersion != node_type::unlinked );
+ break; // child iteration
+ }
+ m_stat.onUpdateRetry();
+ }
}
- else
- return result;
}
+ return update_flags::retry;
}
template <typename K, typename Compare, typename Func>
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
- int nCmp = cmp( key, pNode->m_key );
- if ( nCmp == 0 )
- return try_remove_node( pParent, pNode, nVersion, func, disp );
+ struct stack_record
+ {
+ node_type * pParent;
+ node_type * pNode;
+ version_type nVersion;
+ };
- while ( true ) {
- int result;
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pParent = pParent;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
- node_type * pChild = child( pNode, nCmp );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
- return update_flags::retry;
+ while ( pos >= 0 ) {
+ pParent = stack[pos].pParent;
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+
+ int nCmp = cmp( key, pNode->m_key );
+ if ( nCmp == 0 ) {
+ int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onRemoveRetry();
+ continue;
+ }
+
+ while ( true ) {
+ node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ if ( pChild == nullptr )
+ return update_flags::failed;
- if ( pChild == nullptr )
- return update_flags::failed;
- else {
// update child
- result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
// retry
}
- else if ( pChild == child( pNode, nCmp )) {
+ else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// child, so just prior to the node nVersion validation both traversals were definitely okay.
// This means that we are no longer vulnerable to node shrinks, and we don't need
// to validate node version any more.
- result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pParent = pNode;
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ break; // child iteration
}
- else
- result = update_flags::retry;
- }
-
- if ( result == update_flags::retry ) {
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
m_stat.onRemoveRetry();
}
- else
- return result;
}
+ return update_flags::retry;
}
template <typename Func>
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
- while ( true ) {
- int result;
- node_type * pChild = child( pNode, nDir );
- if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
- return update_flags::retry;
+ struct stack_record
+ {
+ node_type * pParent;
+ node_type * pNode;
+ version_type nVersion;
+ };
+
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pParent = pParent;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+
+ while ( pos >= 0 ) {
+ pParent = stack[pos].pParent;
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+
+ while ( true ) {
+ node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ if ( pChild == nullptr ) {
+ // Found min/max
+ assert(pNode->is_valued( memory_model::memory_order_acquire ));
+ int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
- if ( pChild == nullptr ) {
- // Found min/max
- return try_remove_node( pParent, pNode, nVersion, func, disp );
- }
- else {
- //result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
m_stat.onRemoveWaitShrinking();
- pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
// retry
- result = update_flags::retry;
}
- else if ( pChild == child( pNode, nDir )) {
+ else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// child, so just prior to the node nVersion validation both traversals were definitely okay.
// This means that we are no longer vulnerable to node shrinks, and we don't need
// to validate node version any more.
- result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pParent = pNode;
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ break; // child iteration
}
- else
- result = update_flags::retry;
- }
-
- if ( result == update_flags::retry ) {
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
- return update_flags::retry;
m_stat.onRemoveRetry();
}
- else
- return result;
}
+ return update_flags::retry;
}
template <typename K, typename Func>
auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
mapped_type pVal = funcUpdate( pNew );
assert( pVal != nullptr );
- pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
+ pNew->m_pValue.store( pVal, memory_model::memory_order_release );
};
if ( c_bRelaxedInsert ) {
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
- || child( pNode, nDir ) != nullptr )
+ || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
{
m_stat.onInsertRetry();
return update_flags::retry;
node_scoped_lock l( m_Monitor, *pNode );
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
- || child( pNode, nDir ) != nullptr )
+ || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
{
if ( c_bRelaxedInsert ) {
mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
if ( !c_bRelaxedInsert )
fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
- pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
+ pNode->child( pNew, nDir, memory_model::memory_order_release );
pDamaged = fix_height_locked( pNode );
}
if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
return update_flags::retry;
- if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
+ if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
m_stat.onUpdateUnlinked();
return update_flags::retry;
}
pOld = nullptr;
else {
assert( pVal != nullptr );
- pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
+ pNode->m_pValue.store( pVal, memory_model::memory_order_release );
}
}
}
if ( bInserted ) {
+ ++m_ItemCounter;
m_stat.onInsertSuccess();
return update_flags::result_inserted;
}
assert( pParent != nullptr );
assert( pNode != nullptr );
- if ( !pNode->is_valued( memory_model::memory_order_relaxed ) )
+ if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
return update_flags::failed;
- if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
+ if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
+ || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
+ {
// pNode can be replaced with its child
node_type * pDamaged;
mapped_type pOld;
{
node_scoped_lock lp( m_Monitor, *pParent );
- if ( pParent->is_unlinked( memory_model::memory_order_relaxed ) || parent( pNode ) != pParent )
+ if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
return update_flags::retry;
{
{
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( memory_model::memory_order_relaxed );
- if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
+ if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
return update_flags::retry;
if ( !pOld )
return update_flags::failed;
- pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
+ m_stat.onMakeRoutingNode();
}
--m_ItemCounter;
// pParent and pNode must be locked
assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
- node_type * pParentLeft = child( pParent, left_child );
- node_type * pParentRight = child( pParent, right_child );
+ node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
if ( pNode != pParentLeft && pNode != pParentRight ) {
// node is no longer a child of parent
return false;
}
- assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
- assert( pParent == parent( pNode ));
+ assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
+ assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
- node_type * pLeft = child( pNode, left_child );
- node_type * pRight = child( pNode, right_child );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
if ( pLeft != nullptr && pRight != nullptr ) {
// splicing is no longer possible
return false;
node_type * pSplice = pLeft ? pLeft : pRight;
if ( pParentLeft == pNode )
- pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
+ pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
else
- pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
+ pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
if ( pSplice )
- pSplice->parent( pParent, memory_model::memory_order_relaxed );
+ pSplice->parent( pParent, memory_model::memory_order_release );
// Mark the node as unlinked
pNode->version( node_type::unlinked, memory_model::memory_order_release );
// The value will be disposed by calling function
- pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
+ pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
disp.dispose( pNode );
m_stat.onDisposeNode();
//@cond
int estimate_node_condition( node_type * pNode )
{
- node_type * pLeft = child( pNode, left_child );
- node_type * pRight = child( pNode, right_child );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
- if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
+ if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
return unlink_required;
- int h = height( pNode );
- int hL = height_null( pLeft );
- int hR = height_null( pRight );
+ int h = height( pNode, memory_model::memory_order_acquire );
+ int hL = height_null( pLeft, memory_model::memory_order_acquire );
+ int hR = height_null( pRight, memory_model::memory_order_acquire );
int hNew = 1 + std::max( hL, hR );
int nBalance = hL - hR;
case nothing_required:
return nullptr;
default:
- set_height( pNode, h );
- return parent( pNode );
+ set_height( pNode, h, memory_model::memory_order_release );
+ return parent( pNode, memory_model::memory_order_relaxed );
}
}
void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
{
- while ( pNode && parent( pNode )) {
+ while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
int nCond = estimate_node_condition( pNode );
- if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
+ if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
return;
if ( nCond != unlink_required && nCond != rebalance_required )
pNode = fix_height( pNode );
else {
- node_type * pParent = parent( pNode );
+ node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
assert( pParent != nullptr );
{
node_scoped_lock lp( m_Monitor, *pParent );
- if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode ) == pParent ) {
+ if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
node_scoped_lock ln( m_Monitor, *pNode );
pNode = rebalance_locked( pParent, pNode, disp );
}
{
// pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
- assert( parent( pNode ) == pParent );
+ assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
- node_type * pLeft = child( pNode, left_child );
- node_type * pRight = child( pNode, right_child );
+ node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
+ node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
if ( try_unlink_locked( pParent, pNode, disp ))
}
}
- assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
- int h = height( pNode );
- int hL = height_null( pLeft );
- int hR = height_null( pRight );
+ int h = height( pNode, memory_model::memory_order_acquire );
+ int hL = height_null( pLeft, memory_model::memory_order_acquire );
+ int hR = height_null( pRight, memory_model::memory_order_acquire );
int hNew = 1 + std::max( hL, hR );
int balance = hL - hR;
else if ( balance < -1 )
return rebalance_to_left_locked( pParent, pNode, pRight, hL );
else if ( hNew != h ) {
- set_height( pNode, hNew );
+ set_height( pNode, hNew, memory_model::memory_order_release );
// pParent is already locked
return fix_height_locked( pParent );
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
{
- assert( parent( pNode ) == pParent );
- assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
+ assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
// pNode->pLeft is too large, we will rotate-right.
if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
return pNode; // retry for pNode
- int hL = height( pLeft );
+ int hL = height( pLeft, memory_model::memory_order_acquire );
if ( hL - hR <= 1 )
return pNode; // retry
- node_type * pLRight = child( pLeft, right_child );
- int hLR = height_null( pLRight );
- node_type * pLLeft = child( pLeft, left_child );
- int hLL = height_null( pLLeft );
+ node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
+ int hLR = height_null( pLRight, memory_model::memory_order_acquire );
+ node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
+ int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
if ( hLL > hLR ) {
// rotate right
assert( pLRight != nullptr );
{
node_scoped_lock lr( m_Monitor, *pLRight );
- if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
+ if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
return pNode; // retry
- hLR = height( pLRight );
+ hLR = height( pLRight, memory_model::memory_order_acquire );
if ( hLL > hLR )
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
- int hLRL = height_null( child( pLRight, left_child ));
+ int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
int balance = hLL - hLRL;
if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
// nParent.child.left won't be damaged after a double rotation
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
{
- assert( parent( pNode ) == pParent );
- assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
+ assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
+ assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
+ || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
{
if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
return pNode; // retry for pNode
- int hR = height( pRight );
+ int hR = height( pRight, memory_model::memory_order_acquire );
if ( hL - hR >= -1 )
return pNode; // retry
- node_type * pRLeft = child( pRight, left_child );
- int hRL = height_null( pRLeft );
- node_type * pRRight = child( pRight, right_child );
- int hRR = height_null( pRRight );
+ node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
+ int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
+ node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
+ int hRR = height_null( pRRight, memory_model::memory_order_acquire );
if ( hRR > hRL )
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
{
assert( pRLeft != nullptr );
node_scoped_lock lrl( m_Monitor, *pRLeft );
- if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
+ if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
return pNode; // retry
- hRL = height( pRLeft );
+ hRL = height( pRLeft, memory_model::memory_order_acquire );
if ( hRR >= hRL )
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
- node_type * pRLRight = child( pRLeft, right_child );
- int hRLR = height_null( pRLRight );
+ node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
+ int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
int balance = hRR - hRLR;
if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
- node_type * pParentLeft = child( pParent, left_child );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
+ node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
begin_change( pNode, nodeVersion );
- pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
+ pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
if ( pLRight != nullptr )
- pLRight->parent( pNode, memory_model::memory_order_relaxed );
+ pLRight->parent( pNode, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
- pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
- pNode->parent( pLeft, memory_model::memory_order_relaxed );
+ pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
+ pNode->parent( pLeft, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pParentLeft == pNode )
- pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
+ pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
+ pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
}
- pLeft->parent( pParent, memory_model::memory_order_relaxed );
+ pLeft->parent( pParent, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights links
int hNode = 1 + std::max( hLR, hR );
- set_height( pNode, hNode );
- set_height( pLeft, 1 + std::max( hLL, hNode ));
+ set_height( pNode, hNode, memory_model::memory_order_release );
+ set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release );
end_change( pNode, nodeVersion );
m_stat.onRotateRight();
}
// pLeft might also have routing node damage (if pLeft.left was null)
- if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_relaxed) ) {
+ if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
m_stat.onDamageAfterRightRotation();
return pLeft;
}
node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
- node_type * pParentLeft = child( pParent, left_child );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
+ node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
begin_change( pNode, nodeVersion );
// fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
- pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
+ pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
if ( pRLeft != nullptr )
- pRLeft->parent( pNode, memory_model::memory_order_relaxed );
+ pRLeft->parent( pNode, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
- pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- pNode->parent( pRight, memory_model::memory_order_relaxed );
+ pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
+ pNode->parent( pRight, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
if ( pParentLeft == pNode )
- pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
+ pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
+ pParent->m_pRight.store( pRight, memory_model::memory_order_release );
}
- pRight->parent( pParent, memory_model::memory_order_relaxed );
+ pRight->parent( pParent, memory_model::memory_order_release );
atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hL, hRL );
- set_height( pNode, hNode );
- set_height( pRight, 1 + std::max( hNode, hRR ));
+ set_height( pNode, hNode, memory_model::memory_order_release );
+ set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release );
end_change( pNode, nodeVersion );
m_stat.onRotateLeft();
return pRight;
}
- if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_relaxed) ) {
+ if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
m_stat.onDamageAfterLeftRotation();
return pRight;
}
node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
- node_type * pPL = child( pParent, left_child );
- node_type * pLRL = child( pLRight, left_child );
- node_type * pLRR = child( pLRight, right_child );
- int hLRR = height_null( pLRR );
+ node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
+ node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
+ int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
begin_change( pNode, nodeVersion );
begin_change( pLeft, leftVersion );
// fix up pNode links, careful about the order!
- pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
+ pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
if ( pLRR != nullptr )
- pLRR->parent( pNode, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pLRR->parent( pNode, memory_model::memory_order_release );
- pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
+ pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
if ( pLRL != nullptr )
- pLRL->parent( pLeft, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pLRL->parent( pLeft, memory_model::memory_order_release );
- pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
- pLeft->parent( pLRight, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
+ pLeft->parent( pLRight, memory_model::memory_order_release );
- pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
- pNode->parent( pLRight, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
+ pNode->parent( pLRight, memory_model::memory_order_release );
if ( pPL == pNode )
- pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
+ pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
else {
- assert( child( pParent, right_child ) == pNode );
- pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
+ assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
+ pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
}
- pLRight->parent( pParent, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pLRight->parent( pParent, memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hLRR, hR );
- set_height( pNode, hNode );
+ set_height( pNode, hNode, memory_model::memory_order_release );
int hLeft = 1 + std::max( hLL, hLRL );
- set_height( pLeft, hLeft );
- set_height( pLRight, 1 + std::max( hLeft, hNode ));
+ set_height( pLeft, hLeft, memory_model::memory_order_release );
+ set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release );
end_change( pNode, nodeVersion );
end_change( pLeft, leftVersion );
// caller should have performed only a single rotation if pLeft was going
// to end up damaged
assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
- assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
+ assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
// We have damaged pParent, pLR (now parent.child), and pNode (now
// parent.child.right). pNode is the deepest. Perform as many fixes as we
node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
{
- version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
+ version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
- node_type * pPL = child( pParent, left_child );
- node_type * pRLL = child( pRLeft, left_child );
- node_type * pRLR = child( pRLeft, right_child );
- int hRLL = height_null( pRLL );
+ node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
+ node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
+ node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
+ int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
begin_change( pNode, nodeVersion );
begin_change( pRight, rightVersion );
// fix up pNode links, careful about the order!
- pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
+ pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
if ( pRLL != nullptr )
- pRLL->parent( pNode, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pRLL->parent( pNode, memory_model::memory_order_release );
- pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
+ pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
if ( pRLR != nullptr )
- pRLR->parent( pRight, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pRLR->parent( pRight, memory_model::memory_order_release );
- pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
- pRight->parent( pRLeft, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
+ pRight->parent( pRLeft, memory_model::memory_order_release );
- pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- pNode->parent( pRLeft, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
+ pNode->parent( pRLeft, memory_model::memory_order_release );
if ( pPL == pNode )
- pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
+ pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
+ pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
}
- pRLeft->parent( pParent, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ pRLeft->parent( pParent, memory_model::memory_order_release );
// fix up heights
int hNode = 1 + std::max( hL, hRLL );
- set_height( pNode, hNode );
+ set_height( pNode, hNode, memory_model::memory_order_release );
int hRight = 1 + std::max( hRLR, hRR );
- set_height( pRight, hRight );
- set_height( pRLeft, 1 + std::max( hNode, hRight ));
+ set_height( pRight, hRight, memory_model::memory_order_release );
+ set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release );
end_change( pNode, nodeVersion );
end_change( pRight, rightVersion );