/*
This file is a part of libcds - Concurrent Data Structures library
- (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
Source code repo: http://github.com/khizmax/libcds/
Download: http://sourceforge.net/projects/libcds/files/
void clean()
{
- assert( !gc::is_locked() );
+ assert( !gc::is_locked());
// TODO: use RCU::batch_retire
@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
- During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
+ During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
@note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
It means that the function gets rightmost leaf of the tree and tries to unlink it.
- During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
+ During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
So, the function returns the item with maximum key at the moment of tree traversing.
RCU \p synchronize method can be called. RCU should NOT be locked.
this sequence
\code
set.clear();
- assert( set.empty() );
+ assert( set.empty());
\endcode
the assertion could be raised.
*/
void clear()
{
- while ( extract_min() );
+ while ( extract_min());
}
/// Clears the tree (not thread safe)
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
assert( pNode );
struct stack_record
int nCmp = cmp( key, pChild->m_key );
if ( nCmp == 0 ) {
- if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
+ if ( pChild->is_valued( memory_model::memory_order_acquire )) {
// key found
node_scoped_lock l( m_Monitor, *pChild );
if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
- if ( f( pChild ) ) {
+ if ( f( pChild )) {
m_stat.onFindSuccess();
return find_result::found;
}
template <typename K, typename Compare, typename Func>
int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
while ( true ) {
int result;
template <typename K, typename Compare, typename Func>
bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
while ( true ) {
int result;
template <typename K, typename Compare, typename Func>
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
assert( nVersion != node_type::unlinked );
struct stack_record
template <typename K, typename Compare, typename Func>
int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
assert( nVersion != node_type::unlinked );
struct stack_record
template <typename Func>
int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
- assert( gc::is_locked() );
+ assert( gc::is_locked());
assert( nVersion != node_type::unlinked );
struct stack_record
nVersion = stack[pos].nVersion;
while ( true ) {
- node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
+ int iterDir = nDir;
+ node_type * pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
--pos;
m_stat.onRemoveRetry();
break;
}
- if ( pChild == nullptr ) {
+ if ( !pChild ) {
// Found min/max
- assert(pNode->is_valued( memory_model::memory_order_acquire ));
- int result = try_remove_node( pParent, pNode, nVersion, func, disp );
- if ( result != update_flags::retry )
- return result;
- --pos;
- m_stat.onRemoveRetry();
- break;
+ if ( pNode->is_valued( memory_model::memory_order_acquire )) {
+ int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+
+ if ( result == update_flags::result_removed )
+ return result;
+
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+ else {
+ // check right (for min) or left (for max) child node
+ iterDir = -iterDir;
+ pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
+ if ( !pChild ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+ }
}
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
// retry
}
- else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
+ else if ( pChild == child( pNode, iterDir, memory_model::memory_order_acquire )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
pNew->m_pValue.store( pVal, memory_model::memory_order_release );
};
- if ( c_bRelaxedInsert ) {
+ static_if ( c_bRelaxedInsert ) {
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
|| child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
{
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
|| child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
{
- if ( c_bRelaxedInsert ) {
+ static_if ( c_bRelaxedInsert ) {
mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
free_value( pVal );
return update_flags::retry;
}
- if ( !c_bRelaxedInsert )
+ static_if ( !c_bRelaxedInsert )
fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
pNode->child( pNew, nDir, memory_model::memory_order_release );
return update_flags::retry;
}
- if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
+ if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) {
m_stat.onInsertFailed();
return update_flags::failed;
}
assert( pParent != nullptr );
assert( pNode != nullptr );
- if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
+ if ( !pNode->is_valued( memory_model::memory_order_acquire ))
return update_flags::failed;
if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
{
// pParent and pNode must be locked
- assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
+ assert( !pParent->is_unlinked(memory_model::memory_order_relaxed));
node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
private: // rotations
//@cond
+ int check_node_ordering( node_type* pParent, node_type* pChild )
+ {
+ return key_comparator()( pParent->m_key, pChild->m_key );
+ }
+
int estimate_node_condition( node_type * pNode )
{
node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
{
while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
int nCond = estimate_node_condition( pNode );
- if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
+ if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ))
return;
if ( nCond != unlink_required && nCond != rebalance_required )
// pNode->pLeft is too large, we will rotate-right.
// If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
- {
- assert( pLeft != nullptr );
- node_scoped_lock l( m_Monitor, *pLeft );
- if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
- return pNode; // retry for pNode
-
- int hL = height( pLeft, memory_model::memory_order_acquire );
- if ( hL - hR <= 1 )
- return pNode; // retry
-
- node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
- int hLR = height_null( pLRight, memory_model::memory_order_acquire );
- node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
- int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
-
- if ( hLL > hLR ) {
- // rotate right
- return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
- }
- else {
- if ( pLRight ) {
- node_scoped_lock lr( m_Monitor, *pLRight );
- if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
- return pNode; // retry
-
- hLR = height( pLRight, memory_model::memory_order_acquire );
- if ( hLL > hLR )
- return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
-
- int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
- int balance = hLL - hLRL;
- if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
- // nParent.child.left won't be damaged after a double rotation
- return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
- }
- }
- else
+ assert( pLeft != nullptr );
+ node_scoped_lock l( m_Monitor, *pLeft );
+ if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
+ return pNode; // retry for pNode
+
+ assert( check_node_ordering( pNode, pLeft ) > 0 );
+
+ int hL = height( pLeft, memory_model::memory_order_acquire );
+ if ( hL - hR <= 1 )
+ return pNode; // retry
+
+ node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
+ int hLR = height_null( pLRight, memory_model::memory_order_acquire );
+ node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
+ int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
+
+ if ( pLRight ) {
+ {
+ node_scoped_lock lr( m_Monitor, *pLRight );
+ if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
return pNode; // retry
- // focus on pLeft, if necessary pNode will be balanced later
- return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
+ assert( check_node_ordering( pLeft, pLRight ) < 0 );
+
+ hLR = height( pLRight, memory_model::memory_order_acquire );
+ if ( hLL > hLR )
+ return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
+
+ int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire );
+ int balance = hLL - hLRL;
+ if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) {
+ // nParent.child.left won't be damaged after a double rotation
+ return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
+ }
}
+
+ // focus on pLeft, if necessary pNode will be balanced later
+ return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
}
+ else if ( hLL > hLR ) {
+ // rotate right
+ return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
+ }
+
+ return pNode; // retry
}
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
- {
- assert( pRight != nullptr );
- node_scoped_lock l( m_Monitor, *pRight );
- if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
- return pNode; // retry for pNode
-
- int hR = height( pRight, memory_model::memory_order_acquire );
- if ( hL - hR >= -1 )
- return pNode; // retry
-
- node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
- int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
- node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
- int hRR = height_null( pRRight, memory_model::memory_order_acquire );
- if ( hRR > hRL )
- return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
-
- if ( pRLeft ) {
+ assert( pRight != nullptr );
+ node_scoped_lock l( m_Monitor, *pRight );
+ if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
+ return pNode; // retry for pNode
+
+ assert( check_node_ordering( pNode, pRight ) < 0 );
+
+ int hR = height( pRight, memory_model::memory_order_acquire );
+ if ( hL - hR >= -1 )
+ return pNode; // retry
+
+ node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
+ int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
+ node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
+ int hRR = height_null( pRRight, memory_model::memory_order_acquire );
+
+ if ( pRLeft ) {
+ {
node_scoped_lock lrl( m_Monitor, *pRLeft );
if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
return pNode; // retry
+ assert( check_node_ordering( pRight, pRLeft ) > 0 );
+
hRL = height( pRLeft, memory_model::memory_order_acquire );
if ( hRR >= hRL )
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
int balance = hRR - hRLR;
- if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
- return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
+ if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed )))
+ return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
}
- else
- return pNode; // retry
+
return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
}
+ else if ( hRR > hRL )
+ return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
+
+ return pNode; // retry
}
static void begin_change( node_type * pNode, version_type version )
{
assert(pNode->version(memory_model::memory_order_acquire) == version );
assert( (version & node_type::shrinking) == 0 );
- pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
+ pNode->exchange_version( version | node_type::shrinking, memory_model::memory_order_acquire );
}
static void end_change( node_type * pNode, version_type version )
{
begin_change( pNode, nodeVersion );
pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
- if ( pLRight != nullptr )
- pLRight->parent( pNode, memory_model::memory_order_release );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ if ( pLRight != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
+ pLRight->parent( pNode, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pNode, pLRight ) > 0 );
+ }
- pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
- pNode->parent( pLeft, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
+ pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
- if ( pParentLeft == pNode )
- pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+ pNode->parent( pLeft, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pLeft, pNode ) < 0 );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ if ( pParentLeft == pNode ) {
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+ pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
+ }
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+ pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
}
- pLeft->parent( pParent, memory_model::memory_order_release );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
+ pLeft->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// fix up heights links
int hNode = 1 + std::max( hLR, hR );
}
// pLeft might also have routing node damage (if pLeft.left was null)
- if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
+ if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) {
m_stat.onDamageAfterRightRotation();
return pLeft;
}
// fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
- if ( pRLeft != nullptr )
- pRLeft->parent( pNode, memory_model::memory_order_release );
-
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ if ( pRLeft != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
+ pRLeft->parent( pNode, memory_model::memory_order_relaxed );
+ }
- pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
- pNode->parent( pRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
+ pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+ pNode->parent( pRight, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pRight, pNode ) > 0 );
- if ( pParentLeft == pNode )
- pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ if ( pParentLeft == pNode ) {
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+ pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
+ }
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pRight, memory_model::memory_order_release );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+ pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
}
- pRight->parent( pParent, memory_model::memory_order_release );
- atomics::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
+ pRight->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// fix up heights
int hNode = 1 + std::max( hL, hRL );
return pNode;
}
- if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
+ if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
m_stat.onRemoveAfterLeftRotation();
return pNode;
}
return pRight;
}
- if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
+ if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) {
m_stat.onDamageAfterLeftRotation();
return pRight;
}
// fix up pNode links, careful about the order!
pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
- if ( pLRR != nullptr )
- pLRR->parent( pNode, memory_model::memory_order_release );
+ if ( pLRR != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRR->m_pParent );
+ pLRR->parent( pNode, memory_model::memory_order_relaxed );
+ }
- pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
- if ( pLRL != nullptr )
- pLRL->parent( pLeft, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight );
+ pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
+
+ if ( pLRL != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRL->m_pParent );
+ pLRL->parent( pLeft, memory_model::memory_order_relaxed );
+ }
- pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
- pLeft->parent( pLRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pLeft );
+ pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
- pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
- pNode->parent( pLRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent );
+ pLeft->parent( pLRight, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pLRight, pLeft ) > 0 );
- if ( pPL == pNode )
- pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pRight );
+ pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+ pNode->parent( pLRight, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pLRight, pNode ) < 0 );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ if ( pPL == pNode ) {
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+ pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
+ }
else {
assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+ pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
}
- pLRight->parent( pParent, memory_model::memory_order_release );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent );
+ pLRight->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// fix up heights
int hNode = 1 + std::max( hLRR, hR );
// fix up pNode links, careful about the order!
pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
- if ( pRLL != nullptr )
- pRLL->parent( pNode, memory_model::memory_order_release );
+ if ( pRLL != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLL->m_pParent );
+ pRLL->parent( pNode, memory_model::memory_order_relaxed );
+ }
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft );
+ pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
+
+ if ( pRLR != nullptr ) {
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLR->m_pParent );
+ pRLR->parent( pRight, memory_model::memory_order_relaxed );
+ }
- pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
- if ( pRLR != nullptr )
- pRLR->parent( pRight, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pRight );
+ pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
- pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
- pRight->parent( pRLeft, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent );
+ pRight->parent( pRLeft, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pRLeft, pRight ) < 0 );
- pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
- pNode->parent( pRLeft, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pLeft );
+ pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
- if ( pPL == pNode )
- pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent );
+ pNode->parent( pRLeft, memory_model::memory_order_relaxed );
+ assert( check_node_ordering( pRLeft, pNode ) > 0 );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ if ( pPL == pNode ) {
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft );
+ pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
+ }
else {
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
- pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight );
+ pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
}
- pRLeft->parent( pParent, memory_model::memory_order_release );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
+ CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent );
+ pRLeft->parent( pParent, memory_model::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( memory_model::memory_order_acq_rel );
// fix up heights
int hNode = 1 + std::max( hL, hRLL );
return pNode;
}
- if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
+ if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
m_stat.onRemoveAfterLRRotation();
return pNode;
}