From f6bdcbbed87947f92d5ee61045c23859c2717342 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Sun, 5 Apr 2015 12:49:18 +0300
Subject: [PATCH] BronsonAVLTreeMap: removed recursion

---
 cds/container/impl/bronson_avltree_map_rcu.h | 361 ++++++++++++++++++-
 1 file changed, 357 insertions(+), 4 deletions(-)

diff --git a/cds/container/impl/bronson_avltree_map_rcu.h b/cds/container/impl/bronson_avltree_map_rcu.h
index 8524ddfc..c623c467 100644
--- a/cds/container/impl/bronson_avltree_map_rcu.h
+++ b/cds/container/impl/bronson_avltree_map_rcu.h
@@ -884,6 +884,8 @@ namespace cds { namespace container {
 
                     if ( result == update_flags::retry )
                         m_stat.onRemoveRetry();
+                    else
+                        return;
                 }
             }
         }
@@ -931,6 +933,106 @@ namespace cds { namespace container {
             return pNode ? height( pNode, order ) : 0;
         }
 
+        static CDS_CONSTEXPR int const c_stackSize = 64;
+
+        template <typename Q, typename Compare, typename Func>
+        find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
+        {
+            assert( gc::is_locked() );
+            assert( pNode );
+
+            struct stack_record
+            {
+                node_type * pNode;
+                version_type nVersion;
+                int nDir;
+            };
+
+            stack_record stack[c_stackSize];
+            int pos = 0;
+            stack[0].pNode = pNode;
+            stack[0].nVersion = nVersion;
+            stack[0].nDir = nDir;
+
+            while ( pos >= 0 ) {
+                pNode = stack[pos].pNode;
+                nVersion = stack[pos].nVersion;
+                nDir = stack[pos].nDir;
+
+                while ( true ) {
+                    node_type * pChild = child( pNode, nDir );
+                    if ( !pChild ) {
+                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                            --pos;
+                            m_stat.onFindRetry();
+                            break; // retry
+                        }
+                        m_stat.onFindFailed();
+                        return find_result::not_found;
+                    }
+
+                    int nCmp = cmp( key, pChild->m_key );
+                    if ( nCmp == 0 ) {
+                        if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
+                            // key found
+                            node_scoped_lock l( m_Monitor, *pChild );
+                            if ( child( pNode, nDir ) == pChild ) {
+                                if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
+                                    if ( f( pChild ) ) {
+                                        m_stat.onFindSuccess();
+                                        return find_result::found;
+                                    }
+                                }
+                            }
+                            else {
+                                m_stat.onFindRetry();
+                                continue;
+                            }
+                        }
+                        m_stat.onFindFailed();
+                        return find_result::not_found;
+                    }
+                    else {
+                        version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+                        if ( nChildVersion & node_type::shrinking ) {
+                            m_stat.onFindWaitShrinking();
+                            pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+
+                            if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                                --pos;
+                                m_stat.onFindRetry();
+                                break; // retry
+                            }
+                        }
+                        else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild ) {
+                            if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                                --pos;
+                                m_stat.onFindRetry();
+                                break; // retry
+                            }
+
+                            ++pos;
+                            assert( pos < c_stackSize );
+                            stack[pos].pNode = pChild;
+                            stack[pos].nVersion = nChildVersion;
+                            stack[pos].nDir = nCmp;
+                            break; // child iteration
+                        }
+
+                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                            --pos;
+                            m_stat.onFindRetry();
+                            break; // retry
+                        }
+                    }
+                    m_stat.onFindRetry();
+                }
+            }
+            return find_result::retry;
+        }
+
+#if 0
         template <typename Q, typename Compare, typename Func>
         find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
         {
@@ -988,6 +1090,7 @@ namespace cds { namespace container {
                 m_stat.onFindRetry();
             }
         }
+#endif
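Note: the new try_find above replaces recursive descent with an explicit, fixed-size stack of per-level records. A push corresponds to the recursive call, and "--pos" corresponds to returning into the caller's frame so that the parent level can re-run its own step after a failed validation. A minimal, self-contained sketch of the transformation, with a hypothetical plain Node type (no versioning, names invented for illustration, not the map's real node_type):

    #include <cassert>

    struct Node {
        int    key;
        Node * pLeft;
        Node * pRight;
    };

    Node * find( Node * pRoot, int key )
    {
        static const int c_stackSize = 64;
        Node * stack[c_stackSize];
        int pos = 0;
        stack[0] = pRoot;

        while ( pos >= 0 ) {
            Node * pNode = stack[pos];
            if ( !pNode || key == pNode->key )
                return pNode;   // definite hit or miss, cf. found / not_found

            // The real code validates pNode's version here and does "--pos"
            // to retry one level up on failure; this toy has no versions,
            // so it only ever pushes.
            Node * pChild = key < pNode->key ? pNode->pLeft : pNode->pRight;
            ++pos;              // "call" into the child's frame
            assert( pos < c_stackSize );
            stack[pos] = pChild;
        }
        return nullptr;         // reached only after every level has been popped
    }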
 
         template <typename K, typename Compare, typename Func>
         int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
         {
@@ -1039,9 +1142,7 @@ namespace cds { namespace container {
                     return update_flags::failed;
                 }
 
-                if ( result == update_flags::retry )
-                    m_stat.onUpdateRetry();
-                else
+                if ( result != update_flags::retry )
                     return result;
             }
         }
@@ -1079,6 +1180,95 @@ namespace cds { namespace container {
             }
         }
 
+        template <typename K, typename Compare, typename Func>
+        int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+        {
+            assert( gc::is_locked() );
+            assert( nVersion != node_type::unlinked );
+
+            struct stack_record
+            {
+                node_type * pNode;
+                version_type nVersion;
+            };
+
+            stack_record stack[c_stackSize];
+            int pos = 0;
+            stack[0].pNode = pNode;
+            stack[0].nVersion = nVersion;
+
+            while ( pos >= 0 ) {
+                pNode = stack[pos].pNode;
+                nVersion = stack[pos].nVersion;
+
+                int nCmp = cmp( key, pNode->m_key );
+                if ( nCmp == 0 ) {
+                    int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
+                    if ( result != update_flags::retry )
+                        return result;
+                    --pos;
+                    m_stat.onUpdateRetry();
+                    continue;
+                }
+
+                while ( true ) {
+                    node_type * pChild = child( pNode, nCmp );
+                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                        --pos;
+                        m_stat.onUpdateRetry();
+                        break;
+                    }
+
+                    if ( pChild == nullptr ) {
+                        // insert new node
+                        if ( nFlags & update_flags::allow_insert ) {
+                            int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
+                            if ( result != update_flags::retry )
+                                return result;
+                            --pos;
+                            m_stat.onUpdateRetry();
+                            break;
+                        }
+                        else
+                            return update_flags::failed;
+                    }
+                    else {
+                        // update child
+                        version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+                        if ( nChildVersion & node_type::shrinking ) {
+                            m_stat.onUpdateWaitShrinking();
+                            pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+                            // retry
+                        }
+                        else if ( pChild == child( pNode, nCmp )) {
+                            // this second read is important, because it is protected by nChildVersion
+
+                            // validate the read that our caller took to get to node
+                            if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                                --pos;
+                                m_stat.onUpdateRetry();
+                                break; // retry
+                            }
+                            // At this point we know that the traversal our parent took to get to node is still valid.
+                            // The recursive implementation will validate the traversal from node to
+                            // child, so just prior to the node nVersion validation both traversals were definitely okay.
+                            // This means that we are no longer vulnerable to node shrinks, and we don't need
+                            // to validate node version any more.
+                            ++pos;
+                            assert( pos < c_stackSize );
+                            stack[pos].pNode = pChild;
+                            stack[pos].nVersion = nChildVersion;
+                            assert( nChildVersion != node_type::unlinked );
+                            break; // child iteration
+                        }
+                        m_stat.onUpdateRetry();
+                    }
+                }
+            }
+            return update_flags::retry;
+        }
+
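Note: every descent step in these loops is guarded by the optimistic hand-over-hand validation of Bronson et al.: read the child's version, wait out any in-progress shrink, re-read the parent's version, and only then commit to the step. A stripped-down sketch of that protocol with a hypothetical version word whose low bit marks a shrink in progress (the real node_type packs unlinked/shrinking flags and more state):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    static const std::uint64_t shrinking = 1;   // low bit: rotation in progress

    struct VNode {
        std::atomic<std::uint64_t> version;
        // key, children, ...
    };

    // cf. wait_until_shrink_completed: spin until the shrinking bit clears
    inline std::uint64_t stable_version( VNode const& n )
    {
        std::uint64_t v = n.version.load( std::memory_order_acquire );
        while ( v & shrinking ) {
            std::this_thread::yield();
            v = n.version.load( std::memory_order_acquire );
        }
        return v;
    }

    // A child pointer read is usable only if the parent's version is still
    // the one seen when stepping onto it; otherwise pop a level and retry.
    inline bool validate( VNode const& parent, std::uint64_t seen )
    {
        return parent.version.load( std::memory_order_acquire ) == seen;
    }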
+#if 0
         template <typename K, typename Compare, typename Func>
         int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
         {
@@ -1138,7 +1328,89 @@ namespace cds { namespace container {
                 return result;
             }
         }
+#endif
+
+        template <typename K, typename Compare, typename Func>
+        int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+        {
+            assert( gc::is_locked() );
+            assert( nVersion != node_type::unlinked );
+
+            struct stack_record
+            {
+                node_type * pParent;
+                node_type * pNode;
+                version_type nVersion;
+            };
+
+            stack_record stack[c_stackSize];
+            int pos = 0;
+            stack[0].pParent = pParent;
+            stack[0].pNode = pNode;
+            stack[0].nVersion = nVersion;
+
+            while ( pos >= 0 ) {
+                pParent = stack[pos].pParent;
+                pNode = stack[pos].pNode;
+                nVersion = stack[pos].nVersion;
+
+                int nCmp = cmp( key, pNode->m_key );
+                if ( nCmp == 0 ) {
+                    int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+                    if ( result != update_flags::retry )
+                        return result;
+                    --pos;
+                    m_stat.onRemoveRetry();
+                    continue;
+                }
+
+                while ( true ) {
+                    node_type * pChild = child( pNode, nCmp );
+                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                        --pos;
+                        m_stat.onRemoveRetry();
+                        break;
+                    }
+
+                    if ( pChild == nullptr )
+                        return update_flags::failed;
+
+                    // update child
+                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+                    if ( nChildVersion & node_type::shrinking ) {
+                        m_stat.onRemoveWaitShrinking();
+                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+                        // retry
+                    }
+                    else if ( pChild == child( pNode, nCmp )) {
+                        // this second read is important, because it is protected by nChildVersion
+
+                        // validate the read that our caller took to get to node
+                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                            --pos;
+                            m_stat.onRemoveRetry();
+                            break;
+                        }
+
+                        // At this point we know that the traversal our parent took to get to node is still valid.
+                        // The recursive implementation will validate the traversal from node to
+                        // child, so just prior to the node nVersion validation both traversals were definitely okay.
+                        // This means that we are no longer vulnerable to node shrinks, and we don't need
+                        // to validate node version any more.
+                        ++pos;
+                        assert( pos < c_stackSize );
+                        stack[pos].pParent = pNode;
+                        stack[pos].pNode = pChild;
+                        stack[pos].nVersion = nChildVersion;
+                        break; // child iteration
+                    }
+                    m_stat.onRemoveRetry();
+                }
+            }
+            return update_flags::retry;
+        }
+
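Note on the choice of c_stackSize = 64: the traversal stack can never be deeper than the height of the tree, and an AVL-balanced tree of height h holds at least F(h+3) - 1 keys, so a path of 64 nodes (height 63) is reached only when the map has more than 10^13 keys; the sparsest AVL tree of height 63 already has F(66) - 1, about 2.8 * 10^13, nodes. Assuming the tree stays near AVL balance (this is a relaxed-balance concurrent tree, so the bound is approximate), the assert( pos < c_stackSize ) cannot fire for any realistic in-memory map.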
+#if 0
         template <typename K, typename Compare, typename Func>
         int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
         {
@@ -1194,7 +1466,86 @@ namespace cds { namespace container {
                 return result;
             }
         }
+#endif
+
+        template <typename Func>
+        int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+        {
+            assert( gc::is_locked() );
+            assert( nVersion != node_type::unlinked );
+
+            struct stack_record
+            {
+                node_type * pParent;
+                node_type * pNode;
+                version_type nVersion;
+            };
+
+            stack_record stack[c_stackSize];
+            int pos = 0;
+            stack[0].pParent = pParent;
+            stack[0].pNode = pNode;
+            stack[0].nVersion = nVersion;
+
+            while ( pos >= 0 ) {
+                pParent = stack[pos].pParent;
+                pNode = stack[pos].pNode;
+                nVersion = stack[pos].nVersion;
+
+                while ( true ) {
+                    node_type * pChild = child( pNode, nDir );
+                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                        --pos;
+                        m_stat.onRemoveRetry();
+                        break;
+                    }
+
+                    if ( pChild == nullptr ) {
+                        // Found min/max
+                        assert( pNode->is_valued( memory_model::memory_order_relaxed ));
+                        int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+                        if ( result != update_flags::retry )
+                            return result;
+                        --pos;
+                        m_stat.onRemoveRetry();
+                        break;
+                    }
+
+                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+                    if ( nChildVersion & node_type::shrinking ) {
+                        m_stat.onRemoveWaitShrinking();
+                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+                        // retry
+                    }
+                    else if ( pChild == child( pNode, nDir )) {
+                        // this second read is important, because it is protected by nChildVersion
+
+                        // validate the read that our caller took to get to node
+                        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+                            --pos;
+                            m_stat.onRemoveRetry();
+                            break;
+                        }
+
+                        // At this point we know that the traversal our parent took to get to node is still valid.
+                        // The recursive implementation will validate the traversal from node to
+                        // child, so just prior to the node nVersion validation both traversals were definitely okay.
+                        // This means that we are no longer vulnerable to node shrinks, and we don't need
+                        // to validate node version any more.
+                        ++pos;
+                        assert( pos < c_stackSize );
+                        stack[pos].pParent = pNode;
+                        stack[pos].pNode = pChild;
+                        stack[pos].nVersion = nChildVersion;
+                        break; // child iteration
+                    }
+                    m_stat.onRemoveRetry();
+                }
+            }
+            return update_flags::retry;
+        }
+
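Note: find/update and remove/extract use differently shaped stack records. try_find and try_update only need the node itself per level, while try_remove and try_extract_minmax must also carry the parent, because try_remove_node takes the parent as its first argument in order to unlink the victim under the parent's lock. The two shapes, side by side (the _find/_remove suffixes are added here only for illustration):

    // per level in try_find: the node, the version read when stepping
    // onto it, and the branch taken
    struct stack_record_find {
        node_type *  pNode;
        version_type nVersion;
        int          nDir;
    };

    // per level in try_remove / try_extract_minmax: the parent rides along
    // so the node can be unlinked under the parent's lock
    struct stack_record_remove {
        node_type *  pParent;
        node_type *  pNode;
        version_type nVersion;
    };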
+#if 0
         template <typename Func>
         int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
         {
@@ -1209,6 +1560,7 @@ namespace cds { namespace container {
 
                 if ( pChild == nullptr ) {
                     // Found min/max
+                    assert(pNode->is_valued( memory_model::memory_order_relaxed ));
                     return try_remove_node( pParent, pNode, nVersion, func, disp );
                 }
                 else {
@@ -1247,6 +1599,7 @@ namespace cds { namespace container {
                 return result;
             }
         }
+#endif
 
         template <typename K, typename Func>
         int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
@@ -1436,7 +1789,7 @@ namespace cds { namespace container {
                 return false;
             }
 
-            assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
+            assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
             assert( pParent == parent( pNode ));
 
             node_type * pLeft = child( pNode, left_child );
-- 
2.34.1
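Note: for context, a minimal driver for the container this patch touches. This is a hypothetical sketch: the RCU flavor, headers, and member calls below follow the usual libcds conventions and should be checked against the library version in use:

    #include <cds/init.h>
    #include <cds/urcu/general_buffered.h>
    #include <cds/container/bronson_avltree_map_rcu.h>

    int main()
    {
        cds::Initialize();                           // init libcds infrastructure
        {
            typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
            typedef cds::container::BronsonAVLTreeMap< rcu_type, int, int > map_type;

            cds::threading::Manager::attachThread(); // every thread must attach

            map_type m;
            m.insert( 42, 4200 );                    // drives try_update / try_insert_node
            m.erase( 42 );                           // drives try_remove

            cds::threading::Manager::detachThread();
        }
        cds::Terminate();
        return 0;
    }

Under contention these operations retry by popping one stack level at a time, the same backtracking the recursive version got from its call stack, but now with a fixed 64-entry array instead of unbounded native stack growth.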