if ( result == update_flags::retry )
m_stat.onRemoveRetry();
+ else
+ return;
}
}
}
return pNode ? height( pNode, order ) : 0;
}
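+ // Capacity of the fixed traversal stack used by the iterative try_find()/try_update()/
+ // try_remove()/try_extract_minmax() below; the depth is guarded by assert( pos < c_stackSize ).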
+ static CDS_CONSTEXPR int const c_stackSize = 64;
+
+ template <typename Q, typename Compare, typename Func>
+ find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
+ {
+ assert( gc::is_locked() );
+ assert( pNode );
+
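+ // One frame of the explicit descent stack that replaces the former recursive calls:
+ // the node being examined, the version observed when it was reached, and the
+ // direction in which to descend from it.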
+ struct stack_record
+ {
+ node_type * pNode;
+ version_type nVersion;
+ int nDir;
+ };
+
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+ stack[0].nDir = nDir;
+
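+ // Resume from the deepest saved frame; a version mismatch pops one frame (--pos),
+ // and once the stack is empty find_result::retry is returned to the caller.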
+ while ( pos >= 0 ) {
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+ nDir = stack[pos].nDir;
+
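+ // Read the child in direction nDir and re-validate pNode's version before
+ // trusting the result; a concurrent shrink of pNode forces a retry.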
+ while ( true ) {
+ node_type * pChild = child( pNode, nDir );
+ if ( !pChild ) {
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+ m_stat.onFindFailed();
+ return find_result::not_found;
+ }
+
+ int nCmp = cmp( key, pChild->m_key );
+ if ( nCmp == 0 ) {
+ if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
+ // key found
+ node_scoped_lock l( m_Monitor, *pChild );
+ if ( child(pNode, nDir) == pChild ) {
+ if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
+ if ( f( pChild ) ) {
+ m_stat.onFindSuccess();
+ return find_result::found;
+ }
+ }
+ }
+ else {
+ m_stat.onFindRetry();
+ continue;
+ }
+ }
+ m_stat.onFindFailed();
+ return find_result::not_found;
+ }
+ else {
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onFindWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+ }
+ else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
+ {
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ stack[pos].nDir = nCmp;
+ break; // child iteration
+ }
+
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onFindRetry();
+ break; // retry
+ }
+ }
+ m_stat.onFindRetry();
+ }
+ }
+ return find_result::retry;
+ }
+
+#if 0
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
{
m_stat.onFindRetry();
}
}
+#endif
template <typename K, typename Compare, typename Func>
int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
return update_flags::failed;
}
- if ( result == update_flags::retry )
- m_stat.onUpdateRetry();
- else
+ if ( result != update_flags::retry )
return result;
}
}
}
}
+ template <typename K, typename Compare, typename Func>
+ int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+ {
+ assert( gc::is_locked() );
+ assert( nVersion != node_type::unlinked );
+
+ struct stack_record
+ {
+ node_type * pNode;
+ version_type nVersion;
+ };
+
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+
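+ // Same explicit-stack descent as try_find(): pop a frame on version mismatch and
+ // return update_flags::retry to the caller once the stack is empty.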
+ while ( pos >= 0 ) {
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+
+ int nCmp = cmp( key, pNode->m_key );
+ if ( nCmp == 0 ) {
+ int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onUpdateRetry();
+ continue;
+ }
+
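+ // Descend one level in direction nCmp; the child pointer is acted upon only after
+ // pNode's version has been re-checked.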
+ while ( true ) {
+ node_type * pChild = child( pNode, nCmp );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onUpdateRetry();
+ break;
+ }
+
+ if ( pChild == nullptr ) {
+ // insert new node
+ if ( nFlags & update_flags::allow_insert ) {
+ int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onUpdateRetry();
+ break;
+ }
+ else
+ return update_flags::failed;
+ }
+ else {
+ // update child
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onUpdateWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ // retry
+ }
+ else if ( pChild == child( pNode, nCmp )) {
+ // this second read is important, because it is protected by nChildVersion
+
+ // validate the read that our caller took to get to node
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ --pos;
+ m_stat.onUpdateRetry();
+ break; // retry
+ }
+ // At this point we know that the traversal our parent took to get to pNode is still valid.
+ // The frame pushed below will validate the traversal from pNode to pChild, so just prior
+ // to the pNode version validation both traversals were definitely okay. This means that
+ // we are no longer vulnerable to node shrinks, and we don't need to validate the pNode
+ // version any more.
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ assert( nChildVersion != node_type::unlinked );
+ break; // child iteration
+ }
+ m_stat.onUpdateRetry();
+ }
+ }
+ }
+ return update_flags::retry;
+ }
+
+#if 0
template <typename K, typename Compare, typename Func>
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
return result;
}
}
+#endif
+
+ template <typename K, typename Compare, typename Func>
+ int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+ {
+ assert( gc::is_locked() );
+ assert( nVersion != node_type::unlinked );
+
+ struct stack_record
+ {
+ node_type * pParent;
+ node_type * pNode;
+ version_type nVersion;
+ };
+
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pParent = pParent;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+
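+ // Same descent protocol as try_update(); each frame also keeps the parent so the
+ // matching node can be unlinked via try_remove_node().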
+ while ( pos >= 0 ) {
+ pParent = stack[pos].pParent;
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+
+ int nCmp = cmp( key, pNode->m_key );
+ if ( nCmp == 0 ) {
+ int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onRemoveRetry();
+ continue;
+ }
+
+ while ( true ) {
+ node_type * pChild = child( pNode, nCmp );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ if ( pChild == nullptr )
+ return update_flags::failed;
+
+ // descend into the child
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onRemoveWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ // retry
+ }
+ else if ( pChild == child( pNode, nCmp )) {
+ // this second read is important, because it is protected by nChildVersion
+
+ // validate the read that our caller took to get to node
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ // At this point we know that the traversal our parent took to get to pNode is still valid.
+ // The frame pushed below will validate the traversal from pNode to pChild, so just prior
+ // to the pNode version validation both traversals were definitely okay. This means that
+ // we are no longer vulnerable to node shrinks, and we don't need to validate the pNode
+ // version any more.
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pParent = pNode;
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ break; // child iteration
+ }
+ m_stat.onRemoveRetry();
+ }
+ }
+ return update_flags::retry;
+ }
+#if 0
template <typename K, typename Compare, typename Func>
int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
return result;
}
}
+#endif
+ template <typename Func>
+ int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
+ {
+ assert( gc::is_locked() );
+ assert( nVersion != node_type::unlinked );
+
+ struct stack_record
+ {
+ node_type * pParent;
+ node_type * pNode;
+ version_type nVersion;
+ };
+
+ stack_record stack[c_stackSize];
+ int pos = 0;
+ stack[0].pParent = pParent;
+ stack[0].pNode = pNode;
+ stack[0].nVersion = nVersion;
+
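+ // Walk in the single direction nDir until no child remains; that node is the current
+ // minimum/maximum and is removed via try_remove_node().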
+ while ( pos >= 0 ) {
+ pParent = stack[pos].pParent;
+ pNode = stack[pos].pNode;
+ nVersion = stack[pos].nVersion;
+
+ while ( true ) {
+ node_type * pChild = child( pNode, nDir );
+ if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ if ( pChild == nullptr ) {
+ // Found min/max
+ assert( pNode->is_valued( memory_model::memory_order_relaxed ));
+ int result = try_remove_node( pParent, pNode, nVersion, func, disp );
+ if ( result != update_flags::retry )
+ return result;
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
+ if ( nChildVersion & node_type::shrinking ) {
+ m_stat.onRemoveWaitShrinking();
+ pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
+ // retry
+ }
+ else if ( pChild == child( pNode, nDir )) {
+ // this second read is important, because it is protected by nChildVersion
+
+ // validate the read that our caller took to get to node
+ if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
+ --pos;
+ m_stat.onRemoveRetry();
+ break;
+ }
+
+ // At this point we know that the traversal our parent took to get to pNode is still valid.
+ // The frame pushed below will validate the traversal from pNode to pChild, so just prior
+ // to the pNode version validation both traversals were definitely okay. This means that
+ // we are no longer vulnerable to node shrinks, and we don't need to validate the pNode
+ // version any more.
+ ++pos;
+ assert( pos < c_stackSize );
+ stack[pos].pParent = pNode;
+ stack[pos].pNode = pChild;
+ stack[pos].nVersion = nChildVersion;
+ break; // child iteration
+ }
+ m_stat.onRemoveRetry();
+ }
+ }
+ return update_flags::retry;
+ }
+
+#if 0
template <typename Func>
int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
{
if ( pChild == nullptr ) {
// Found min/max
+ assert(pNode->is_valued( memory_model::memory_order_relaxed ));
return try_remove_node( pParent, pNode, nVersion, func, disp );
}
else {
return result;
}
}
+#endif
template <typename K, typename Func>
int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
return false;
}
- assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
+ assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
assert( pParent == parent( pNode ));
node_type * pLeft = child( pNode, left_child );