3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
// Pointer specialization: mapped_type is T*. The tree stores pointers to
// user-allocated values; removed values are reclaimed via traits::disposer.
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
// Non-doxygen branch: the comparator is synthesized from Traits::compare / Traits::less.
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
// Real definition: strip the pointer from mapped_type so exempt_ptr owns
// the pointee (T), not the raw T* slot stored in the node.
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal node type and supporting policies (protected section).
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// Outcome of an internal search attempt: found / not_found / retry.
96 enum class find_result
// update_flags mixes request bits (allow_insert / allow_update) with result
// codes; result bits reuse the request bit values so callers can mask them.
113 result_inserted = allow_insert,
114 result_updated = allow_update,
// Rebalance-step outcomes (negative codes, distinct from update results).
121 nothing_required = -3,
122 rebalance_required = -2,
// Per-node lock guard supplied by the sync_monitor (node-level locking).
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Allocates a new tree node through cxx_allocator, forwarding the key.
// The new node starts without a value (it is set separately by the caller).
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees a node WITHOUT invoking the value disposer. Preconditions: the node
// must no longer hold a value and its monitor injection must be released.
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
// Disposes a value pointer (body not visible in this chunk; presumably
// invokes traits::disposer — confirm against the full header).
150 static void free_value( mapped_type pVal )
// Reads the child link of pNode in direction nDir (left/right).
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order = memory_model::memory_order_relaxed )
157 return pNode->child( nDir ).load( order );
// Reads the parent link of pNode with the requested memory order.
160 static node_type * parent( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
162 return pNode->parent( order );
// rcu_disposer: accumulates nodes (and at most one value) unlinked during a
// single operation, and retires them to the RCU GC on destruction, after the
// RCU read-side critical section has ended.
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
// Links an unvalued node into the retired list (singly linked via m_pNextRemoved).
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
// Records the single value pointer to be retired with the user disposer.
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
// Internal disposer for retired nodes: frees node memory only — the value
// (if any) is retired separately below.
196 struct internal_disposer
198 void operator()( node_type * p ) const
// Retirement must happen outside the RCU critical section.
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
// Walk the retired-node list and hand each node to the GC.
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
// Retire the recorded value (if any) with the user-provided disposer.
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
// Data members: m_Root is a sentinel node; user data hangs off its right child.
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
// m_pRoot aliases the sentinel m_Root viewed as a node_type.
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if inserting successful, \p false otherwise.
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
// allow_insert only: returns false if the key is already present.
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
// A freshly inserted node must not carry a value yet.
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key with lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
282 \p second is \p true if new node has been added or \p false if the node with \p key
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
// allow_update always; allow_insert only when bInsert is set.
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
// first = operation succeeded; second = a new node was inserted.
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
// ensure() is the legacy alias of update( key, pVal, true ).
299 template <typename K>
300 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
302 return update( key, pVal, true );
307 /// Delete \p key from the map
309 RCU \p synchronize() method can be called. RCU should not be locked.
311 Return \p true if \p key is found and deleted, \p false otherwise
313 template <typename K>
314 bool erase( K const& key )
// The removal functor returns true => the value is disposed (not extracted).
319 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
323 /// Deletes the item from the map using \p pred predicate for searching
325 The function is an analog of \p erase(K const&)
326 but \p pred is used for key comparing.
327 \p Less functor has the interface like \p std::less.
328 \p Less must imply the same element order as the comparator used for building the map.
330 template <typename K, typename Less>
331 bool erase_with( K const& key, Less pred )
// Comparator built from Less; must imply the same order as key_comparator.
336 cds::opt::details::make_comparator_from_less<Less>(),
337 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
341 /// Delete \p key from the map
343 The function searches an item with key \p key, calls \p f functor
344 and deletes the item. If \p key is not found, the functor is not called.
346 The functor \p Func interface:
349 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
353 RCU \p synchronize method can be called. RCU should not be locked.
355 Return \p true if key is found and deleted, \p false otherwise
357 template <typename K, typename Func>
358 bool erase( K const& key, Func f )
// f is applied to the found (key, value) before the value is disposed.
363 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
366 disp.dispose_value(pVal);
372 /// Deletes the item from the map using \p pred predicate for searching
374 The function is an analog of \p erase(K const&, Func)
375 but \p pred is used for key comparing.
376 \p Less functor has the interface like \p std::less.
377 \p Less must imply the same element order as the comparator used for building the map.
379 template <typename K, typename Less, typename Func>
380 bool erase_with( K const& key, Less pred, Func f )
// Same as erase(key, f) but with a Less-based comparator.
385 cds::opt::details::make_comparator_from_less<Less>(),
386 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
389 disp.dispose_value(pVal);
395 /// Extracts a value with minimal key from the map
397 Returns \p exempt_ptr to the leftmost item.
398 If the tree is empty, returns empty \p exempt_ptr.
400 Note that the function returns only the value for minimal key.
401 To retrieve its key use \p extract_min( Func ) member function.
403 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
404 It means that the function gets leftmost leaf of the tree and tries to unlink it.
405 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
406 So, the function returns the item with minimum key at the moment of tree traversing.
408 RCU \p synchronize method can be called. RCU should NOT be locked.
409 The function does not free the item.
410 The deallocator will be implicitly invoked when the returned object is destroyed or when
411 its \p release() member function is called.
413 exempt_ptr extract_min()
// The key of the extracted item is ignored; only its value is returned.
415 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
418 /// Extracts the minimal key and the corresponding value
420 Returns \p exempt_ptr to the leftmost item.
421 If the tree is empty, returns empty \p exempt_ptr.
423 \p Func functor is used to store minimal key.
424 \p Func has the following signature:
427 void operator()( key_type const& key );
430 If the tree is empty, \p f is not called.
431 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
434 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
435 It means that the function gets leftmost leaf of the tree and tries to unlink it.
436 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
437 So, the function returns the item with minimum key at the moment of tree traversing.
439 RCU \p synchronize method can be called. RCU should NOT be locked.
440 The function does not free the item.
441 The deallocator will be implicitly invoked when the returned object is destroyed or when
442 its \p release() member function is called.
444 template <typename Func>
445 exempt_ptr extract_min( Func f )
// f receives the key of the extracted (near-)minimum item.
447 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
450 /// Extracts the minimal key and the corresponding value
452 This function is a shortcut for the following call:
455 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
457 \p key_type should be copy-assignable. The copy of minimal key
458 is returned in \p min_key argument.
// Enabled only when key_type is copy-assignable: the extracted key is
// copied into min_key.
460 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
461 extract_min_key( key_type& min_key )
463 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
466 /// Extracts a value with maximal key from the tree
468 Returns \p exempt_ptr pointer to the rightmost item.
469 If the set is empty, returns empty \p exempt_ptr.
471 Note that the function returns only the value for maximal key.
472 To retrieve its key use \p extract_max( Func ) member function.
474 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
475 It means that the function gets rightmost leaf of the tree and tries to unlink it.
476 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
477 So, the function returns the item with maximum key at the moment of tree traversing.
479 RCU \p synchronize method can be called. RCU should NOT be locked.
480 The function does not free the item.
481 The deallocator will be implicitly invoked when the returned object is destroyed or when
482 its \p release() is called.
484 exempt_ptr extract_max()
// The key of the extracted item is ignored; only its value is returned.
486 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
489 /// Extracts the maximal key and corresponding value
491 Returns \p exempt_ptr pointer to the rightmost item.
492 If the set is empty, returns empty \p exempt_ptr.
494 \p Func functor is used to store maximal key.
495 \p Func has the following signature:
498 void operator()( key_type const& key );
501 If the tree is empty, \p f is not called.
502 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
505 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
506 It means that the function gets rightmost leaf of the tree and tries to unlink it.
507 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
508 So, the function returns the item with maximum key at the moment of tree traversing.
510 RCU \p synchronize method can be called. RCU should NOT be locked.
511 The function does not free the item.
512 The deallocator will be implicitly invoked when the returned object is destroyed or when
513 its \p release() is called.
515 template <typename Func>
516 exempt_ptr extract_max( Func f )
// f receives the key of the extracted (near-)maximum item.
518 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
521 /// Extracts the maximal key and corresponding value
523 This function is a shortcut for the following call:
526 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
528 \p key_type should be copy-assignable. The copy of maximal key
529 is returned in \p max_key argument.
// Enabled only when key_type is copy-assignable: the extracted key is
// copied into max_key.
531 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
532 extract_max_key( key_type& max_key )
534 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
537 /// Extracts an item from the map
539 The function searches an item with key equal to \p key in the tree,
540 unlinks it, and returns \p exempt_ptr pointer to a value found.
541 If \p key is not found the function returns an empty \p exempt_ptr.
543 RCU \p synchronize method can be called. RCU should NOT be locked.
544 The function does not destroy the value found.
545 The disposer will be implicitly invoked when the returned object is destroyed or when
546 its \p release() member function is called.
548 template <typename Q>
549 exempt_ptr extract( Q const& key )
// Unlinks the item with the given key; empty exempt_ptr if not found.
551 return exempt_ptr(do_extract( key ));
555 /// Extracts an item from the map using \p pred for searching
557 The function is an analog of \p extract(Q const&)
558 but \p pred is used for key compare.
559 \p Less has the interface like \p std::less.
560 \p pred must imply the same element order as the comparator used for building the tree.
562 template <typename Q, typename Less>
563 exempt_ptr extract_with( Q const& key, Less pred )
// Same as extract() but searches with a Less-based comparator.
565 return exempt_ptr(do_extract_with( key, pred ));
568 /// Find the key \p key
570 The function searches the item with key equal to \p key and calls the functor \p f for item found.
571 The interface of \p Func functor is:
574 void operator()( key_type const& key, mapped_type& item );
577 where \p item is the item found.
578 The functor is called under node-level lock.
580 The function applies RCU lock internally.
582 The function returns \p true if \p key is found, \p false otherwise.
584 template <typename K, typename Func>
585 bool find( K const& key, Func f )
587 return do_find( key, key_comparator(),
588 [&f]( node_type * pNode ) -> bool {
589 assert( pNode != nullptr );
// The functor runs under the node-level lock; the value pointer read here
// is stable for the duration of the call.
590 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
592 f( pNode->m_key, *pVal );
600 /// Finds the key \p val using \p pred predicate for searching
602 The function is an analog of \p find(K const&, Func)
603 but \p pred is used for key comparing.
604 \p Less functor has the interface like \p std::less.
605 \p Less must imply the same element order as the comparator used for building the map.
607 template <typename K, typename Less, typename Func>
608 bool find_with( K const& key, Less pred, Func f )
// Same as find(key, f) but with a Less-based comparator.
611 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
612 [&f]( node_type * pNode ) -> bool {
613 assert( pNode != nullptr );
614 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
616 f( pNode->m_key, *pVal );
624 /// Find the key \p key
626 The function searches the item with key equal to \p key
627 and returns \p true if it is found, and \p false otherwise.
629 The function applies RCU lock internally.
631 template <typename K>
632 bool find( K const& key )
// Presence check only: the node functor does nothing but report success.
634 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
637 /// Finds the key \p val using \p pred predicate for searching
639 The function is an analog of \p find(K const&)
640 but \p pred is used for key comparing.
641 \p Less functor has the interface like \p std::less.
642 \p Less must imply the same element order as the comparator used for building the map.
// Presence check with a Less-based comparator.
644 template <typename K, typename Less>
645 bool find_with( K const& key, Less pred )
648 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
651 /// Clears the tree (thread safe, not atomic)
653 The function unlinks all items from the tree.
654 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
658 assert( set.empty() );
660 the assertion could be raised.
662 For each node the \ref disposer will be called after unlinking.
664 RCU \p synchronize method can be called. RCU should not be locked.
// clear(): repeatedly extract the minimum until the tree is empty.
668 while ( extract_min() );
671 /// Clears the tree (not thread safe)
673 This function is not thread safe and may be called only when no other thread deals with the tree.
674 The function is used in the tree destructor.
// Destructor-time cleanup; currently delegates to the thread-safe clear().
678 clear(); // temp solution
682 /// Checks if the map is empty
// The sentinel root keeps all user data in its right subtree, so an empty
// right link means an empty map.
685 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
688 /// Returns item count in the map
690 Only leaf nodes containing user data are counted.
692 The value returned depends on item counter type provided by \p Traits template parameter.
693 If it is \p atomicity::empty_item_counter this function always returns 0.
695 The function is not suitable for checking the tree emptiness, use \p empty()
696 member function for this purpose.
// size(): returns the item counter (always 0 for empty_item_counter).
700 return m_ItemCounter;
703 /// Returns const reference to internal statistics
704 stat const& statistics() const
709 /// Returns reference to \p sync_monitor object
710 sync_monitor& monitor()
// Const overload of monitor().
715 sync_monitor const& monitor() const
721 /// Checks internal consistency (not atomic, not thread-safe)
723 The debugging function to check internal consistency of the tree.
725 bool check_consistency() const
// Delegates to the functor overload with a no-op violation handler.
727 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
730 /// Checks internal consistency (not atomic, not thread-safe)
732 The debugging function to check internal consistency of the tree.
733 The functor \p Func is called if a violation of internal tree structure
737 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
741 - \p nLevel - the level where the violation is found
742 - \p hLeft - the height of left subtree
743 - \p hRight - the height of right subtree
745 The functor is called for each violation found.
747 template <typename Func>
748 bool check_consistency( Func f ) const
// The traversal starts at the right child of the sentinel root, level 1.
750 node_type * pChild = child( m_pRoot, right_child );
753 do_check_consistency( pChild, 1, f, nErrors );
// Recursive consistency check: verifies BST key order and the AVL balance
// invariant, calling f(nLevel, hLeft, hRight) for each violation found and
// counting errors in nErrors. Returns the subtree height.
761 template <typename Func>
762 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
766 node_type * pLeft = child( pNode, left_child );
767 node_type * pRight = child( pNode, right_child );
// BST invariant: left key < node key < right key.
768 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
770 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
773 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
774 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
// AVL invariant: subtree heights differ by at most 1.
776 if ( hLeft >= hRight ) {
777 if ( hLeft - hRight > 1 ) {
778 f( nLevel, hLeft, hRight );
784 if ( hRight - hLeft > 1 ) {
785 f( nLevel, hLeft, hRight );
// Top-level find: invokes try_find from the root under the RCU lock
// (the retry loop is in lines not visible in this chunk).
794 template <typename Q, typename Compare, typename Func>
795 bool do_find( Q& key, Compare cmp, Func f ) const
800 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
// By the time the loop exits, the result must be definitive.
802 assert( result != find_result::retry );
803 return result == find_result::found;
// Top-level insert/update entry: returns an update_flags result code.
806 template <typename K, typename Compare, typename Func>
807 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
// Caller must not already hold the RCU lock (deadlock policy check).
809 check_deadlock_policy::check();
// removed_list retires unlinked nodes/values after the RCU section ends.
811 rcu_disposer removed_list;
814 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Top-level removal entry, shared by erase() (value disposed) and the
// extract() family (value handed back to the caller).
818 template <typename K, typename Compare, typename Func>
819 bool do_remove( K const& key, Compare cmp, Func func )
821 // Func must return true if the value was disposed
822 // or false if the value was extracted
824 check_deadlock_policy::check();
826 rcu_disposer removed_list;
829 return try_remove_root( key, cmp, func, removed_list );
833 template <typename Func>
834 mapped_type do_extract_min( Func f )
836 mapped_type pExtracted = nullptr;
// Returning false means "value extracted, do not dispose it".
839 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
844 template <typename Func>
845 mapped_type do_extract_max( Func f )
847 mapped_type pExtracted = nullptr;
// Returning false means "value extracted, do not dispose it".
850 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Extracts the leftmost or rightmost item (nDir selects the direction),
// retrying while concurrent restructuring invalidates the traversal.
855 template <typename Func>
856 void do_extract_minmax( int nDir, Func func )
858 check_deadlock_policy::check();
860 rcu_disposer removed_list;
867 // get right child of root
868 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
870 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// A shrinking child is mid-rotation: wait for it, then retry from the root.
871 if ( nChildVersion & node_type::shrinking ) {
872 m_stat.onRemoveRootWaitShrinking();
873 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
874 result = update_flags::retry;
// Re-read validates that the root's child link did not change under us.
876 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
877 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
880 result = update_flags::retry;
885 if ( result == update_flags::retry )
886 m_stat.onRemoveRetry();
888 m_stat.onExtract( result == update_flags::result_removed );
895 template <typename Q>
896 mapped_type do_extract( Q const& key )
898 mapped_type pExtracted = nullptr;
// false => keep the value (extracted); only the node is retired.
902 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
904 m_stat.onExtract( pExtracted != nullptr );
// Same as do_extract() but with a Less-based comparator.
908 template <typename Q, typename Less>
909 mapped_type do_extract_with( Q const& key, Less pred )
912 mapped_type pExtracted = nullptr;
915 cds::opt::details::make_comparator_from_less<Less>(),
916 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
918 m_stat.onExtract( pExtracted != nullptr );
// Node-height bookkeeping; height_null treats a null pointer as height 0.
925 static int height( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
928 return pNode->m_nHeight.load( order );
930 static void set_height( node_type * pNode, int h, atomics::memory_order order = memory_model::memory_order_relaxed )
933 pNode->m_nHeight.store( h, order );
935 static int height_null( node_type * pNode, atomics::memory_order order = memory_model::memory_order_relaxed )
937 return pNode ? height( pNode, order ) : 0;
// Fixed traversal-stack depth for the iterative search/update paths;
// 64 levels is far deeper than any AVL tree that fits in memory.
940 static CDS_CONSTEXPR int const c_stackSize = 64;
// Iterative hand-over-hand search (Bronson et al. algorithm) using an
// explicit stack instead of recursion. Each step validates pNode's version
// after reading the child link; a version change means a concurrent rotation
// invalidated the path, so the search unwinds/retries.
942 template <typename Q, typename Compare, typename Func>
943 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
945 assert( gc::is_locked() );
951 version_type nVersion;
// stack_record stack mirrors the recursion frames (node, version, direction).
955 stack_record stack[c_stackSize];
957 stack[0].pNode = pNode;
958 stack[0].nVersion = nVersion;
959 stack[0].nDir = nDir;
962 pNode = stack[pos].pNode;
963 nVersion = stack[pos].nVersion;
964 nDir = stack[pos].nDir;
967 node_type * pChild = child( pNode, nDir );
// Null child: re-validate the parent before concluding "not found".
969 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
971 m_stat.onFindRetry();
974 m_stat.onFindFailed();
975 return find_result::not_found;
978 int nCmp = cmp( key, pChild->m_key );
// Exact match: lock the node, re-check the link and valued state, apply f.
980 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
982 node_scoped_lock l( m_Monitor, *pChild );
983 if ( child(pNode, nDir) == pChild ) {
984 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
986 m_stat.onFindSuccess();
987 return find_result::found;
992 m_stat.onFindRetry();
// Routing node without a value for this key => not found.
996 m_stat.onFindFailed();
997 return find_result::not_found;
// Descend: a shrinking child must finish its rotation first.
1000 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1001 if ( nChildVersion & node_type::shrinking ) {
1002 m_stat.onFindWaitShrinking();
1003 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1005 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1007 m_stat.onFindRetry();
// Child still linked and parent unchanged: push a frame and descend.
1011 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
1013 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1015 m_stat.onFindRetry();
1020 assert(pos < c_stackSize);
1021 stack[pos].pNode = pChild;
1022 stack[pos].nVersion = nChildVersion;
1023 stack[pos].nDir = nCmp;
1024 break; // child iteration
1027 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1029 m_stat.onFindRetry();
1033 m_stat.onFindRetry();
// Stack exhausted without a definitive answer: caller restarts from root.
1036 return find_result::retry;
// Recursive variant of try_find with the same signature as the iterative
// one above — presumably the two are selected by a preprocessor switch in
// lines not visible in this chunk (TODO: confirm against the full header).
1040 template <typename Q, typename Compare, typename Func>
1041 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
1043 assert( gc::is_locked() );
1047 node_type * pChild = child( pNode, nDir );
// Null child: validate the parent version before reporting "not found".
1049 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1050 return find_result::retry;
1052 m_stat.onFindFailed();
1053 return find_result::not_found;
1056 int nCmp = cmp( key, pChild->m_key );
// Exact match: lock the node and re-check valued state before applying f.
1058 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
1060 node_scoped_lock l( m_Monitor, *pChild );
1061 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1062 if ( f( pChild ) ) {
1063 m_stat.onFindSuccess();
1064 return find_result::found;
1069 m_stat.onFindFailed();
1070 return find_result::not_found;
// Descend: wait out a shrinking child, then validate and recurse.
1073 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1074 if ( nChildVersion & node_type::shrinking ) {
1075 m_stat.onFindWaitShrinking();
1076 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1078 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1079 return find_result::retry;
1081 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir ) == pChild )
1083 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1084 return find_result::retry;
// A child-level retry re-validates this level before looping again.
1086 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
1087 if ( found != find_result::retry )
1091 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1092 return find_result::retry;
1094 m_stat.onFindRetry();
// Root-level update driver: descends into the root's right child when the
// tree is non-empty, or inserts the very first node under the root lock.
1099 template <typename K, typename Compare, typename Func>
1100 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1102 assert( gc::is_locked() );
1107 // get right child of root
1108 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1110 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// A shrinking child is mid-rotation: wait, then retry.
1111 if ( nChildVersion & node_type::shrinking ) {
1112 m_stat.onUpdateRootWaitShrinking();
1113 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1114 result = update_flags::retry;
1116 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1117 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1119 result = update_flags::retry;
1122 // the tree is empty
1123 if ( nFlags & update_flags::allow_insert ) {
1124 // insert into tree as right child of the root
// Root lock serializes first-node insertion; re-check emptiness under it.
1126 node_scoped_lock l( m_Monitor, *m_pRoot );
1127 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1128 result = update_flags::retry;
1132 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1133 mapped_type pVal = funcUpdate( pNew );
1134 assert( pVal != nullptr );
// Release store publishes the value before the node becomes reachable.
1135 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1137 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
1138 set_height( m_pRoot, 2 );
1142 m_stat.onInsertSuccess();
1143 return update_flags::result_inserted;
// Empty tree and insertion not allowed => the update fails.
1146 return update_flags::failed;
1149 if ( result != update_flags::retry )
// Root-level removal driver: validates the root's right child (waiting out
// shrinks) and delegates to try_remove, retrying on any races.
1154 template <typename K, typename Compare, typename Func>
1155 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1157 assert( gc::is_locked() );
1162 // get right child of root
1163 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1165 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1166 if ( nChildVersion & node_type::shrinking ) {
1167 m_stat.onRemoveRootWaitShrinking();
1168 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1169 result = update_flags::retry;
// Re-read validates the child link before committing to the removal.
1171 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1172 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1175 result = update_flags::retry;
1180 if ( result == update_flags::retry )
1181 m_stat.onRemoveRetry();
1183 m_stat.onRemove( result == update_flags::result_removed );
1184 return result == update_flags::result_removed;
// Updates or inserts the item with key \p key - iterative, stack-based
// variant of try_update (a recursive twin is defined below, apparently
// selected at compile time). \p nFlags is a combination of
// update_flags::allow_insert / allow_update; returns an update_flags value.
// Version checks against \p nVersion detect concurrent rotations.
1189 template <typename K, typename Compare, typename Func>
1190 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1192 assert( gc::is_locked() );
1193 assert( nVersion != node_type::unlinked );
// field of the local stack_record struct: node version captured at push time
1198 version_type nVersion;
// Explicit traversal stack replaces recursion; c_stackSize bounds the depth.
1201 stack_record stack[c_stackSize];
1203 stack[0].pNode = pNode;
1204 stack[0].nVersion = nVersion;
// Outer loop: resume the descent from the deepest still-valid level after a retry.
1206 while ( pos >= 0 ) {
1207 pNode = stack[pos].pNode;
1208 nVersion = stack[pos].nVersion;
1210 int nCmp = cmp( key, pNode->m_key );
// Exact key match: try to update the value stored in this node.
1212 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1213 if ( result != update_flags::retry )
1216 m_stat.onUpdateRetry();
// Key differs: read the child on the search direction, then re-validate
// pNode's version so the link we read is still meaningful.
1221 node_type * pChild = child( pNode, nCmp );
1222 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1224 m_stat.onUpdateRetry();
// No child in the search direction: the key is absent - insert if allowed.
1228 if ( pChild == nullptr ) {
1230 if ( nFlags & update_flags::allow_insert ) {
1231 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1232 if ( result != update_flags::retry )
1235 m_stat.onUpdateRetry();
1239 return update_flags::failed;
// Child exists: wait out an in-flight shrink (rotation), then descend.
1243 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1244 if ( nChildVersion & node_type::shrinking ) {
1245 m_stat.onUpdateWaitShrinking();
1246 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1249 else if ( pChild == child( pNode, nCmp )) {
1250 // this second read is important, because it is protected by nChildVersion
1252 // validate the read that our caller took to get to node
1253 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1255 m_stat.onUpdateRetry();
1258 // At this point we know that the traversal our parent took to get to node is still valid.
1259 // The recursive implementation will validate the traversal from node to
1260 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1261 // This means that we are no longer vulnerable to node shrinks, and we don't need
1262 // to validate node version any more.
// Push the child as the next level and continue the descent.
1264 assert( pos < c_stackSize );
1265 stack[pos].pNode = pChild;
1266 stack[pos].nVersion = nChildVersion;
1267 assert( nChildVersion != node_type::unlinked );
1268 break; // child iteration
1270 m_stat.onUpdateRetry();
1274 return update_flags::retry;
// Recursive variant of try_update (twin of the stack-based version above).
// Descends one level per call; optimistic version validation against
// \p nVersion detects concurrent rotations of \p pNode.
1278 template <typename K, typename Compare, typename Func>
1279 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1281 assert( gc::is_locked() );
1282 assert( nVersion != node_type::unlinked );
1284 int nCmp = cmp( key, pNode->m_key );
// Exact key match: update the value stored in pNode in place.
1286 return try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
// Read the child link, then re-validate pNode's version.
1290 node_type * pChild = child( pNode, nCmp );
1291 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1292 return update_flags::retry;
1294 if ( pChild == nullptr ) {
// Key not found: insert a new leaf if the caller allows it.
1296 if ( nFlags & update_flags::allow_insert )
1297 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1299 result = update_flags::failed;
1303 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1304 if ( nChildVersion & node_type::shrinking ) {
1305 m_stat.onUpdateWaitShrinking();
1306 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// retry at this level once the shrink has completed
1308 result = update_flags::retry;
1310 else if ( pChild == child( pNode, nCmp )) {
1311 // this second read is important, because it is protected by nChildVersion
1313 // validate the read that our caller took to get to node
1314 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1315 return update_flags::retry;
1317 // At this point we know that the traversal our parent took to get to node is still valid.
1318 // The recursive implementation will validate the traversal from node to
1319 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1320 // This means that we are no longer vulnerable to node shrinks, and we don't need
1321 // to validate node version any more.
1322 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1325 result = update_flags::retry;
// Propagate retry upward only when pNode itself changed; otherwise the
// operation can safely be retried at this level.
1328 if ( result == update_flags::retry ) {
1329 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1330 return update_flags::retry;
1331 m_stat.onUpdateRetry();
// Removes the item with key \p key - iterative, stack-based variant
// (a recursive twin is defined below). \p func consumes the removed
// value (see try_remove_node). Returns an update_flags value.
1339 template <typename K, typename Compare, typename Func>
1340 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1342 assert( gc::is_locked() );
1343 assert( nVersion != node_type::unlinked );
// stack_record members: per-level saved traversal state
1347 node_type * pParent;
1349 version_type nVersion;
1352 stack_record stack[c_stackSize];
1354 stack[0].pParent = pParent;
1355 stack[0].pNode = pNode;
1356 stack[0].nVersion = nVersion;
// Resume the descent from the deepest still-valid level after a retry.
1358 while ( pos >= 0 ) {
1359 pParent = stack[pos].pParent;
1360 pNode = stack[pos].pNode;
1361 nVersion = stack[pos].nVersion;
1363 int nCmp = cmp( key, pNode->m_key );
// Found the key: remove the value at this node.
1365 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1366 if ( result != update_flags::retry )
1369 m_stat.onRemoveRetry();
// Descend: read the child, then re-validate pNode's version.
1374 node_type * pChild = child( pNode, nCmp );
1375 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1377 m_stat.onRemoveRetry();
// No child in the search direction - the key is not in the tree.
1381 if ( pChild == nullptr )
1382 return update_flags::failed;
// Wait out an in-flight rotation of the child before descending.
1385 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1386 if ( nChildVersion & node_type::shrinking ) {
1387 m_stat.onRemoveWaitShrinking();
1388 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1391 else if ( pChild == child( pNode, nCmp )) {
1392 // this second read is important, because it is protected by nChildVersion
1394 // validate the read that our caller took to get to node
1395 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1397 m_stat.onRemoveRetry();
1401 // At this point we know that the traversal our parent took to get to node is still valid.
1402 // The recursive implementation will validate the traversal from node to
1403 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1404 // This means that we are no longer vulnerable to node shrinks, and we don't need
1405 // to validate node version any more.
// Push the next level and continue the descent.
1407 assert( pos < c_stackSize );
1408 stack[pos].pParent = pNode;
1409 stack[pos].pNode = pChild;
1410 stack[pos].nVersion = nChildVersion;
1411 break; // child iteration
1413 m_stat.onRemoveRetry();
1416 return update_flags::retry;
// Recursive variant of try_remove (twin of the stack-based version above).
// Descends one level per call; \p func consumes the removed value.
1420 template <typename K, typename Compare, typename Func>
1421 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1423 assert( gc::is_locked() );
1424 assert( nVersion != node_type::unlinked );
1426 int nCmp = cmp( key, pNode->m_key );
// Exact key match: remove the value stored in pNode.
1428 return try_remove_node( pParent, pNode, nVersion, func, disp );
// Read the child link, then re-validate pNode's version.
1433 node_type * pChild = child( pNode, nCmp );
1434 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1435 return update_flags::retry;
// No child in the search direction - the key is not in the tree.
1437 if ( pChild == nullptr )
1438 return update_flags::failed;
1441 result = update_flags::retry;
1442 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1443 if ( nChildVersion & node_type::shrinking ) {
1444 m_stat.onRemoveWaitShrinking();
1445 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1448 else if ( pChild == child( pNode, nCmp )) {
1449 // this second read is important, because it is protected by nChildVersion
1451 // validate the read that our caller took to get to node
1452 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1453 return update_flags::retry;
1455 // At this point we know that the traversal our parent took to get to node is still valid.
1456 // The recursive implementation will validate the traversal from node to
1457 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1458 // This means that we are no longer vulnerable to node shrinks, and we don't need
1459 // to validate node version any more.
1460 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1463 result = update_flags::retry;
// Propagate retry upward only when pNode itself changed; otherwise
// retry the operation at this level.
1466 if ( result == update_flags::retry ) {
1467 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1468 return update_flags::retry;
1469 m_stat.onRemoveRetry();
// Extracts the minimum or maximum item: descends always in the \p nDir
// direction (left_child for min, right_child for max - TODO confirm
// mapping against callers) and removes the last node on that path.
// Iterative, stack-based variant; a recursive twin is defined below.
1477 template <typename Func>
1478 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1480 assert( gc::is_locked() );
1481 assert( nVersion != node_type::unlinked );
// stack_record members: per-level saved traversal state
1485 node_type * pParent;
1487 version_type nVersion;
1490 stack_record stack[c_stackSize];
1492 stack[0].pParent = pParent;
1493 stack[0].pNode = pNode;
1494 stack[0].nVersion = nVersion;
// Resume the descent from the deepest still-valid level after a retry.
1496 while ( pos >= 0 ) {
1497 pParent = stack[pos].pParent;
1498 pNode = stack[pos].pNode;
1499 nVersion = stack[pos].nVersion;
// Read the nDir-child, then re-validate pNode's version.
1502 node_type * pChild = child( pNode, nDir );
1503 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1505 m_stat.onRemoveRetry();
// No further child in the nDir direction: pNode is the extremum and
// must hold a value (asserted) - remove it.
1509 if ( pChild == nullptr ) {
1511 assert(pNode->is_valued( memory_model::memory_order_relaxed ));
1512 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1513 if ( result != update_flags::retry )
1516 m_stat.onRemoveRetry();
// Wait out an in-flight rotation of the child before descending.
1520 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1521 if ( nChildVersion & node_type::shrinking ) {
1522 m_stat.onRemoveWaitShrinking();
1523 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1526 else if ( pChild == child( pNode, nDir )) {
1527 // this second read is important, because it is protected by nChildVersion
1529 // validate the read that our caller took to get to node
1530 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1532 m_stat.onRemoveRetry();
1536 // At this point we know that the traversal our parent took to get to node is still valid.
1537 // The recursive implementation will validate the traversal from node to
1538 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1539 // This means that we are no longer vulnerable to node shrinks, and we don't need
1540 // to validate node version any more.
// Push the next level and continue the descent.
1542 assert( pos < c_stackSize );
1543 stack[pos].pParent = pNode;
1544 stack[pos].pNode = pChild;
1545 stack[pos].nVersion = nChildVersion;
1546 break; // child iteration
1548 m_stat.onRemoveRetry();
1551 return update_flags::retry;
// Recursive variant of try_extract_minmax (twin of the stack-based
// version above): descends in the \p nDir direction and removes the
// last node on that path.
1555 template <typename Func>
1556 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1558 assert( gc::is_locked() );
1559 assert( nVersion != node_type::unlinked );
// Read the nDir-child, then re-validate pNode's version.
1563 node_type * pChild = child( pNode, nDir );
1564 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1565 return update_flags::retry;
// pNode is the extremum: it must hold a value (asserted) - remove it.
1567 if ( pChild == nullptr ) {
1569 assert(pNode->is_valued( memory_model::memory_order_relaxed ));
1570 return try_remove_node( pParent, pNode, nVersion, func, disp );
1573 //result = update_flags::retry;
1574 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1575 if ( nChildVersion & node_type::shrinking ) {
1576 m_stat.onRemoveWaitShrinking();
1577 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1579 result = update_flags::retry;
1581 else if ( pChild == child( pNode, nDir )) {
1582 // this second read is important, because it is protected by nChildVersion
1584 // validate the read that our caller took to get to node
1585 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1586 return update_flags::retry;
1588 // At this point we know that the traversal our parent took to get to node is still valid.
1589 // The recursive implementation will validate the traversal from node to
1590 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1591 // This means that we are no longer vulnerable to node shrinks, and we don't need
1592 // to validate node version any more.
1593 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1596 result = update_flags::retry;
// Propagate retry upward only when pNode itself changed; otherwise
// retry the operation at this level.
1599 if ( result == update_flags::retry ) {
1600 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1601 return update_flags::retry;
1602 m_stat.onRemoveRetry();
// Inserts a new leaf with key \p key as the nDir-child of \p pNode.
// \p funcUpdate produces the (non-null) value pointer for the new node.
// With c_bRelaxedInsert the node is allocated before taking pNode's lock
// (optimistic), at the cost of rolling back if validation fails under lock.
1610 template <typename K, typename Func>
1611 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: fill the new node's value from the user functor.
1615 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1616 mapped_type pVal = funcUpdate( pNew );
1617 assert( pVal != nullptr );
1618 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Relaxed path: pre-check and pre-allocate outside the lock.
1621 if ( c_bRelaxedInsert ) {
1622 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1623 || child( pNode, nDir ) != nullptr )
1625 m_stat.onInsertRetry();
1626 return update_flags::retry;
1629 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1632 node_type * pDamaged;
1634 assert( pNode != nullptr );
1635 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and the child slot still empty.
1637 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1638 || child( pNode, nDir ) != nullptr )
1640 if ( c_bRelaxedInsert ) {
// Roll back the relaxed pre-allocation: detach the value first so the
// node can be released without disposing the user's value twice.
1641 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1642 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1645 m_stat.onRelaxedInsertFailed();
1648 m_stat.onInsertRetry();
1649 return update_flags::retry;
// Strict path: allocate the node only after validation succeeded.
1652 if ( !c_bRelaxedInsert )
1653 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Link the new leaf and recompute pNode's height while still locked.
1655 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1656 pDamaged = fix_height_locked( pNode );
1660 m_stat.onInsertSuccess();
// Repair any balance damage introduced by the new leaf.
1663 fix_height_and_rebalance( pDamaged, disp );
1664 m_stat.onInsertRebalanceRequired();
1667 return update_flags::result_inserted;
// Updates the value of \p pNode in place under the node's lock.
// Returns retry if the node changed or was unlinked, failed if the node
// already has a value and allow_update is not set, result_inserted if
// the node was a valueless routing node, result_updated otherwise.
1670 template <typename Func>
1671 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1675 assert( pNode != nullptr );
1677 node_scoped_lock l( m_Monitor, *pNode );
// Validate the optimistic traversal that led us here.
1679 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1680 return update_flags::retry;
1682 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1683 m_stat.onUpdateUnlinked();
1684 return update_flags::retry;
// An existing value may be replaced only when allow_update is set.
1687 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1688 m_stat.onInsertFailed();
1689 return update_flags::failed;
// pOld == nullptr means pNode was a routing node, so this is an insert.
1692 pOld = pNode->value( memory_model::memory_order_relaxed );
1693 bInserted = pOld == nullptr;
1694 mapped_type pVal = funcUpdate( pNode );
1698 assert( pVal != nullptr );
1699 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// The replaced value is handed to the disposer (reclaimed after the RCU
// grace period).
1704 disp.dispose_value(pOld);
1705 m_stat.onDisposeValue();
1709 m_stat.onInsertSuccess();
1710 return update_flags::result_inserted;
1713 m_stat.onUpdateSuccess();
1714 return update_flags::result_updated;
// Physically removes the value of \p pNode. Two cases:
// - at most one child: splice the node out of the tree (locks pParent
//   then pNode);
// - two children: keep pNode as a valueless routing node and only null
//   out its value.
// func( key, pOld, disp ) consumes the extracted value; its return value
// distinguishes dispose (erase) from keep (extract), per the stats below.
1717 template <typename Func>
1718 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1720 assert( pParent != nullptr );
1721 assert( pNode != nullptr );
// Already a routing node - nothing to remove.
1723 if ( !pNode->is_valued( memory_model::memory_order_relaxed ) )
1724 return update_flags::failed;
1726 if ( child( pNode, left_child ) == nullptr || child( pNode, right_child ) == nullptr ) {
1727 // pNode can be replaced with its child
1729 node_type * pDamaged;
// Lock order: parent first, then node.
1732 node_scoped_lock lp( m_Monitor, *pParent );
1733 if ( pParent->is_unlinked( memory_model::memory_order_relaxed ) || parent( pNode ) != pParent )
1734 return update_flags::retry;
1737 node_scoped_lock ln( m_Monitor, *pNode );
1738 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1739 return update_flags::retry;
1741 pOld = pNode->value( memory_model::memory_order_relaxed );
1743 return update_flags::failed;
// Splice pNode out; a false result means the topology changed under us.
1745 if ( !try_unlink_locked( pParent, pNode, disp ))
1746 return update_flags::retry;
1748 pDamaged = fix_height_locked( pParent );
// func decides the fate of the extracted value.
1752 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1753 m_stat.onDisposeValue();
1755 m_stat.onExtractValue();
1758 fix_height_and_rebalance( pDamaged, disp );
1759 m_stat.onRemoveRebalanceRequired();
1763 // pNode is an internal with two children
// Convert pNode into a routing node: clear its value, keep the structure.
1767 node_scoped_lock ln( m_Monitor, *pNode );
1768 pOld = pNode->value( memory_model::memory_order_relaxed );
1769 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1770 return update_flags::retry;
1772 return update_flags::failed;
1774 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1775 m_stat.onMakeRoutingNode();
1779 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1780 m_stat.onDisposeValue();
1782 m_stat.onExtractValue();
1784 return update_flags::result_removed;
// Splices \p pNode (which must have at most one child) out of the tree.
// Both pParent and pNode must be locked by the caller. Returns false if
// the topology changed and the splice is no longer possible.
1787 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1789 // pParent and pNode must be locked
1790 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1792 node_type * pParentLeft = child( pParent, left_child );
1793 node_type * pParentRight = child( pParent, right_child );
1794 if ( pNode != pParentLeft && pNode != pParentRight ) {
1795 // node is no longer a child of parent
1799 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1800 assert( pParent == parent( pNode ));
1802 node_type * pLeft = child( pNode, left_child );
1803 node_type * pRight = child( pNode, right_child );
1804 if ( pLeft != nullptr && pRight != nullptr ) {
1805 // splicing is no longer possible
// Replace pNode in its parent with its single child (or nullptr).
1808 node_type * pSplice = pLeft ? pLeft : pRight;
1810 if ( pParentLeft == pNode )
1811 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1813 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1816 pSplice->parent( pParent, memory_model::memory_order_relaxed );
1818 // Mark the node as unlinked
1819 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1821 // The value will be disposed by calling function
1822 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Node memory is reclaimed through RCU after the grace period.
1824 disp.dispose( pNode );
1825 m_stat.onDisposeNode();
1832 private: // rotations
// Classifies the repair needed at \p pNode: unlink_required for a
// valueless routing node with a missing child, rebalance_required when
// the balance factor is outside [-1..1], nothing_required when the
// stored height is already correct, otherwise the corrected height.
1834 int estimate_node_condition( node_type * pNode )
1836 node_type * pLeft = child( pNode, left_child );
1837 node_type * pRight = child( pNode, right_child );
// A routing (valueless) node with at most one child can be spliced out.
1839 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1840 return unlink_required;
1842 int h = height( pNode );
1843 int hL = height_null( pLeft );
1844 int hR = height_null( pRight );
1846 int hNew = 1 + std::max( hL, hR );
1847 int nBalance = hL - hR;
1849 if ( nBalance < -1 || nBalance > 1 )
1850 return rebalance_required;
// Return the corrected height, or nothing_required if it is up to date.
1852 return h != hNew ? hNew : nothing_required;
// Locks \p pNode and recomputes its height; returns the node that may
// still be damaged afterwards (see fix_height_locked).
1855 node_type * fix_height( node_type * pNode )
1857 assert( pNode != nullptr );
1858 node_scoped_lock l( m_Monitor, *pNode );
1859 return fix_height_locked( pNode );
// Recomputes the height of \p pNode (caller holds the node's lock) based
// on estimate_node_condition(), and returns the node that may still be
// damaged afterwards.
1862 node_type * fix_height_locked( node_type * pNode )
1864 // pNode must be locked!!!
1865 int h = estimate_node_condition( pNode );
// structural repair (rotation or splice) is needed at this node
1867 case rebalance_required:
1868 case unlink_required:
// height is already correct - nothing more to do here
1870 case nothing_required:
// default: h is the corrected height; store it and report the parent
// as possibly damaged.
1873 set_height( pNode, h );
1874 return parent( pNode );
// Walks up from \p pNode toward the root, repairing stale heights and
// performing rotations until no damage remains. Stops when pNode has no
// parent (root sentinel reached) or when it becomes unlinked.
1878 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1880 while ( pNode && parent( pNode )) {
1881 int nCond = estimate_node_condition( pNode );
// Nothing to repair at this node - stop the walk.
1882 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Only the height is stale: fix it and continue with the parent.
1885 if ( nCond != unlink_required && nCond != rebalance_required )
1886 pNode = fix_height( pNode );
// Structural repair: lock parent first, then node, and rebalance.
1888 node_type * pParent = parent( pNode );
1889 assert( pParent != nullptr );
1891 node_scoped_lock lp( m_Monitor, *pParent );
// Re-validate the parent link under the lock before rebalancing.
1892 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode ) == pParent ) {
1893 node_scoped_lock ln( m_Monitor, *pNode );
1894 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs \p pNode under the locks of \p pParent and \p pNode: splices
// out an unnecessary routing node, rotates when out of balance, or just
// fixes a stale height. Returns the next damaged node (or nullptr).
1901 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1903 // pParent and pNode should be locked.
1904 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1905 assert( parent( pNode ) == pParent );
1907 node_type * pLeft = child( pNode, left_child );
1908 node_type * pRight = child( pNode, right_child );
// A valueless routing node with a missing child is spliced out first.
1910 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1911 if ( try_unlink_locked( pParent, pNode, disp ))
1912 return fix_height_locked( pParent );
1914 // retry needed for pNode
1919 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1921 int h = height( pNode );
1922 int hL = height_null( pLeft );
1923 int hR = height_null( pRight );
1924 int hNew = 1 + std::max( hL, hR );
1925 int balance = hL - hR;
// Left-heavy: rotate right; right-heavy: rotate left.
1928 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1929 else if ( balance < -1 )
1930 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// Only the stored height is stale: fix it and check the parent.
1931 else if ( hNew != h ) {
1932 set_height( pNode, hNew );
1934 // pParent is already locked
1935 return fix_height_locked( pParent );
// pNode's left subtree is too tall: performs a single right rotation,
// a right-over-left double rotation, or recursively rebalances pLeft
// first (the LR case), re-validating links after acquiring pLeft's lock.
1941 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1943 assert( parent( pNode ) == pParent );
1944 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1946 // pParent and pNode is locked yet
1947 // pNode->pLeft is too large, we will rotate-right.
1948 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1951 assert( pLeft != nullptr );
1952 node_scoped_lock l( m_Monitor, *pLeft );
// Re-validate the left link under pLeft's lock.
1953 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1954 return pNode; // retry for pNode
1956 int hL = height( pLeft );
1958 return pNode; // retry
1960 node_type * pLRight = child( pLeft, right_child );
1961 int hLR = height_null( pLRight );
1962 node_type * pLLeft = child( pLeft, left_child );
1963 int hLL = height_null( pLLeft );
// Left-left case: a single right rotation suffices.
1967 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1970 assert( pLRight != nullptr );
1972 node_scoped_lock lr( m_Monitor, *pLRight );
// Re-validate pLeft's right link under pLRight's lock.
1973 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1974 return pNode; // retry
1976 hLR = height( pLRight );
1978 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1980 int hLRL = height_null( child( pLRight, left_child ));
1981 int balance = hLL - hLRL;
1982 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1983 // nParent.child.left won't be damaged after a double rotation
1984 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1988 // focus on pLeft, if necessary pNode will be balanced later
1989 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked: pNode's right subtree is too tall.
// Performs a single left rotation, a left-over-right double rotation, or
// recursively rebalances pRight first (the RL case).
1994 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1996 assert( parent( pNode ) == pParent );
1997 assert( child( pParent, left_child ) == pNode || child( pParent, right_child ) == pNode );
1999 // pParent and pNode is locked yet
2001 assert( pRight != nullptr );
2002 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate the right link under pRight's lock.
2003 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
2004 return pNode; // retry for pNode
2006 int hR = height( pRight );
2007 if ( hL - hR >= -1 )
2008 return pNode; // retry
2010 node_type * pRLeft = child( pRight, left_child );
2011 int hRL = height_null( pRLeft );
2012 node_type * pRRight = child( pRight, right_child );
2013 int hRR = height_null( pRRight );
// Right-right case: a single left rotation suffices.
2015 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
2018 assert( pRLeft != nullptr );
2019 node_scoped_lock lrl( m_Monitor, *pRLeft );
// Re-validate pRight's left link under pRLeft's lock.
2020 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
2021 return pNode; // retry
2023 hRL = height( pRLeft );
2025 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
2027 node_type * pRLRight = child( pRLeft, right_child );
2028 int hRLR = height_null( pRLRight );
2029 int balance = hRR - hRLR;
// Double rotation, unless it would leave pRight a damaged routing node.
2030 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
2031 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// focus on pRight; if necessary pNode will be balanced later
2033 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks \p pNode as shrinking before a rotation: concurrent readers that
// observe the shrinking bit wait (wait_until_shrink_completed) or retry.
2037 static void begin_change( node_type * pNode, version_type version )
2039 assert(pNode->version(memory_model::memory_order_acquire) == version );
2040 assert( (version & node_type::shrinking) == 0 );
2041 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a rotation started with begin_change(): readers that captured
// the old version will fail their validation and retry.
2043 static void end_change( node_type * pNode, version_type version )
2045 // Clear shrinking and unlinked flags and increment version
2046 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft replaces pNode under pParent, pNode becomes
// pLeft's right child, and pLeft's old right subtree (pLRight) becomes
// pNode's left subtree. pParent, pNode and pLeft are locked by the caller;
// pNode is marked shrinking for the duration so concurrent lock-free
// readers can detect the structural change. Returns the next damaged node.
2049 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
2051 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2052 node_type * pParentLeft = child( pParent, left_child );
2054 begin_change( pNode, nodeVersion );
// Relink step by step; the release fences order the link updates so a
// concurrent reader never observes an inconsistent mix of old/new links.
2056 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
2057 if ( pLRight != nullptr )
2058 pLRight->parent( pNode, memory_model::memory_order_relaxed );
2060 atomics::atomic_thread_fence( memory_model::memory_order_release );
2062 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
2063 pNode->parent( pLeft, memory_model::memory_order_relaxed );
2065 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Swing pParent's child link from pNode to pLeft.
2067 if ( pParentLeft == pNode )
2068 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
2070 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2071 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
2073 pLeft->parent( pParent, memory_model::memory_order_relaxed );
2075 atomics::atomic_thread_fence( memory_model::memory_order_release );
2077 // fix up heights links
2078 int hNode = 1 + std::max( hLR, hR );
2079 set_height( pNode, hNode );
2080 set_height( pLeft, 1 + std::max( hLL, hNode ));
2082 end_change( pNode, nodeVersion );
2083 m_stat.onRotateRight();
2085 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
2086 // parent.child). pNode is the deepest. Perform as many fixes as we can
2087 // with the locks we've got.
2089 // We've already fixed the height for pNode, but it might still be outside
2090 // our allowable balance range. In that case a simple fix_height_locked()
2092 int nodeBalance = hLR - hR;
2093 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2094 // we need another rotation at pNode
2095 m_stat.onRotateAfterRightRotation();
2099 // we've fixed balance and height damage for pNode, now handle
2100 // extra-routing node damage
2101 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2102 // we need to remove pNode and then repair
2103 m_stat.onRemoveAfterRightRotation();
2107 // we've already fixed the height at pLeft, do we need a rotation here?
2108 int leftBalance = hLL - hNode;
2109 if ( leftBalance < -1 || leftBalance > 1 ) {
2110 m_stat.onRotateAfterRightRotation();
2114 // pLeft might also have routing node damage (if pLeft.left was null)
2115 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_relaxed) ) {
2116 m_stat.onDamageAfterRightRotation();
2120 // try to fix the parent height while we've still got the lock
2121 return fix_height_locked( pParent );
// Mirror of rotate_right_locked - single left rotation: pRight replaces
// pNode under pParent, pNode becomes pRight's left child, and pRight's old
// left subtree (pRLeft) becomes pNode's right subtree. pNode is marked
// shrinking for the duration. Returns the next damaged node.
2124 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
2126 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2127 node_type * pParentLeft = child( pParent, left_child );
2129 begin_change( pNode, nodeVersion );
2131 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
2132 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
2133 if ( pRLeft != nullptr )
2134 pRLeft->parent( pNode, memory_model::memory_order_relaxed );
2136 atomics::atomic_thread_fence( memory_model::memory_order_release );
2138 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
2139 pNode->parent( pRight, memory_model::memory_order_relaxed );
2141 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Swing pParent's child link from pNode to pRight.
2143 if ( pParentLeft == pNode )
2144 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
2146 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2147 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
2149 pRight->parent( pParent, memory_model::memory_order_relaxed );
2151 atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights
2154 int hNode = 1 + std::max( hL, hRL );
2155 set_height( pNode, hNode );
2156 set_height( pRight, 1 + std::max( hNode, hRR ));
2158 end_change( pNode, nodeVersion );
2159 m_stat.onRotateLeft();
// Post-rotation damage checks, mirroring rotate_right_locked.
2161 int nodeBalance = hRL - hL;
2162 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2163 m_stat.onRotateAfterLeftRotation();
// pNode may have become an unnecessary routing node.
2167 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2168 m_stat.onRemoveAfterLeftRotation();
2172 int rightBalance = hRR - hNode;
2173 if ( rightBalance < -1 || rightBalance > 1 ) {
2174 m_stat.onRotateAfterLeftRotation();
// pRight may have routing-node damage as well.
2178 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_relaxed) ) {
2179 m_stat.onDamageAfterLeftRotation();
// try to fix the parent height while we've still got the lock
2183 return fix_height_locked( pParent );
// Double (right-over-left, i.e. LR) rotation: pLRight (= pLeft->right)
// becomes the root of this subtree, with pLeft as its left child and
// pNode as its right child. Both pNode and pLeft change shape, so both
// are marked shrinking for the duration. Returns the next damaged node.
2186 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
2188 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2189 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
2191 node_type * pPL = child( pParent, left_child );
2192 node_type * pLRL = child( pLRight, left_child );
2193 node_type * pLRR = child( pLRight, right_child );
2194 int hLRR = height_null( pLRR );
2196 begin_change( pNode, nodeVersion );
2197 begin_change( pLeft, leftVersion );
2199 // fix up pNode links, careful about the order!
// pLRight's right subtree moves under pNode...
2200 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
2201 if ( pLRR != nullptr )
2202 pLRR->parent( pNode, memory_model::memory_order_relaxed );
2203 atomics::atomic_thread_fence( memory_model::memory_order_release );
// ...and pLRight's left subtree moves under pLeft.
2205 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
2206 if ( pLRL != nullptr )
2207 pLRL->parent( pLeft, memory_model::memory_order_relaxed );
2208 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pLRight adopts pLeft and pNode as its children.
2210 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
2211 pLeft->parent( pLRight, memory_model::memory_order_relaxed );
2212 atomics::atomic_thread_fence( memory_model::memory_order_release );
2214 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
2215 pNode->parent( pLRight, memory_model::memory_order_relaxed );
2216 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Swing pParent's child link from pNode to pLRight.
2219 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
2221 assert( child( pParent, right_child ) == pNode );
2222 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
2224 pLRight->parent( pParent, memory_model::memory_order_relaxed );
2225 atomics::atomic_thread_fence( memory_model::memory_order_release );
// fix up heights bottom-up: pNode and pLeft first, then the new root pLRight
2228 int hNode = 1 + std::max( hLRR, hR );
2229 set_height( pNode, hNode );
2230 int hLeft = 1 + std::max( hLL, hLRL );
2231 set_height( pLeft, hLeft );
2232 set_height( pLRight, 1 + std::max( hLeft, hNode ));
2234 end_change( pNode, nodeVersion );
2235 end_change( pLeft, leftVersion );
2236 m_stat.onRotateRightOverLeft();
2238 // caller should have performed only a single rotation if pLeft was going
2239 // to end up damaged
2240 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2241 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
2243 // We have damaged pParent, pLR (now parent.child), and pNode (now
2244 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2245 // can with the locks we've got.
2247 // We've already fixed the height for pNode, but it might still be outside
2248 // our allowable balance range. In that case a simple fix_height_locked()
2250 int nodeBalance = hLRR - hR;
2251 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2252 // we need another rotation at pNode
2253 m_stat.onRotateAfterRLRotation();
2257 // pNode might also be damaged by being an unnecessary routing node
2258 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2259 // repair involves splicing out pNode and maybe more rotations
2260 m_stat.onRemoveAfterRLRotation();
2264 // we've already fixed the height at pLRight, do we need a rotation here?
2265 int balanceLR = hLeft - hNode;
2266 if ( balanceLR < -1 || balanceLR > 1 ) {
2267 m_stat.onRotateAfterRLRotation();
2271 // try to fix the parent height while we've still got the lock
2272 return fix_height_locked( pParent );
// Mirror image of rotate_right_over_left_locked(): pNode must be rotated
// left around its right child pRight, but pRight leans left, so pRLeft
// (pRight's left child) is lifted to the root of the subtree — pNode
// becomes its left child and pRight its right child.  Caller holds the
// locks on pParent, pNode and pRight; hL, hRR, hRLR are the known heights
// of pNode->left, pRight->right and pRLeft->right.
// Returns the result of fix_height_locked( pParent ).
// NOTE(review): as with the mirror routine, several physical lines (the
// opening brace, the pPL-based parent-link selection, and the recovery
// "return rebalance_*" statements) are elided from this excerpt — verify
// against the full file before modifying.
2275 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
// Version stamps for the optimistic-reader change protocol (see
// begin_change()/end_change() below).
2277 version_type nodeVersion = pNode->version( memory_model::memory_order_acquire );
2278 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2280 node_type * pPL = child( pParent, left_child ); // used (in elided branch) to pick which pParent link to redirect
2281 node_type * pRLL = child( pRLeft, left_child );  // will become pNode's new right child
2282 node_type * pRLR = child( pRLeft, right_child ); // will become pRight's new left child
2283 int hRLL = height_null( pRLL ); // presumably height with nullptr treated as 0 — TODO confirm helper semantics
// Open the change windows before rewriting any link.
2285 begin_change( pNode, nodeVersion );
2286 begin_change( pRight, rightVersion );
2288 // fix up pNode links, careful about the order!
// Step 1: pNode adopts pRLL as its right child.
2289 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
2290 if ( pRLL != nullptr )
2291 pRLL->parent( pNode, memory_model::memory_order_relaxed );
// Release fence publishes each child/parent link pair before the next
// structural store.
2292 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 2: pRight adopts pRLR as its left child.
2294 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
2295 if ( pRLR != nullptr )
2296 pRLR->parent( pRight, memory_model::memory_order_relaxed );
2297 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 3: pRLeft takes pRight as its right child …
2299 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
2300 pRight->parent( pRLeft, memory_model::memory_order_relaxed );
2301 atomics::atomic_thread_fence( memory_model::memory_order_release );
// … and pNode as its left child: pRLeft is now the subtree root.
2303 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
2304 pNode->parent( pRLeft, memory_model::memory_order_relaxed );
2305 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Redirect pParent's link (left or right — selecting branch elided in this
// excerpt) from pNode to pRLeft.
2308 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
2310 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2311 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
2313 pRLeft->parent( pParent, memory_model::memory_order_relaxed );
2314 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Recompute heights bottom-up from the precomputed child heights.
2317 int hNode = 1 + std::max( hL, hRLL );
2318 set_height( pNode, hNode );
2319 int hRight = 1 + std::max( hRLR, hRR );
2320 set_height( pRight, hRight );
2321 set_height( pRLeft, 1 + std::max( hNode, hRight ));
// Close the change windows.
2323 end_change( pNode, nodeVersion );
2324 end_change( pRight, rightVersion );
2325 m_stat.onRotateLeftOverRight();
// Caller should have performed only a single rotation if pRight was going
// to end up damaged (mirror of the assertion in the RL variant).
2327 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// pNode's height is fixed but it may still be out of balance range:
// another rotation at pNode would be needed.
2329 int nodeBalance = hRLL - hL;
2330 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2331 m_stat.onRotateAfterLRRotation();
// (elided in excerpt: the recovery-rotation call/return for pNode)
// pNode might also be damaged by being an unnecessary routing node.
2335 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2336 m_stat.onRemoveAfterLRRotation();
// (elided in excerpt: the unlink/return for the routing node)
// Height at pRLeft is already fixed; check whether it needs a rotation.
2340 int balRL = hRight - hNode;
2341 if ( balRL < -1 || balRL > 1 ) {
2342 m_stat.onRotateAfterLRRotation();
// (elided in excerpt: the recovery-rotation call/return for pRLeft)
// Try to fix the parent height while we've still got the lock.
2346 return fix_height_locked( pParent );
2351 }} // namespace cds::container
2353 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H