3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// --- Static node helpers (excerpt is missing some original lines, e.g. braces) ---
// Allocates a new tree node via the node allocator, perfectly forwarding the key.
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Deallocates a node that has already been stripped of its value.
// Precondition: the node holds no value and its sync-monitor injection is free.
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
// Disposes a user value pointer (body not visible in this excerpt; presumably invokes the traits disposer — TODO confirm).
150 static void free_value( mapped_type pVal )
// Loads the child pointer of pNode in direction nDir with the given memory ordering.
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
157 return pNode->child( nDir ).load( order );
// Loads the parent pointer of pNode with the given memory ordering.
160 static node_type * parent( node_type * pNode, atomics::memory_order order )
162 return pNode->m_pParent.load( order );
// rcu_disposer (fragment): accumulates nodes unlinked during one RCU-protected
// operation and retires them (plus at most one retired value) after the
// operation completes, outside the RCU read-side critical section.
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
// Links an unlinked (value-free) node onto the intrusive retired list.
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
// Records a single value pointer to be retired; at most one value per disposer instance.
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
// Functor passed to gc::retire_ptr to free a node (not its value).
196 struct internal_disposer
198 void operator()( node_type * p ) const
// Retirement pass: must run with the RCU lock released.
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
// Walk the retired-node list; each node's value was disposed earlier.
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
// Retire the (single) recorded value through the user-supplied disposer.
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
// Tree data members: m_Root is a sentinel header node; the real tree hangs off
// its right child (see empty()). m_Monitor is mutable so const find paths can lock nodes.
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
// Constructor fragment: m_pRoot points at the sentinel header node.
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if inserting successful, \p false otherwise.
// Inserts (key -> pVal) if key is absent; returns true iff a new node was inserted.
// The functor runs under node-level lock; the node must not yet hold a value.
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key with lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
282 \p second is \p true if new node has been added or \p false if the node with \p key
// Inserts or updates the value for key. bInsert == false restricts to update-only.
// Returns {succeeded, inserted}: result_inserted bit distinguishes insert from update.
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
// Deprecated-style synonym for update(key, pVal, true).
299 template <typename K>
300 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
302 return update( key, pVal, true );
307 /// Delete \p key from the map
309 RCU \p synchronize() method can be called. RCU should not be locked.
311 Return \p true if \p key is found and deleted, \p false otherwise
// Erases key; the removal functor returns true to signal that the value was
// handed to the disposer (see do_remove's contract).
313 template <typename K>
314 bool erase( K const& key )
319 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
323 /// Deletes the item from the map using \p pred predicate for searching
325 The function is an analog of \p erase(K const&)
326 but \p pred is used for key comparing.
327 \p Less functor has the interface like \p std::less.
328 \p Less must imply the same element order as the comparator used for building the map.
// erase() variant using a Less predicate wrapped into a comparator;
// Less must induce the same order as the tree's comparator.
330 template <typename K, typename Less>
331 bool erase_with( K const& key, Less pred )
336 cds::opt::details::make_comparator_from_less<Less>(),
337 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
341 /// Delete \p key from the map
343 The function searches an item with key \p key, calls \p f functor
344 and deletes the item. If \p key is not found, the functor is not called.
346 The functor \p Func interface:
349 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
353 RCU \p synchronize method can be called. RCU should not be locked.
355 Return \p true if key is found and deleted, \p false otherwise
// Erases key, calling f(key, value) before the value is passed to the disposer.
// The call to f is not visible in this excerpt (lines missing) — presumably between
// the lambda entry and dispose_value; TODO confirm against the full source.
357 template <typename K, typename Func>
358 bool erase( K const& key, Func f )
363 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
366 disp.dispose_value(pVal);
372 /// Deletes the item from the map using \p pred predicate for searching
374 The function is an analog of \p erase(K const&, Func)
375 but \p pred is used for key comparing.
376 \p Less functor has the interface like \p std::less.
377 \p Less must imply the same element order as the comparator used for building the map.
// erase(key, f) variant using a Less predicate converted to a comparator.
379 template <typename K, typename Less, typename Func>
380 bool erase_with( K const& key, Less pred, Func f )
385 cds::opt::details::make_comparator_from_less<Less>(),
386 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
389 disp.dispose_value(pVal);
395 /// Extracts a value with minimal key from the map
397 Returns \p exempt_ptr to the leftmost item.
398 If the tree is empty, returns empty \p exempt_ptr.
400 Note that the function returns only the value for minimal key.
401 To retrieve its key use \p extract_min( Func ) member function.
403 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
404 It means that the function gets leftmost leaf of the tree and tries to unlink it.
405 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
406 So, the function returns the item with minimum key at the moment of tree traversing.
408 RCU \p synchronize method can be called. RCU should NOT be locked.
409 The function does not free the item.
410 The deallocator will be implicitly invoked when the returned object is destroyed or when
411 its \p release() member function is called.
// Extracts the (nearly) leftmost item; the no-op lambda discards the key.
413 exempt_ptr extract_min()
415 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
418 /// Extracts the minimal key and the corresponding value
420 Returns \p exempt_ptr to the leftmost item.
421 If the tree is empty, returns empty \p exempt_ptr.
423 \p Func functor is used to store minimal key.
424 \p Func has the following signature:
427 void operator()( key_type const& key );
430 If the tree is empty, \p f is not called.
431 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
434 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
435 It means that the function gets leftmost leaf of the tree and tries to unlink it.
436 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
437 So, the function returns the item with minimum key at the moment of tree traversing.
439 RCU \p synchronize method can be called. RCU should NOT be locked.
440 The function does not free the item.
441 The deallocator will be implicitly invoked when the returned object is destroyed or when
442 its \p release() member function is called.
// extract_min() variant that reports the extracted key through f.
444 template <typename Func>
445 exempt_ptr extract_min( Func f )
447 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
450 /// Extracts the minimal key and the corresponding value
452 This function is a shortcut for the following call:
455 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
457 \p key_type should be copy-assignable. The copy of minimal key
458 is returned in \p min_key argument.
// Convenience wrapper: copies the extracted minimal key into min_key.
// SFINAE-gated on key_type being copy-assignable.
460 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
461 extract_min_key( key_type& min_key )
463 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
466 /// Extracts a value with maximal key from the tree
468 Returns \p exempt_ptr pointer to the rightmost item.
469 If the set is empty, returns empty \p exempt_ptr.
471 Note that the function returns only the value for maximal key.
472 To retrieve its key use \p extract_max( Func ) member function.
474 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
475 It means that the function gets rightmost leaf of the tree and tries to unlink it.
476 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
477 So, the function returns the item with maximum key at the moment of tree traversing.
479 RCU \p synchronize method can be called. RCU should NOT be locked.
480 The function does not free the item.
481 The deallocator will be implicitly invoked when the returned object is destroyed or when
482 its \p release() is called.
// Extracts the (nearly) rightmost item; the no-op lambda discards the key.
484 exempt_ptr extract_max()
486 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
489 /// Extracts the maximal key and corresponding value
491 Returns \p exempt_ptr pointer to the rightmost item.
492 If the set is empty, returns empty \p exempt_ptr.
494 \p Func functor is used to store maximal key.
495 \p Func has the following signature:
498 void operator()( key_type const& key );
501 If the tree is empty, \p f is not called.
502 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
505 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
506 It means that the function gets rightmost leaf of the tree and tries to unlink it.
507 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
508 So, the function returns the item with maximum key at the moment of tree traversing.
510 RCU \p synchronize method can be called. RCU should NOT be locked.
511 The function does not free the item.
512 The deallocator will be implicitly invoked when the returned object is destroyed or when
513 its \p release() is called.
// extract_max() variant that reports the extracted key through f.
515 template <typename Func>
516 exempt_ptr extract_max( Func f )
518 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
521 /// Extracts the maximal key and corresponding value
523 This function is a shortcut for the following call:
526 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
528 \p key_type should be copy-assignable. The copy of maximal key
529 is returned in \p max_key argument.
// Convenience wrapper: copies the extracted maximal key into max_key.
// SFINAE-gated on key_type being copy-assignable.
531 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
532 extract_max_key( key_type& max_key )
534 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
537 /// Extracts an item from the map
539 The function searches an item with key equal to \p key in the tree,
540 unlinks it, and returns \p exempt_ptr pointer to a value found.
541 If \p key is not found the function returns an empty \p exempt_ptr.
543 RCU \p synchronize method can be called. RCU should NOT be locked.
544 The function does not destroy the value found.
545 The disposer will be implicitly invoked when the returned object is destroyed or when
546 its \p release() member function is called.
// Unlinks the item with the given key and returns ownership of its value
// as an exempt_ptr (empty if key not found).
548 template <typename Q>
549 exempt_ptr extract( Q const& key )
551 return exempt_ptr(do_extract( key ));
555 /// Extracts an item from the map using \p pred for searching
557 The function is an analog of \p extract(Q const&)
558 but \p pred is used for key compare.
559 \p Less has the interface like \p std::less.
560 \p pred must imply the same element order as the comparator used for building the tree.
// extract() variant using a Less predicate for key comparison.
562 template <typename Q, typename Less>
563 exempt_ptr extract_with( Q const& key, Less pred )
565 return exempt_ptr(do_extract_with( key, pred ));
568 /// Find the key \p key
570 The function searches the item with key equal to \p key and calls the functor \p f for item found.
571 The interface of \p Func functor is:
574 void operator()( key_type const& key, mapped_type& item );
577 where \p item is the item found.
578 The functor is called under node-level lock.
580 The function applies RCU lock internally.
582 The function returns \p true if \p key is found, \p false otherwise.
// Finds key and calls f(key, *value) under node-level lock; RCU lock is taken internally.
// The null-check on pVal before dereferencing is not visible in this excerpt — TODO confirm.
584 template <typename K, typename Func>
585 bool find( K const& key, Func f )
587 return do_find( key, key_comparator(),
588 [&f]( node_type * pNode ) -> bool {
589 assert( pNode != nullptr );
590 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
592 f( pNode->m_key, *pVal );
600 /// Finds the key \p val using \p pred predicate for searching
602 The function is an analog of \p find(K const&, Func)
603 but \p pred is used for key comparing.
604 \p Less functor has the interface like \p std::less.
605 \p Less must imply the same element order as the comparator used for building the map.
// find(key, f) variant using a Less predicate converted to a comparator.
607 template <typename K, typename Less, typename Func>
608 bool find_with( K const& key, Less pred, Func f )
611 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
612 [&f]( node_type * pNode ) -> bool {
613 assert( pNode != nullptr );
614 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
616 f( pNode->m_key, *pVal );
624 /// Find the key \p key
626 The function searches the item with key equal to \p key
627 and returns \p true if it is found, and \p false otherwise.
629 The function applies RCU lock internally.
// Presence check: returns true iff key exists; the functor is a no-op.
631 template <typename K>
632 bool find( K const& key )
634 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
637 /// Finds the key \p val using \p pred predicate for searching
639 The function is an analog of \p find(K const&)
640 but \p pred is used for key comparing.
641 \p Less functor has the interface like \p std::less.
642 \p Less must imply the same element order as the comparator used for building the map.
// Presence check using a Less predicate for key comparison.
644 template <typename K, typename Less>
645 bool find_with( K const& key, Less pred )
648 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
651 /// Clears the tree (thread safe, not atomic)
653 The function unlink all items from the tree.
654 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
658 assert( set.empty() );
660 the assertion could be raised.
662 For each node the \ref disposer will be called after unlinking.
664 RCU \p synchronize method can be called. RCU should not be locked.
// clear() body: repeatedly extract the minimum until the tree is empty.
// Thread-safe but not atomic; each extraction retires through RCU.
668 while ( extract_min() );
671 /// Clears the tree (not thread safe)
673 This function is not thread safe and may be called only when no other thread deals with the tree.
674 The function is used in the tree destructor.
// Destructor-time clear: currently delegates to the thread-safe clear().
678 clear(); // temp solution
682 /// Checks if the map is empty
// The tree hangs off the sentinel root's right child; empty iff that child is null.
685 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
688 /// Returns item count in the map
690 Only leaf nodes containing user data are counted.
692 The value returned depends on item counter type provided by \p Traits template parameter.
693 If it is \p atomicity::empty_item_counter this function always returns 0.
695 The function is not suitable for checking the tree emptiness, use \p empty()
696 member function for this purpose.
// size() body: relies on the traits-provided item counter (may be empty_item_counter).
700 return m_ItemCounter;
703 /// Returns const reference to internal statistics
704 stat const& statistics() const
709 /// Returns reference to \p sync_monitor object
710 sync_monitor& monitor()
// Const overload of monitor().
715 sync_monitor const& monitor() const
721 /// Checks internal consistency (not atomic, not thread-safe)
723 The debugging function to check internal consistency of the tree.
// Debug-only consistency check with a no-op violation functor.
725 bool check_consistency() const
727 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
730 /// Checks internal consistency (not atomic, not thread-safe)
732 The debugging function to check internal consistency of the tree.
733 The functor \p Func is called if a violation of internal tree structure
737 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
741 - \p nLevel - the level where the violation is found
742 - \p hLeft - the height of left subtree
743 - \p hRight - the height of right subtree
745 The functor is called for each violation found.
// Debug-only consistency check: walks the tree from the sentinel root's right
// child, calling f(level, hLeft, hRight) for each AVL-balance violation found.
747 template <typename Func>
748 bool check_consistency( Func f ) const
750 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
753 do_check_consistency( pChild, 1, f, nErrors );
// Recursive height check: returns subtree height, reports any node whose
// child heights differ by more than 1 (AVL invariant violation) via f.
761 template <typename Func>
762 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
765 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
766 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
768 if ( hLeft >= hRight ) {
769 if ( hLeft - hRight > 1 ) {
770 f( nLevel, hLeft, hRight );
776 if ( hRight - hLeft > 1 ) {
777 f( nLevel, hLeft, hRight );
// Core lookup: retries try_find from the sentinel root until it does not
// report retry (the RCU lock acquisition loop is not visible in this excerpt).
786 template <typename Q, typename Compare, typename Func>
787 bool do_find( Q& key, Compare cmp, Func f ) const
792 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
794 assert( result != find_result::retry );
795 return result == find_result::found;
// Core insert/update: checks RCU deadlock policy, collects retired nodes in
// removed_list (retired after the RCU section ends), delegates to try_update_root.
798 template <typename K, typename Compare, typename Func>
799 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
801 check_deadlock_policy::check();
803 rcu_disposer removed_list;
806 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Core removal: func's bool return distinguishes "value disposed" (true)
// from "value extracted for the caller" (false).
810 template <typename K, typename Compare, typename Func>
811 bool do_remove( K const& key, Compare cmp, Func func )
813 // Func must return true if the value was disposed
814 // or false if the value was extracted
816 check_deadlock_policy::check();
818 rcu_disposer removed_list;
821 return try_remove_root( key, cmp, func, removed_list );
// Min extraction: captures the value pointer, reports the key through f,
// returns false from the lambda so the value is NOT disposed (caller takes ownership).
825 template <typename Func>
826 mapped_type do_extract_min( Func f )
828 mapped_type pExtracted = nullptr;
831 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Max extraction: mirror image of do_extract_min.
836 template <typename Func>
837 mapped_type do_extract_max( Func f )
839 mapped_type pExtracted = nullptr;
842 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Shared driver for min/max extraction (nDir selects direction).
// Retry loop: waits out any shrink in progress on the root's right child and
// revalidates the child pointer before descending via try_extract_minmax.
847 template <typename Func>
848 void do_extract_minmax( int nDir, Func func )
850 check_deadlock_policy::check();
852 rcu_disposer removed_list;
856 int result = update_flags::failed;
858 // get right child of root
859 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
861 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// A set shrinking bit means a rotation is in flight: wait, then retry from the top.
862 if ( nChildVersion & node_type::shrinking ) {
863 m_stat.onRemoveRootWaitShrinking();
864 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
865 result = update_flags::retry;
867 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
868 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
871 } while ( result == update_flags::retry );
// Keyed extraction: lambda returns false so the value is handed to the caller
// rather than to the disposer.
875 template <typename Q>
876 mapped_type do_extract( Q const& key )
878 mapped_type pExtracted = nullptr;
882 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Same, with a Less predicate converted to a comparator.
887 template <typename Q, typename Less>
888 mapped_type do_extract_with( Q const& key, Less pred )
891 mapped_type pExtracted = nullptr;
894 cds::opt::details::make_comparator_from_less<Less>(),
895 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Hand-over-hand optimistic descent (Bronson et al. algorithm): each step reads
// the child in direction nDir, then revalidates pNode's version to detect a
// concurrent rotation; any mismatch propagates find_result::retry upward.
// Must be called inside an RCU read-side critical section.
904 template <typename Q, typename Compare, typename Func>
905 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
907 assert( gc::is_locked() );
911 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
913 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
914 m_stat.onFindRetry();
915 return find_result::retry;
918 m_stat.onFindFailed();
919 return find_result::not_found;
922 int nCmp = cmp( key, pChild->m_key );
// Key matched (comparison result handling partially elided in this excerpt):
// a node may exist but be "unvalued" (logically deleted), so re-check under lock.
924 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
926 node_scoped_lock l( m_Monitor, *pChild );
927 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
929 m_stat.onFindSuccess();
930 return find_result::found;
935 m_stat.onFindFailed();
936 return find_result::not_found;
// Descend: if the child is shrinking, wait for the rotation to finish,
// then revalidate our own version before retrying.
939 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
940 if ( nChildVersion & node_type::shrinking ) {
941 m_stat.onFindWaitShrinking();
942 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
944 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
945 m_stat.onFindRetry();
946 return find_result::retry;
949 else if ( nChildVersion != node_type::unlinked ) {
950 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
951 m_stat.onFindRetry();
952 return find_result::retry;
// Recurse one level down; only an inner retry forces revalidation here.
955 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
956 if ( found != find_result::retry )
960 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
961 m_stat.onFindRetry();
962 return find_result::retry;
// Root-level entry for insert/update. Retry loop: if the tree is non-empty,
// descend via try_update; if it is empty and insertion is allowed, create the
// first real node as the sentinel root's right child under the root's lock.
// Must be called inside an RCU read-side critical section.
967 template <typename K, typename Compare, typename Func>
968 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
970 assert( gc::is_locked() );
974 // get right child of root
975 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
977 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
978 if ( nChildVersion & node_type::shrinking ) {
979 m_stat.onUpdateRootWaitShrinking();
980 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
981 result = update_flags::retry;
// Revalidate the child pointer before descending (it may have been rotated away).
983 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
984 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
987 result = update_flags::retry;
// Empty-tree path: insert the first node directly under the sentinel root.
991 if ( nFlags & update_flags::allow_insert ) {
992 // insert into tree as right child of the root
994 node_scoped_lock l( m_Monitor, *m_pRoot );
// Someone else inserted first while we were acquiring the lock: retry.
995 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
996 result = update_flags::retry;
1000 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1001 mapped_type pVal = funcUpdate( pNew );
1002 assert( pVal != nullptr );
// Release store publishes the value before the node is linked below.
1003 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1005 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
1006 m_pRoot->height( 2, memory_model::memory_order_relaxed );
1010 m_stat.onInsertSuccess();
1011 return update_flags::result_inserted;
1014 return update_flags::failed;
1016 } while ( result == update_flags::retry );
// Root-level entry for removal: mirrors try_update_root's retry/shrink-wait
// structure, delegating the descent to try_remove.
// Must be called inside an RCU read-side critical section.
1020 template <typename K, typename Compare, typename Func>
1021 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1023 assert( gc::is_locked() );
1027 // get right child of root
1028 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1030 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
1031 if ( nChildVersion & node_type::shrinking ) {
1032 m_stat.onRemoveRootWaitShrinking();
1033 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1034 result = update_flags::retry;
1036 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1037 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1040 result = update_flags::retry;
1044 } while ( result == update_flags::retry );
1046 return result == update_flags::result_removed;
// Recursive insert/update step at pNode (caller validated the path here with nVersion).
// Cases: key matches pNode -> update in place (or fail if updates disallowed);
// matching child absent -> insert leaf (if allowed); otherwise descend with the
// usual shrink-wait / version-revalidation dance.
1049 template <typename K, typename Compare, typename Func>
1050 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1052 assert( gc::is_locked() );
1053 assert( nVersion != node_type::unlinked );
1054 CDS_UNUSED( pParent );
1056 int nCmp = cmp( key, pNode->m_key );
// nCmp == 0 branch (brace elided in this excerpt): the key already exists.
1058 if ( nFlags & update_flags::allow_update ) {
1059 return try_update_node( funcUpdate, pNode, disp );
1061 return update_flags::failed;
// Read the child first, then validate pNode's version so the read is meaningful.
1066 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1067 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1068 m_stat.onUpdateRetry();
1069 return update_flags::retry;
1072 if ( pChild == nullptr ) {
1074 if ( nFlags & update_flags::allow_insert )
1075 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1077 result = update_flags::failed;
1081 result = update_flags::retry;
1082 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1083 if ( nChildVersion & node_type::shrinking ) {
1084 m_stat.onUpdateWaitShrinking();
1085 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1088 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1089 // this second read is important, because it is protected by nChildVersion
1091 // validate the read that our caller took to get to node
1092 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1093 m_stat.onUpdateRetry();
1094 return update_flags::retry;
1097 // At this point we know that the traversal our parent took to get to node is still valid.
1098 // The recursive implementation will validate the traversal from node to
1099 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1100 // This means that we are no longer vulnerable to node shrinks, and we don't need
1101 // to validate node version any more.
1102 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1106 if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1107 m_stat.onUpdateRetry();
1108 return update_flags::retry;
1110 } while ( result == update_flags::retry );
// Recursive removal step at pNode; structurally parallel to try_update:
// key matches -> try_remove_node; matching child absent -> failed;
// otherwise descend, waiting out shrinks and revalidating versions.
1114 template <typename K, typename Compare, typename Func>
1115 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1117 assert( gc::is_locked() );
1118 assert( nVersion != node_type::unlinked );
1120 int nCmp = cmp( key, pNode->m_key );
// nCmp == 0 branch (brace elided in this excerpt): found the node to remove.
1122 return try_remove_node( pParent, pNode, nVersion, func, disp );
1126 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1127 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1128 m_stat.onRemoveRetry();
1129 return update_flags::retry;
1132 if ( pChild == nullptr ) {
1133 return update_flags::failed;
1137 result = update_flags::retry;
1138 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1139 if ( nChildVersion & node_type::shrinking ) {
1140 m_stat.onRemoveWaitShrinking();
1141 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1144 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1145 // this second read is important, because it is protected by nChildVersion
1147 // validate the read that our caller took to get to node
1148 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1149 m_stat.onRemoveRetry();
1150 return update_flags::retry;
1153 // At this point we know that the traversal our parent took to get to node is still valid.
1154 // The recursive implementation will validate the traversal from node to
1155 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1156 // This means that we are no longer vulnerable to node shrinks, and we don't need
1157 // to validate node version any more.
1158 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1162 if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1163 m_stat.onRemoveRetry();
1164 return update_flags::retry;
1166 } while ( result == update_flags::retry );
/// Recursive step for extracting the extreme (minimum or maximum) element of the subtree.
/// \p nDir selects the direction of descent (presumably \p left_child for minimum,
/// \p right_child for maximum - confirm against the caller). Descends along that
/// direction until there is no further child, then removes the current node via
/// try_remove_node(). Uses the same version-validation retry protocol as try_remove().
template <typename Func>
int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// the caller must hold the RCU read lock
assert( gc::is_locked() );
assert( nVersion != node_type::unlinked );
node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
// pNode changed since the caller read it - restart from the parent
m_stat.onRemoveRetry();
return update_flags::retry;
if ( pChild == nullptr ) {
// pNode is the extreme node in direction nDir - remove it
return try_remove_node( pParent, pNode, nVersion, func, disp );
result = update_flags::retry;
version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
if ( nChildVersion & node_type::shrinking ) {
// the child is being rotated - wait for the rotation to finish
m_stat.onRemoveWaitShrinking();
pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
// this second read is important, because it is protected by nChildVersion
// validate the read that our caller took to get to node
if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
m_stat.onRemoveRetry();
return update_flags::retry;
// At this point we know that the traversal our parent took to get to node is still valid.
// The recursive implementation will validate the traversal from node to
// child, so just prior to the node nVersion validation both traversals were definitely okay.
// This means that we are no longer vulnerable to node shrinks, and we don't need
// to validate node version any more.
result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
// retry from below + changed pNode version => our position is stale, retry above us
if ( result == update_flags::retry && pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
m_stat.onRemoveRetry();
return update_flags::retry;
} while ( result == update_flags::retry );
/// Attaches a new node with \p key as the \p nDir child of \p pNode.
/// \p funcUpdate produces the mapped value for the new node (must not return nullptr).
/// \p nVersion is the version of \p pNode observed by the caller; if it changes, or the
/// child slot is no longer empty, the caller must retry.
/// Returns \p update_flags::result_inserted on success or \p update_flags::retry.
template <typename K, typename Func>
int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// helper: fill the freshly allocated node's value via the user functor
auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
mapped_type pVal = funcUpdate( pNew );
assert( pVal != nullptr );
pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
if ( c_bRelaxedInsert ) {
// relaxed insert: allocate and initialize the node BEFORE taking the lock;
// cheap pre-check to avoid a useless allocation when the slot is already stale
if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
|| child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
m_stat.onInsertRetry();
return update_flags::retry;
fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
node_type * pDamaged;
assert( pNode != nullptr );
node_scoped_lock l( m_Monitor, *pNode );
// re-validate under the lock: pNode must be unchanged and the child slot still empty
if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
|| child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
if ( c_bRelaxedInsert ) {
// undo the speculative initialization: detach the value from the doomed node
mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
m_stat.onRelaxedInsertFailed();
m_stat.onInsertRetry();
return update_flags::retry;
// non-relaxed insert: allocate only now, while holding the lock
if ( !c_bRelaxedInsert )
fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// link the new node and repair heights; pDamaged is the first node needing rebalance
pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
pDamaged = fix_height_locked( pNode );
m_stat.onInsertSuccess();
fix_height_and_rebalance( pDamaged, disp );
m_stat.onInsertRebalanceRequired();
return update_flags::result_inserted;
/// Replaces the value of an existing node \p pNode under its node lock.
/// \p funcUpdate returns the new value pointer (must not be nullptr); the old value
/// is handed to the RCU disposer \p disp. Returns \p update_flags::result_updated,
/// or \p update_flags::retry when pNode was unlinked concurrently.
template <typename Func>
int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
assert( pNode != nullptr );
node_scoped_lock l( m_Monitor, *pNode );
if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
// the node was spliced out while we were acquiring the lock - retry from above
m_stat.onUpdateUnlinked();
return update_flags::retry;
pOld = pNode->value( memory_model::memory_order_relaxed );
mapped_type pVal = funcUpdate( pNode );
assert( pVal != nullptr );
pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// defer destruction of the replaced value to the RCU epoch
disp.dispose_value(pOld);
m_stat.onDisposeValue();
m_stat.onUpdateSuccess();
return update_flags::result_updated;
/// Removes the value stored in \p pNode. Two strategies:
/// - if pNode has at most one child, it is physically spliced out (parent and node
///   locks taken in parent->child order, with re-validation under the locks);
/// - otherwise pNode becomes a valueless routing node: only its value pointer is cleared.
/// \p func is invoked with (key, old value, disposer); it reports whether it disposed
/// the value itself. Returns \p result_removed, \p failed, or \p retry.
template <typename Func>
int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
assert( pParent != nullptr );
assert( pNode != nullptr );
// a routing node holds no value - nothing to remove
if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
return update_flags::failed;
if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
|| child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
node_type * pDamaged;
// lock order: parent first, then node (consistent with the rest of the tree)
node_scoped_lock lp( m_Monitor, *pParent );
if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
return update_flags::retry;
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( memory_model::memory_order_relaxed );
// re-validate the version and physically unlink; any failure means retry
if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
&& try_unlink_locked( pParent, pNode, disp )))
return update_flags::retry;
pDamaged = fix_height_locked( pParent );
if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
m_stat.onDisposeValue();
m_stat.onExtractValue();
fix_height_and_rebalance( pDamaged, disp );
m_stat.onRemoveRebalanceRequired();
return update_flags::result_removed;
// two-children case: logical removal only - clear the value, keep the node as a router
int result = update_flags::retry;
node_scoped_lock ln( m_Monitor, *pNode );
pOld = pNode->value( atomics::memory_order_relaxed );
if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
result = update_flags::result_removed;
if ( result == update_flags::result_removed ) {
if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
m_stat.onDisposeValue();
m_stat.onExtractValue();
/// Physically splices \p pNode (which has at most one child) out of the tree.
/// Both \p pParent and \p pNode must be locked by the caller. Fails (returns false -
/// the failure returns are in the elided branches) when pNode is no longer a child
/// of pParent or has acquired a second child. On success the node is marked unlinked
/// and deferred to the RCU disposer; its value is NOT disposed here.
bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
// pParent and pNode must be locked
assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
if ( pNode != pParentLeft && pNode != pParentRight ) {
// node is no longer a child of parent
assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
if ( pLeft != nullptr && pRight != nullptr ) {
// splicing is no longer possible
// promote the sole child (or nullptr) into pNode's slot in the parent
node_type * pSplice = pLeft ? pLeft : pRight;
if ( pParentLeft == pNode )
pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// release store: publish the new parent link to concurrent readers
pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
// Mark the node as unlinked
pNode->version( node_type::unlinked, memory_model::memory_order_release );
// The value will be disposed by calling function
pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// defer node reclamation to the RCU epoch
disp.dispose( pNode );
m_stat.onDisposeNode();
1427 private: // rotations
1429 int estimate_node_condition( node_type * pNode )
1431 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1432 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1434 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1435 return unlink_required;
1437 int h = pNode->height( memory_model::memory_order_relaxed );
1438 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1439 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1441 int hNew = 1 + std::max( hL, hR );
1442 int nBalance = hL - hR;
1444 if ( nBalance < -1 || nBalance > 1 )
1445 return rebalance_required;
1447 return h != hNew ? hNew : nothing_required;
1450 node_type * fix_height( node_type * pNode )
1452 assert( pNode != nullptr );
1453 node_scoped_lock l( m_Monitor, *pNode );
1454 return fix_height_locked( pNode );
/// Repairs the cached height of \p pNode (which must be locked) based on
/// estimate_node_condition(). For rebalance/unlink conditions the node itself is
/// reported as damaged (the returns live in the elided switch branches); when the
/// height was merely stale it is corrected and the parent is returned as the next
/// candidate for repair.
node_type * fix_height_locked( node_type * pNode )
// pNode must be locked!!!
int h = estimate_node_condition( pNode );
case rebalance_required:
case unlink_required:
case nothing_required:
// default: h is the corrected height - store it and propagate damage upward
pNode->height( h, memory_model::memory_order_relaxed );
return parent( pNode, memory_model::memory_order_relaxed );
/// Walks up from \p pNode repairing heights and performing rotations until the
/// tree is locally consistent. The loop stops at the root (node with no parent) or
/// when nothing more needs fixing. Each step locks parent then node, re-validating
/// the parent link under the lock before rebalancing.
void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
int nCond = estimate_node_condition( pNode );
// done if this node is healthy or was concurrently removed
if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
if ( nCond != unlink_required && nCond != rebalance_required )
// only the cached height is wrong - fix it and move to the parent
pNode = fix_height( pNode );
node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
assert( pParent != nullptr );
// lock order: parent before child
node_scoped_lock lp( m_Monitor, *pParent );
// re-validate under the lock: pParent must still be alive and still our parent
if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
&& parent( pNode, memory_model::memory_order_relaxed ) == pParent )
node_scoped_lock ln( m_Monitor, *pNode );
pNode = rebalance_locked( pParent, pNode, disp );
/// Performs one local repair at \p pNode: unlink a useless routing node, rotate
/// right/left when unbalanced, or just refresh the cached height.
node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
// pParent and pNode should be locked.
// Returns a damaged node, or nullptr if no more rebalancing is necessary
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// valueless node with a missing child: splice it out instead of rotating
if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
if ( try_unlink_locked( pParent, pNode, disp ))
return fix_height_locked( pParent );
// retry needed for pNode
assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
int h = pNode->height( memory_model::memory_order_relaxed );
int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
int hNew = 1 + std::max( hL, hR );
int balance = hL - hR;
// left subtree too tall -> rotate right; right subtree too tall -> rotate left
return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
else if ( balance < -1 )
return rebalance_to_left_locked( pParent, pNode, pRight, hL );
else if ( hNew != h ) {
// balance is fine, only the cached height is stale
pNode->height( hNew, memory_model::memory_order_relaxed );
// pParent is already locked
return fix_height_locked( pParent );
/// pNode's left subtree is too tall: perform a single right rotation, or a
/// left-right double rotation when pLeft's right child dominates. Locks are
/// acquired top-down (pLeft, then possibly pLRight) with re-validation after
/// each acquisition; any mismatch returns pNode to signal a retry.
node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
// pNode->pLeft is too large, we will rotate-right.
// If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
assert( pLeft != nullptr );
node_scoped_lock l( m_Monitor, *pLeft );
// re-validate the left link under pLeft's lock
if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
return pNode; // retry for pNode
int hL = pLeft->height( memory_model::memory_order_relaxed );
return pNode; // retry
node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// simple case: single right rotation suffices
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
assert( pLRight != nullptr );
node_scoped_lock lr( m_Monitor, *pLRight );
// re-validate pLeft's right link under pLRight's lock
if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
return pNode; // retry
hLR = pLRight->height( memory_model::memory_order_relaxed );
return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
int balance = hLL - hLRL;
if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
// nParent.child.left won't be damaged after a double rotation
return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
// focus on pLeft, if necessary pNode will be balanced later
return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
/// Mirror of rebalance_to_right_locked(): pNode's right subtree is too tall,
/// so perform a single left rotation, or a right-left double rotation when
/// pRight's left child dominates. Same lock/validate/retry discipline.
node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
|| child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode is locked yet
assert( pRight != nullptr );
node_scoped_lock l( m_Monitor, *pRight );
// re-validate the right link under pRight's lock
if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
return pNode; // retry for pNode
int hR = pRight->height( memory_model::memory_order_relaxed );
// heights changed so the imbalance may have disappeared
if ( hL - hR >= -1 )
return pNode; // retry
node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// simple case: single left rotation suffices
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
assert( pRLeft != nullptr );
node_scoped_lock lrl( m_Monitor, *pRLeft );
// re-validate pRight's left link under pRLeft's lock
if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
return pNode; // retry
hRL = pRLeft->height( memory_model::memory_order_relaxed );
return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
int balance = hRR - hRLR;
if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
// pRight won't be damaged after the double rotation
return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// focus on pRight first; pNode will be balanced later if needed
return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
/// Marks \p pNode as "shrinking" (a rotation is in progress) by publishing
/// version|shrinking with release semantics; concurrent readers that observe the
/// flag wait via wait_until_shrink_completed() until end_change() is called.
static void begin_change( node_type * pNode, version_type version )
pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
/// Completes a shrink started by begin_change(): (version | version_flags) + 1
/// carries past the flag bits, simultaneously clearing them and bumping the
/// version counter; the release store publishes the finished rotation.
static void end_change( node_type * pNode, version_type version )
// Clear shrinking and unlinked flags and increment version
pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
/// Single right rotation: pLeft becomes the subtree root, pNode becomes its right
/// child, and pLeft's former right child (pLRight) becomes pNode's left child.
/// pNode is version-marked as shrinking for the duration so concurrent traversals
/// can detect the change. Returns the next damaged node to repair (or pParent's fix).
node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// announce the shrink of pNode to concurrent readers
begin_change( pNode, nodeVersion );
// relink: pLRight moves from pLeft.right to pNode.left
pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
if ( pLRight != nullptr )
pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes the right child of pLeft
pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLeft takes pNode's slot in pParent
if ( pParentLeft == pNode )
pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up heights links
int hNode = 1 + std::max( hLR, hR );
pNode->height( hNode, memory_model::memory_order_relaxed );
pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// publish the completed rotation (clears the shrinking flag, bumps the version)
end_change( pNode, nodeVersion );
m_stat.onRotateRight();
// We have damaged pParent, pNode (now parent.child.right), and pLeft (now
// parent.child). pNode is the deepest. Perform as many fixes as we can
// with the locks we've got.
// We've already fixed the height for pNode, but it might still be outside
// our allowable balance range. In that case a simple fix_height_locked()
int nodeBalance = hLR - hR;
if ( nodeBalance < -1 || nodeBalance > 1 ) {
// we need another rotation at pNode
// we've fixed balance and height damage for pNode, now handle
// extra-routing node damage
if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
// we need to remove pNode and then repair
// we've already fixed the height at pLeft, do we need a rotation here?
int leftBalance = hLL - hNode;
if ( leftBalance < -1 || leftBalance > 1 )
// pLeft might also have routing node damage (if pLeft.left was null)
if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
// try to fix the parent height while we've still got the lock
return fix_height_locked( pParent );
/// Mirror of rotate_right_locked(): single left rotation. pRight becomes the
/// subtree root, pNode becomes its left child, and pRight's former left child
/// (pRLeft) becomes pNode's right child.
node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// announce the shrink of pNode to concurrent readers
begin_change( pNode, nodeVersion );
// fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
if ( pRLeft != nullptr )
pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes the left child of pRight
pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRight takes pNode's slot in pParent
if ( pParentLeft == pNode )
pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up heights
int hNode = 1 + std::max( hL, hRL );
pNode->height( hNode, memory_model::memory_order_relaxed );
pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
// publish the completed rotation
end_change( pNode, nodeVersion );
m_stat.onRotateLeft();
// residual damage checks, mirroring rotate_right_locked()
int nodeBalance = hRL - hL;
if ( nodeBalance < -1 || nodeBalance > 1 )
if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
int rightBalance = hRR - hNode;
if ( rightBalance < -1 || rightBalance > 1 )
if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
return fix_height_locked( pParent );
/// Left-right double rotation: pLRight (pLeft's right child) becomes the subtree
/// root, with pLeft as its left child and pNode as its right child. Both pNode and
/// pLeft are version-marked as shrinking while their links change.
node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// announce the shrink of both rotated nodes
begin_change( pNode, nodeVersion );
begin_change( pLeft, leftVersion );
// fix up pNode links, careful about the order!
pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
if ( pLRR != nullptr )
pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pLRight's left subtree moves under pLeft
pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
if ( pLRL != nullptr )
pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the new subtree root with pLeft and pNode as children
pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// pLRight takes pNode's slot in pParent
pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up heights
int hNode = 1 + std::max( hLRR, hR );
pNode->height( hNode, memory_model::memory_order_relaxed );
int hLeft = 1 + std::max( hLL, hLRL );
pLeft->height( hLeft, memory_model::memory_order_relaxed );
pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
// publish both completed rotations
end_change( pNode, nodeVersion );
end_change( pLeft, leftVersion );
m_stat.onRotateRightOverLeft();
// caller should have performed only a single rotation if pLeft was going
// to end up damaged
assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
// We have damaged pParent, pLR (now parent.child), and pNode (now
// parent.child.right). pNode is the deepest. Perform as many fixes as we
// can with the locks we've got.
// We've already fixed the height for pNode, but it might still be outside
// our allowable balance range. In that case a simple fix_height_locked()
int nodeBalance = hLRR - hR;
if ( nodeBalance < -1 || nodeBalance > 1 ) {
// we need another rotation at pNode
// pNode might also be damaged by being an unnecessary routing node
if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
// repair involves splicing out pNode and maybe more rotations
// we've already fixed the height at pLRight, do we need a rotation here?
int balanceLR = hLeft - hNode;
if ( balanceLR < -1 || balanceLR > 1 )
// try to fix the parent height while we've still got the lock
return fix_height_locked( pParent );
/// Right-left double rotation (mirror of rotate_right_over_left_locked()):
/// pRLeft (pRight's left child) becomes the subtree root, with pNode as its left
/// child and pRight as its right child. Both pNode and pRight are version-marked
/// as shrinking while their links change.
node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// announce the shrink of both rotated nodes
begin_change( pNode, nodeVersion );
begin_change( pRight, rightVersion );
// fix up pNode links, careful about the order!
pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
if ( pRLL != nullptr )
pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pRLeft's right subtree moves under pRight
pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
if ( pRLR != nullptr )
pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the new subtree root with pNode and pRight as children
pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// pRLeft takes pNode's slot in pParent
pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// fix up heights
int hNode = 1 + std::max( hL, hRLL );
pNode->height( hNode, memory_model::memory_order_relaxed );
int hRight = 1 + std::max( hRLR, hRR );
pRight->height( hRight, memory_model::memory_order_relaxed );
pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
// publish both completed rotations
end_change( pNode, nodeVersion );
end_change( pRight, rightVersion );
m_stat.onRotateLeftOverRight();
// the caller should have chosen a single rotation if pRight would end up damaged
assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// residual damage checks, mirroring rotate_right_over_left_locked()
int nodeBalance = hRLL - hL;
if ( nodeBalance < -1 || nodeBalance > 1 )
if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
int balRL = hRight - hNode;
if ( balRL < -1 || balRL > 1 )
return fix_height_locked( pParent );
1901 }} // namespace cds::container
1903 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H