3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointers to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in the tree's nodes. Note, the specialization stores a pointer to the user-allocated value, not a copy.
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
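For example (a minimal sketch; the RCU flavor and the key/value types below are illustrative only):
\code
#include <string>
#include <cds/urcu/general_buffered.h> // buffered general-purpose RCU
#include <cds/container/bronson_avltree_map_rcu.h>

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
// The third argument is a pointer type, selecting this "value pointer" specialization
typedef cds::container::BronsonAVLTreeMap< rcu_type, int, std::string* > map_type;
\endcode
The \p map_type typedef above is reused in the usage sketches below.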
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Whether @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Pointer to the \p mapped_type of an extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 cxx_allocator().Delete( pNode );
148 static void free_value( mapped_type pVal )
153 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
155 return pNode->child( nDir ).load( order );
158 static node_type * parent( node_type * pNode, atomics::memory_order order )
160 return pNode->m_pParent.load( order );
166 node_type * m_pRetiredList; ///< head of retired node list
167 mapped_type m_pRetiredValue; ///< value retired
171 : m_pRetiredList( nullptr )
172 , m_pRetiredValue( nullptr )
180 void dispose( node_type * pNode )
182 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
183 pNode->m_pNextRemoved = m_pRetiredList;
184 m_pRetiredList = pNode;
187 void dispose_value( mapped_type pVal )
189 assert( m_pRetiredValue == nullptr );
190 m_pRetiredValue = pVal;
194 struct internal_disposer
196 void operator()( node_type * p ) const
204 assert( !gc::is_locked() );
206 // TODO: use RCU::batch_retire
209 for ( node_type * p = m_pRetiredList; p; ) {
210 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
211 // Value already disposed
212 gc::template retire_ptr<internal_disposer>( p );
217 if ( m_pRetiredValue )
218 gc::template retire_ptr<disposer>( m_pRetiredValue );
226 typename node_type::base_class m_Root;
228 item_counter m_ItemCounter;
229 mutable sync_monitor m_Monitor;
234 /// Creates empty map
236 : m_pRoot( static_cast<node_type *>( &m_Root ))
247 The \p key_type should be constructible from a value of type \p K.
249 RCU \p synchronize() can be called. RCU should not be locked.
251 Returns \p true if the insertion is successful, \p false otherwise.
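A usage sketch (using the illustrative \p map_type from the class-level example):
\code
map_type m;
std::string * pVal = new std::string( "fifty" );
if ( !m.insert( 50, pVal )) {
    delete pVal; // the key already exists; the map did not take ownership
}
\endcode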
253 template <typename K>
254 bool insert( K const& key, mapped_type pVal )
256 return do_update(key, key_comparator(),
257 [pVal]( node_type * pNode ) -> mapped_type
259 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
263 update_flags::allow_insert
264 ) == update_flags::result_inserted;
267 /// Updates the value for \p key
269 The operation performs inserting or updating the value for \p key in a lock-free manner.
270 If \p bInsert is \p false, only updating of existing node is possible.
272 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
273 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
274 constructible from type \p K.
275 Otherwise, the value for \p key will be changed to \p pVal.
277 RCU \p synchronize() method can be called. RCU should not be locked.
279 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
280 \p second is \p true if a new node has been added or \p false if the node with \p key already exists.
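A usage sketch (illustrative \p map_type as above):
\code
map_type m;
// Insert-or-update: if key 50 already exists, its old value is passed to the disposer
std::pair<bool, bool> res = m.update( 50, new std::string( "fifty" ));
// res.first - the operation succeeded; res.second - a new node was inserted
\endcode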
283 template <typename K>
284 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
286 int result = do_update( key, key_comparator(),
287 [pVal]( node_type * ) -> mapped_type
291 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
293 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
297 template <typename K>
298 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
300 return update( key, pVal, true );
305 /// Deletes \p key from the map
307 RCU \p synchronize() method can be called. RCU should not be locked.
309 Returns \p true if \p key is found and deleted, \p false otherwise.
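A usage sketch (illustrative \p map_type as above; the value is reclaimed by the disposer from \p Traits):
\code
map_type m;
m.insert( 50, new std::string( "fifty" ));
if ( m.erase( 50 )) {
    // the node is unlinked; the value pointer is passed to the disposer
}
\endcode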
311 template <typename K>
312 bool erase( K const& key )
317 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
321 /// Deletes the item from the map using \p pred predicate for searching
323 The function is an analog of \p erase(K const&)
324 but \p pred is used for key comparing.
325 \p Less functor has the interface like \p std::less.
326 \p Less must imply the same element order as the comparator used for building the map.
328 template <typename K, typename Less>
329 bool erase_with( K const& key, Less pred )
334 cds::opt::details::make_comparator_from_less<Less>(),
335 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
339 /// Delete \p key from the map
341 The function searches an item with key \p key, calls \p f functor
342 and deletes the item. If \p key is not found, the functor is not called.
344 The functor \p Func interface:
347 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
351 RCU \p synchronize method can be called. RCU should not be locked.
353 Returns \p true if the key is found and deleted, \p false otherwise.
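A usage sketch (illustrative \p map_type as above):
\code
map_type m;
m.insert( 50, new std::string( "fifty" ));
m.erase( 50, []( int const& key, std::string& val ) {
    std::cout << "erasing " << key << " -> " << val << std::endl;
});
\endcode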
355 template <typename K, typename Func>
356 bool erase( K const& key, Func f )
361 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
364 disp.dispose_value(pVal);
370 /// Deletes the item from the map using \p pred predicate for searching
372 The function is an analog of \p erase(K const&, Func)
373 but \p pred is used for key comparing.
374 \p Less functor has the interface like \p std::less.
375 \p Less must imply the same element order as the comparator used for building the map.
377 template <typename K, typename Less, typename Func>
378 bool erase_with( K const& key, Less pred, Func f )
383 cds::opt::details::make_comparator_from_less<Less>(),
384 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
387 disp.dispose_value(pVal);
393 /// Extracts a value with minimal key from the map
395 Returns \p exempt_ptr to the leftmost item.
396 If the tree is empty, returns empty \p exempt_ptr.
398 Note that the function returns only the value for minimal key.
399 To retrieve its key use \p extract_min( Func ) member function.
401 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
402 It means that the function gets the leftmost leaf of the tree and tries to unlink it.
403 During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
404 So, the function returns the item with the minimal key at the moment of tree traversal.
406 RCU \p synchronize method can be called. RCU should NOT be locked.
407 The function does not free the item.
408 The deallocator will be implicitly invoked when the returned object is destroyed or when
409 its \p release() member function is called.
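A usage sketch (illustrative \p map_type as above):
\code
map_type m;
// ... fill the map ...
map_type::exempt_ptr xp = m.extract_min();
if ( !xp.empty()) {
    // use *xp; the disposer is invoked when xp is destroyed or xp.release() is called
}
\endcode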
411 exempt_ptr extract_min()
413 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
416 /// Extracts the minimal key and its corresponding value
418 Returns \p exempt_ptr to the leftmost item.
419 If the tree is empty, returns empty \p exempt_ptr.
421 \p Func functor is used to store minimal key.
422 \p Func has the following signature:
425 void operator()( key_type const& key );
428 If the tree is empty, \p f is not called.
429 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned.
432 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
433 It means that the function gets the leftmost leaf of the tree and tries to unlink it.
434 During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
435 So, the function returns the item with the minimal key at the moment of tree traversal.
437 RCU \p synchronize method can be called. RCU should NOT be locked.
438 The function does not free the item.
439 The deallocator will be implicitly invoked when the returned object is destroyed or when
440 its \p release() member function is called.
442 template <typename Func>
443 exempt_ptr extract_min( Func f )
445 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
448 /// Extracts the minimal key and its corresponding value
450 This function is a shortcut for the following call:
453 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
455 \p key_type should be copy-assignable. The copy of minimal key
456 is returned in \p min_key argument.
458 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
459 extract_min_key( key_type& min_key )
461 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
464 /// Extracts a value with maximal key from the tree
466 Returns an \p exempt_ptr to the rightmost item.
467 If the tree is empty, returns an empty \p exempt_ptr.
469 Note that the function returns only the value for maximal key.
470 To retrieve its key use \p extract_max( Func ) member function.
472 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
473 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
474 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
475 So, the function returns the item with the maximal key at the moment of tree traversal.
477 RCU \p synchronize method can be called. RCU should NOT be locked.
478 The function does not free the item.
479 The deallocator will be implicitly invoked when the returned object is destroyed or when
480 its \p release() is called.
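A usage sketch (illustrative \p map_type as above):
\code
map_type m;
// ... fill the map ...
map_type::exempt_ptr xp = m.extract_max();
if ( !xp.empty()) {
    // *xp is the value of the (nearly) maximal key
}
\endcode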
482 exempt_ptr extract_max()
484 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
487 /// Extracts the maximal key and corresponding value
489 Returns an \p exempt_ptr to the rightmost item.
490 If the tree is empty, returns an empty \p exempt_ptr.
492 \p Func functor is used to store maximal key.
493 \p Func has the following signature:
496 void operator()( key_type const& key );
499 If the tree is empty, \p f is not called.
500 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned.
503 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
504 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
505 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
506 So, the function returns the item with the maximal key at the moment of tree traversal.
508 RCU \p synchronize method can be called. RCU should NOT be locked.
509 The function does not free the item.
510 The deallocator will be implicitly invoked when the returned object is destroyed or when
511 its \p release() is called.
513 template <typename Func>
514 exempt_ptr extract_max( Func f )
516 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
519 /// Extracts the maximal key and corresponding value
521 This function is a shortcut for the following call:
524 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
526 \p key_type should be copy-assignable. The copy of maximal key
527 is returned in \p max_key argument.
529 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
530 extract_max_key( key_type& max_key )
532 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
535 /// Extracts an item from the map
537 The function searches an item with key equal to \p key in the tree,
538 unlinks it, and returns an \p exempt_ptr to the value found.
539 If \p key is not found the function returns an empty \p exempt_ptr.
541 RCU \p synchronize method can be called. RCU should NOT be locked.
542 The function does not destroy the value found.
543 The disposer will be implicitly invoked when the returned object is destroyed or when
544 its \p release() member function is called.
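A usage sketch (illustrative \p map_type as above):
\code
map_type m;
m.insert( 50, new std::string( "fifty" ));
map_type::exempt_ptr xp = m.extract( 50 );
if ( !xp.empty()) {
    // the value for key 50, already unlinked from the tree
}
\endcode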
546 template <typename Q>
547 exempt_ptr extract( Q const& key )
549 return exempt_ptr(do_extract( key ));
553 /// Extracts an item from the map using \p pred for searching
555 The function is an analog of \p extract(Q const&)
556 but \p pred is used for key comparison.
557 \p Less has the interface like \p std::less.
558 \p pred must imply the same element order as the comparator used for building the tree.
560 template <typename Q, typename Less>
561 exempt_ptr extract_with( Q const& key, Less pred )
563 return exempt_ptr(do_extract_with( key, pred ));
566 /// Finds the key \p key
568 The function searches the item with key equal to \p key and calls the functor \p f for item found.
569 The interface of \p Func functor is:
572 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& item );
575 where \p item is the item found.
576 The functor is called under node-level lock.
578 The function applies RCU lock internally.
580 The function returns \p true if \p key is found, \p false otherwise.
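A usage sketch (illustrative \p map_type as above):
\code
map_type m;
m.insert( 50, new std::string( "fifty" ));
bool bFound = m.find( 50, []( int const& key, std::string& val ) {
    // called under node-level lock; keep the functor short
});
\endcode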
582 template <typename K, typename Func>
583 bool find( K const& key, Func f )
585 return do_find( key, key_comparator(),
586 [&f]( node_type * pNode ) -> bool {
587 assert( pNode != nullptr );
588 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
590 f( pNode->m_key, *pVal );
598 /// Finds the key \p key using \p pred predicate for searching
600 The function is an analog of \p find(K const&, Func)
601 but \p pred is used for key comparing.
602 \p Less functor has the interface like \p std::less.
603 \p Less must imply the same element order as the comparator used for building the map.
605 template <typename K, typename Less, typename Func>
606 bool find_with( K const& key, Less pred, Func f )
609 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
610 [&f]( node_type * pNode ) -> bool {
611 assert( pNode != nullptr );
612 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
614 f( pNode->m_key, *pVal );
622 /// Finds the key \p key
624 The function searches the item with key equal to \p key
625 and returns \p true if it is found, and \p false otherwise.
627 The function applies RCU lock internally.
629 template <typename K>
630 bool find( K const& key )
632 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
635 /// Finds the key \p key using \p pred predicate for searching
637 The function is an analog of \p find(K const&)
638 but \p pred is used for key comparing.
639 \p Less functor has the interface like \p std::less.
640 \p Less must imply the same element order as the comparator used for building the map.
642 template <typename K, typename Less>
643 bool find_with( K const& key, Less pred )
646 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
649 /// Clears the tree (thread safe, not atomic)
651 The function unlinks all items from the tree.
652 The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions,
656 the assertion <tt>assert( set.empty() )</tt> placed immediately after \p clear()
658 could be raised because concurrent threads may insert new items.
660 For each node the \ref disposer will be called after unlinking.
662 RCU \p synchronize method can be called. RCU should not be locked.
666 while ( extract_min() );
669 /// Clears the tree (not thread safe)
671 This function is not thread safe and may be called only when no other thread deals with the tree.
672 The function is used in the tree destructor.
676 clear(); // temp solution
680 /// Checks if the map is empty
683 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
686 /// Returns item count in the map
688 Only leaf nodes containing user data are counted.
690 The value returned depends on item counter type provided by \p Traits template parameter.
691 If it is \p atomicity::empty_item_counter this function always returns 0.
693 The function is not suitable for checking tree emptiness; use the \p empty()
694 member function for this purpose.
698 return m_ItemCounter;
701 /// Returns const reference to internal statistics
702 stat const& statistics() const
707 /// Returns reference to \p sync_monitor object
708 sync_monitor& monitor()
713 sync_monitor const& monitor() const
719 /// Checks internal consistency (not atomic, not thread-safe)
721 The debugging function to check internal consistency of the tree.
723 bool check_consistency() const
725 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
728 /// Checks internal consistency (not atomic, not thread-safe)
730 The debugging function to check internal consistency of the tree.
731 The functor \p Func is called if a violation of the internal tree structure is found; the functor interface is:
735 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
739 - \p nLevel - the level where the violation is found
740 - \p hLeft - the height of left subtree
741 - \p hRight - the height of right subtree
743 The functor is called for each violation found.
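A usage sketch (illustrative \p map_type as above):
\code
map_type m;
// ... fill the map ...
bool bOk = m.check_consistency( []( size_t nLevel, size_t hLeft, size_t hRight ) {
    std::cerr << "Balance violation at level " << nLevel
              << ": hLeft=" << hLeft << ", hRight=" << hRight << std::endl;
});
\endcode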
745 template <typename Func>
746 bool check_consistency( Func f ) const
748 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
751 do_check_consistency( pChild, 1, f, nErrors );
759 template <typename Func>
760 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
763 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
764 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
766 if ( hLeft >= hRight ) {
767 if ( hLeft - hRight > 1 ) {
768 f( nLevel, hLeft, hRight );
774 if ( hRight - hLeft > 1 ) {
775 f( nLevel, hLeft, hRight );
784 template <typename Q, typename Compare, typename Func>
785 bool do_find( Q& key, Compare cmp, Func f ) const
790 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
792 assert( result != find_result::retry );
793 return result == find_result::found;
796 template <typename K, typename Compare, typename Func>
797 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
799 check_deadlock_policy::check();
801 rcu_disposer removed_list;
804 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
808 template <typename K, typename Compare, typename Func>
809 bool do_remove( K const& key, Compare cmp, Func func )
811 // Func must return true if the value was disposed
812 // or false if the value was extracted
814 check_deadlock_policy::check();
816 rcu_disposer removed_list;
819 return try_remove_root( key, cmp, func, removed_list );
823 template <typename Func>
824 mapped_type do_extract_min( Func f )
826 mapped_type pExtracted = nullptr;
829 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
834 template <typename Func>
835 mapped_type do_extract_max( Func f )
837 mapped_type pExtracted = nullptr;
840 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
845 template <typename Func>
846 void do_extract_minmax( int nDir, Func func )
848 check_deadlock_policy::check();
850 rcu_disposer removed_list;
854 int result = update_flags::failed;
856 // get right child of root
857 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
859 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
860 if ( nChildVersion & node_type::shrinking ) {
861 m_stat.onRemoveRootWaitShrinking();
862 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
863 result = update_flags::retry;
865 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
866 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
869 } while ( result == update_flags::retry );
873 template <typename Q>
874 mapped_type do_extract( Q const& key )
876 mapped_type pExtracted = nullptr;
880 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
885 template <typename Q, typename Less>
886 mapped_type do_extract_with( Q const& key, Less pred )
889 mapped_type pExtracted = nullptr;
892 cds::opt::details::make_comparator_from_less<Less>(),
893 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
902 template <typename Q, typename Compare, typename Func>
903 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
905 assert( gc::is_locked() );
909 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
911 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
912 m_stat.onFindRetry();
913 return find_result::retry;
916 m_stat.onFindFailed();
917 return find_result::not_found;
920 int nCmp = cmp( key, pChild->m_key );
922 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
924 node_scoped_lock l( m_Monitor, *pChild );
925 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
927 m_stat.onFindSuccess();
928 return find_result::found;
933 m_stat.onFindFailed();
934 return find_result::not_found;
937 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
938 if ( nChildVersion & node_type::shrinking ) {
939 m_stat.onFindWaitShrinking();
940 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
942 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
943 m_stat.onFindRetry();
944 return find_result::retry;
947 else if ( nChildVersion != node_type::unlinked ) {
949 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
950 m_stat.onFindRetry();
951 return find_result::retry;
954 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
955 if ( found != find_result::retry )
961 template <typename K, typename Compare, typename Func>
962 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
964 assert( gc::is_locked() );
968 // get right child of root
969 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
971 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
972 if ( nChildVersion & node_type::shrinking ) {
973 m_stat.onUpdateRootWaitShrinking();
974 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
975 result = update_flags::retry;
977 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
978 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
983 if ( nFlags & update_flags::allow_insert ) {
984 // insert into tree as right child of the root
986 node_scoped_lock l( m_Monitor, *m_pRoot );
987 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
988 result = update_flags::retry;
992 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
993 mapped_type pVal = funcUpdate( pNew );
994 assert( pVal != nullptr );
995 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
997 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
998 m_pRoot->height( 2, memory_model::memory_order_relaxed );
1002 m_stat.onInsertSuccess();
1003 return update_flags::result_inserted;
1006 return update_flags::failed;
1008 } while ( result == update_flags::retry );
1012 template <typename K, typename Compare, typename Func>
1013 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1015 assert( gc::is_locked() );
1019 // get right child of root
1020 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1022 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
1023 if ( nChildVersion & node_type::shrinking ) {
1024 m_stat.onRemoveRootWaitShrinking();
1025 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1026 result = update_flags::retry;
1028 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1029 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1034 } while ( result == update_flags::retry );
1036 return result == update_flags::result_removed;
1039 template <typename K, typename Compare, typename Func>
1040 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1042 assert( gc::is_locked() );
1043 assert( nVersion != node_type::unlinked );
1044 CDS_UNUSED( pParent );
1046 int nCmp = cmp( key, pNode->m_key );
1048 if ( nFlags & update_flags::allow_update ) {
1049 return try_update_node( funcUpdate, pNode, disp );
1051 return update_flags::failed;
1056 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1057 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1058 m_stat.onUpdateRetry();
1059 return update_flags::retry;
1062 if ( pChild == nullptr ) {
1064 if ( nFlags & update_flags::allow_insert )
1065 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1067 result = update_flags::failed;
1071 result = update_flags::retry;
1072 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1073 if ( nChildVersion & node_type::shrinking ) {
1074 m_stat.onUpdateWaitShrinking();
1075 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1078 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1079 // this second read is important, because it is protected by nChildVersion
1081 // validate the read that our caller took to get to node
1082 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1083 m_stat.onUpdateRetry();
1084 return update_flags::retry;
1087 // At this point we know that the traversal our parent took to get to node is still valid.
1088 // The recursive implementation will validate the traversal from node to
1089 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1090 // This means that we are no longer vulnerable to node shrinks, and we don't need
1091 // to validate node version any more.
1092 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1095 } while ( result == update_flags::retry );
1099 template <typename K, typename Compare, typename Func>
1100 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1102 assert( gc::is_locked() );
1103 assert( nVersion != node_type::unlinked );
1105 int nCmp = cmp( key, pNode->m_key );
1107 return try_remove_node( pParent, pNode, nVersion, func, disp );
1111 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1112 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1113 m_stat.onRemoveRetry();
1114 return update_flags::retry;
1117 if ( pChild == nullptr ) {
1118 return update_flags::failed;
1122 result = update_flags::retry;
1123 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1124 if ( nChildVersion & node_type::shrinking ) {
1125 m_stat.onRemoveWaitShrinking();
1126 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1129 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1130 // this second read is important, because it is protected by nChildVersion
1132 // validate the read that our caller took to get to node
1133 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1134 m_stat.onRemoveRetry();
1135 return update_flags::retry;
1138 // At this point we know that the traversal our parent took to get to node is still valid.
1139 // The recursive implementation will validate the traversal from node to
1140 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1141 // This means that we are no longer vulnerable to node shrinks, and we don't need
1142 // to validate node version any more.
1143 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1146 } while ( result == update_flags::retry );
1150 template <typename Func>
1151 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1153 assert( gc::is_locked() );
1154 assert( nVersion != node_type::unlinked );
1158 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
1159 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1160 m_stat.onRemoveRetry();
1161 return update_flags::retry;
1164 if ( pChild == nullptr ) {
1166 return try_remove_node( pParent, pNode, nVersion, func, disp );
1169 result = update_flags::retry;
1170 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1171 if ( nChildVersion & node_type::shrinking ) {
1172 m_stat.onRemoveWaitShrinking();
1173 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1176 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1177 // this second read is important, because it is protected by nChildVersion
1179 // validate the read that our caller took to get to node
1180 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1181 m_stat.onRemoveRetry();
1182 return update_flags::retry;
1185 // At this point we know that the traversal our parent took to get to node is still valid.
1186 // The recursive implementation will validate the traversal from node to
1187 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1188 // This means that we are no longer vulnerable to node shrinks, and we don't need
1189 // to validate node version any more.
1190 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1193 } while ( result == update_flags::retry );
1197 template <typename K, typename Func>
1198 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1202 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1203 mapped_type pVal = funcUpdate( pNew );
1204 assert( pVal != nullptr );
1205 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1208 if ( c_bRelaxedInsert ) {
1209 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1210 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1212 m_stat.onInsertRetry();
1213 return update_flags::retry;
1216 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1219 node_type * pDamaged;
1221 assert( pNode != nullptr );
1222 node_scoped_lock l( m_Monitor, *pNode );
1224 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1225 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1227 if ( c_bRelaxedInsert ) {
1229 m_stat.onRelaxedInsertFailed();
1232 m_stat.onInsertRetry();
1233 return update_flags::retry;
1236 if ( !c_bRelaxedInsert )
1237 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1239 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1240 pDamaged = fix_height_locked( pNode );
1244 m_stat.onInsertSuccess();
1246 fix_height_and_rebalance( pDamaged, disp );
1248 return update_flags::result_inserted;
1251 template <typename Func>
1252 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1255 assert( pNode != nullptr );
1257 node_scoped_lock l( m_Monitor, *pNode );
1259 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1260 m_stat.onUpdateUnlinked();
1261 return update_flags::retry;
1264 pOld = pNode->value( memory_model::memory_order_relaxed );
1265 mapped_type pVal = funcUpdate( pNode );
1269 assert( pVal != nullptr );
1270 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
1275 disp.dispose_value(pOld);
1276 m_stat.onDisposeValue();
1279 m_stat.onUpdateSuccess();
1280 return update_flags::result_updated;
1283 template <typename Func>
1284 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1286 assert( pParent != nullptr );
1287 assert( pNode != nullptr );
1289 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1290 return update_flags::failed;
1292 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1293 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1295 node_type * pDamaged;
1298 node_scoped_lock lp( m_Monitor, *pParent );
1299 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1300 return update_flags::retry;
1303 node_scoped_lock ln( m_Monitor, *pNode );
1304 pOld = pNode->value( memory_model::memory_order_relaxed );
1305 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1307 && try_unlink_locked( pParent, pNode, disp )))
1309 return update_flags::retry;
1312 pDamaged = fix_height_locked( pParent );
1316 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1317 m_stat.onDisposeValue();
1319 m_stat.onExtractValue();
1321 fix_height_and_rebalance( pDamaged, disp );
1322 return update_flags::result_removed;
1325 int result = update_flags::retry;
1328 node_scoped_lock ln( m_Monitor, *pNode );
1329 pOld = pNode->value( atomics::memory_order_relaxed );
1330 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1331 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1332 result = update_flags::result_removed;
1336 if ( result == update_flags::result_removed ) {
1338 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1339 m_stat.onDisposeValue();
1341 m_stat.onExtractValue();
1348 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1350 // pParent and pNode must be locked
1351 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1353 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1354 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1355 if ( pNode != pParentLeft && pNode != pParentRight ) {
1356 // node is no longer a child of parent
1360 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1361 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1363 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1364 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1365 if ( pLeft != nullptr && pRight != nullptr ) {
1366 // splicing is no longer possible
1369 node_type * pSplice = pLeft ? pLeft : pRight;
1371 if ( pParentLeft == pNode )
1372 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1374 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1377 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1379 // Mark the node as unlinked
1380 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1382 // The value will be disposed by calling function
1383 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1385 disp.dispose( pNode );
1386 m_stat.onDisposeNode();
1393 private: // rotations
1395 int estimate_node_condition( node_type * pNode )
1397 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1398 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1400 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1401 return unlink_required;
1403 int h = pNode->height( memory_model::memory_order_relaxed );
1404 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1405 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1407 int hNew = 1 + std::max( hL, hR );
1408 int nBalance = hL - hR;
1410 if ( nBalance < -1 || nBalance > 1 )
1411 return rebalance_required;
1413 return h != hNew ? hNew : nothing_required;
1416 node_type * fix_height( node_type * pNode )
1418 assert( pNode != nullptr );
1419 node_scoped_lock l( m_Monitor, *pNode );
1420 return fix_height_locked( pNode );
1423 node_type * fix_height_locked( node_type * pNode )
1425 // pNode must be locked!!!
1426 int h = estimate_node_condition( pNode );
1428 case rebalance_required:
1429 case unlink_required:
1431 case nothing_required:
1434 pNode->height( h, memory_model::memory_order_relaxed );
1435 return parent( pNode, memory_model::memory_order_relaxed );
1439 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1441 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1442 int nCond = estimate_node_condition( pNode );
1443 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1446 if ( nCond != unlink_required && nCond != rebalance_required )
1447 pNode = fix_height( pNode );
1449 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1450 assert( pParent != nullptr );
1452 node_scoped_lock lp( m_Monitor, *pParent );
1453 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1454 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1456 node_scoped_lock ln( m_Monitor, *pNode );
1457 pNode = rebalance_locked( pParent, pNode, disp );
1464 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1466 // pParent and pNode should be locked.
1467 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1468 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1469 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1470 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1472 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1473 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1475 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1476 if ( try_unlink_locked( pParent, pNode, disp ))
1477 return fix_height_locked( pParent );
1479 // retry needed for pNode
1484 int h = pNode->height( memory_model::memory_order_relaxed );
1485 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1486 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1487 int hNew = 1 + std::max( hL, hR );
1488 int balance = hL - hR;
1491 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1492 else if ( balance < -1 )
1493 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1494 else if ( hNew != h ) {
1495 pNode->height( hNew, memory_model::memory_order_relaxed );
1497 // pParent is already locked
1498 return fix_height_locked( pParent );
1504 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1506 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1507 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1508 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1510 // pParent and pNode are already locked
1511 // pNode->pLeft is too large, we will rotate-right.
1512 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1515 assert( pLeft != nullptr );
1516 node_scoped_lock l( m_Monitor, *pLeft );
1517 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1518 return pNode; // retry for pNode
1520 int hL = pLeft->height( memory_model::memory_order_relaxed );
1522 return pNode; // retry
1524 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1525 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1526 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1527 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1531 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1534 assert( pLRight != nullptr );
1536 node_scoped_lock lr( m_Monitor, *pLRight );
1537 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1538 return pNode; // retry
1540 hLR = pLRight->height( memory_model::memory_order_relaxed );
1542 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1544 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1545 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1546 int balance = hLL - hLRL;
1547 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1548 // nParent.child.left won't be damaged after a double rotation
1549 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1553 // focus on pLeft, if necessary pNode will be balanced later
1554 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1559 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1561 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1562 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1563 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1565 // pParent and pNode are already locked
1567 assert( pRight != nullptr );
1568 node_scoped_lock l( m_Monitor, *pRight );
1569 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1570 return pNode; // retry for pNode
1572 int hR = pRight->height( memory_model::memory_order_relaxed );
1573 if ( hL - hR >= -1 )
1574 return pNode; // retry
1576 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1577 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1578 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1579 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1581 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1584 assert( pRLeft != nullptr );
1585 node_scoped_lock lrl( m_Monitor, *pRLeft );
1586 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1587 return pNode; // retry
1589 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1591 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1593 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1594 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1595 int balance = hRR - hRLR;
1596 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1597 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1599 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1603 static void begin_change( node_type * pNode, version_type version )
1605 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1607 static void end_change( node_type * pNode, version_type version )
1609 // Clear shrinking and unlinked flags and increment version
1610 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1613 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1615 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1616 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1618 begin_change( pNode, nodeVersion );
1620 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1621 if ( pLRight != nullptr )
1622 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1624 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1625 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1627 if ( pParentLeft == pNode )
1628 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1630 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1631 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1633 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1635 // fix up heights and links
1636 int hNode = 1 + std::max( hLR, hR );
1637 pNode->height( hNode, memory_model::memory_order_relaxed );
1638 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1640 end_change( pNode, nodeVersion );
1641 m_stat.onRotateRight();
1643 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1644 // parent.child). pNode is the deepest. Perform as many fixes as we can
1645 // with the locks we've got.
1647 // We've already fixed the height for pNode, but it might still be outside
1648 // our allowable balance range. In that case a simple fix_height_locked() won't help.
1650 int nodeBalance = hLR - hR;
1651 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1652 // we need another rotation at pNode
1656 // we've fixed balance and height damage for pNode, now handle
1657 // extra-routing node damage
1658 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1659 // we need to remove pNode and then repair
1663 // we've already fixed the height at pLeft, do we need a rotation here?
1664 int leftBalance = hLL - hNode;
1665 if ( leftBalance < -1 || leftBalance > 1 )
1668 // pLeft might also have routing node damage (if pLeft.left was null)
1669 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1672 // try to fix the parent height while we've still got the lock
1673 return fix_height_locked( pParent );
1676 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1678 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1679 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1681 begin_change( pNode, nodeVersion );
1683 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1684 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1685 if ( pRLeft != nullptr )
1686 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1688 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1689 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1691 if ( pParentLeft == pNode )
1692 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1694 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1695 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1697 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1700 int hNode = 1 + std::max( hL, hRL );
1701 pNode->height( hNode, memory_model::memory_order_relaxed );
1702 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1704 end_change( pNode, nodeVersion );
1705 m_stat.onRotateLeft();
1707 int nodeBalance = hRL - hL;
1708 if ( nodeBalance < -1 || nodeBalance > 1 )
1711 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1714 int rightBalance = hRR - hNode;
1715 if ( rightBalance < -1 || rightBalance > 1 )
1718 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1721 return fix_height_locked( pParent );
1724 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1726 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1727 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1729 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1730 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1731 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1732 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1734 begin_change( pNode, nodeVersion );
1735 begin_change( pLeft, leftVersion );
1737 // fix up pNode links, careful about the order!
1738 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1739 if ( pLRR != nullptr )
1740 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1742 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1743 if ( pLRL != nullptr )
1744 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1746 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1747 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1748 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1749 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1752 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1754 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1755 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1757 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1760 int hNode = 1 + std::max( hLRR, hR );
1761 pNode->height( hNode, memory_model::memory_order_relaxed );
1762 int hLeft = 1 + std::max( hLL, hLRL );
1763 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1764 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1766 end_change( pNode, nodeVersion );
1767 end_change( pLeft, leftVersion );
1768 m_stat.onRotateRightOverLeft();
1770 // caller should have performed only a single rotation if pLeft was going
1771 // to end up damaged
1772 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1773 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1775 // We have damaged pParent, pLR (now parent.child), and pNode (now
1776 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1777 // can with the locks we've got.
1779 // We've already fixed the height for pNode, but it might still be outside
1780 // our allowable balance range. In that case a simple fix_height_locked() won't help.
1782 int nodeBalance = hLRR - hR;
1783 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1784 // we need another rotation at pNode
1788 // pNode might also be damaged by being an unnecessary routing node
1789 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1790 // repair involves splicing out pNode and maybe more rotations
1794 // we've already fixed the height at pLRight, do we need a rotation here?
1795 int balanceLR = hLeft - hNode;
1796 if ( balanceLR < -1 || balanceLR > 1 )
1799 // try to fix the parent height while we've still got the lock
1800 return fix_height_locked( pParent );
1803 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1805 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1806 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1808 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1809 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1810 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1811 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1813 begin_change( pNode, nodeVersion );
1814 begin_change( pRight, rightVersion );
1816 // fix up pNode links, careful about the order!
1817 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1818 if ( pRLL != nullptr )
1819 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1821 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1822 if ( pRLR != nullptr )
1823 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1825 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1826 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1827 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1828 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1831 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1833 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1834 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1836 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1839 int hNode = 1 + std::max( hL, hRLL );
1840 pNode->height( hNode, memory_model::memory_order_relaxed );
1841 int hRight = 1 + std::max( hRLR, hRR );
1842 pRight->height( hRight, memory_model::memory_order_relaxed );
1843 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1845 end_change( pNode, nodeVersion );
1846 end_change( pRight, rightVersion );
1847 m_stat.onRotateLeftOverRight();
1849 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1851 int nodeBalance = hRLL - hL;
1852 if ( nodeBalance < -1 || nodeBalance > 1 )
1854 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1857 int balRL = hRight - hNode;
1858 if ( balRL < -1 || balRL > 1 )
1861 return fix_height_locked( pParent );
1866 }} // namespace cds::container
1868 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H