3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 cxx_allocator().Delete( pNode );
148 static void free_value( mapped_type pVal )
153 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
155 return pNode->child( nDir ).load( order );
158 static node_type * parent( node_type * pNode, atomics::memory_order order )
160 return pNode->m_pParent.load( order );
166 node_type * m_pRetiredList; ///< head of retired node list
167 mapped_type m_pRetiredValue; ///< value retired
171 : m_pRetiredList( nullptr )
172 , m_pRetiredValue( nullptr )
180 void dispose( node_type * pNode )
182 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
183 pNode->m_pNextRemoved = m_pRetiredList;
184 m_pRetiredList = pNode;
187 void dispose_value( mapped_type pVal )
189 assert( m_pRetiredValue == nullptr );
190 m_pRetiredValue = pVal;
194 struct internal_disposer
196 void operator()( node_type * p ) const
204 assert( !gc::is_locked() );
206 // TODO: use RCU::batch_retire
209 for ( node_type * p = m_pRetiredList; p; ) {
210 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
211 // Value already disposed
212 gc::template retire_ptr<internal_disposer>( p );
217 if ( m_pRetiredValue )
218 gc::template retire_ptr<disposer>( m_pRetiredValue );
226 typename node_type::base_class m_Root;
228 item_counter m_ItemCounter;
229 mutable sync_monitor m_Monitor;
234 /// Creates empty map
236 : m_pRoot( static_cast<node_type *>( &m_Root ))
247 The \p key_type should be constructible from a value of type \p K.
249 RCU \p synchronize() can be called. RCU should not be locked.
251 Returns \p true if inserting successful, \p false otherwise.
253 template <typename K>
254 bool insert( K const& key, mapped_type pVal )
256 return do_update(key, key_comparator(),
257 [pVal]( node_type * pNode ) -> mapped_type
259 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
262 update_flags::allow_insert
263 ) == update_flags::result_inserted;
266 /// Updates the value for \p key
268 The operation performs inserting or updating the value for \p key with lock-free manner.
269 If \p bInsert is \p false, only updating of existing node is possible.
271 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
272 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
273 constructible from type \p K.
274 Otherwise, the value for \p key will be changed to \p pVal.
276 RCU \p synchronize() method can be called. RCU should not be locked.
278 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
279 \p second is \p true if new node has been added or \p false if the node with \p key
282 template <typename K>
283 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
285 int result = do_update( key, key_comparator(),
286 [pVal]( node_type * ) -> mapped_type
290 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
292 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
296 template <typename K>
297 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
299 return update( key, pVal, true );
304 /// Delete \p key from the map
306 RCU \p synchronize() method can be called. RCU should not be locked.
308 Return \p true if \p key is found and deleted, \p false otherwise
310 template <typename K>
311 bool erase( K const& key )
316 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
320 /// Deletes the item from the map using \p pred predicate for searching
322 The function is an analog of \p erase(K const&)
323 but \p pred is used for key comparing.
324 \p Less functor has the interface like \p std::less.
325 \p Less must imply the same element order as the comparator used for building the map.
327 template <typename K, typename Less>
328 bool erase_with( K const& key, Less pred )
333 cds::opt::details::make_comparator_from_less<Less>(),
334 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
338 /// Delete \p key from the map
340 The function searches an item with key \p key, calls \p f functor
341 and deletes the item. If \p key is not found, the functor is not called.
343 The functor \p Func interface:
346 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
350 RCU \p synchronize method can be called. RCU should not be locked.
352 Return \p true if key is found and deleted, \p false otherwise
354 template <typename K, typename Func>
355 bool erase( K const& key, Func f )
360 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
363 disp.dispose_value(pVal);
369 /// Deletes the item from the map using \p pred predicate for searching
371 The function is an analog of \p erase(K const&, Func)
372 but \p pred is used for key comparing.
373 \p Less functor has the interface like \p std::less.
374 \p Less must imply the same element order as the comparator used for building the map.
376 template <typename K, typename Less, typename Func>
377 bool erase_with( K const& key, Less pred, Func f )
382 cds::opt::details::make_comparator_from_less<Less>(),
383 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
386 disp.dispose_value(pVal);
392 /// Extracts a value with minimal key from the map
394 Returns \p exempt_ptr to the leftmost item.
395 If the tree is empty, returns empty \p exempt_ptr.
397 Note that the function returns only the value for minimal key.
398 To retrieve its key use \p extract_min( Func ) member function.
400 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
401 It means that the function gets leftmost leaf of the tree and tries to unlink it.
402 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
403 So, the function returns the item with minimum key at the moment of tree traversing.
405 RCU \p synchronize method can be called. RCU should NOT be locked.
406 The function does not free the item.
407 The deallocator will be implicitly invoked when the returned object is destroyed or when
408 its \p release() member function is called.
410 exempt_ptr extract_min()
412 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
415 /// Extracts minimal key key and corresponding value
417 Returns \p exempt_ptr to the leftmost item.
418 If the tree is empty, returns empty \p exempt_ptr.
420 \p Func functor is used to store minimal key.
421 \p Func has the following signature:
424 void operator()( key_type const& key );
427 If the tree is empty, \p f is not called.
428 Otherwise, is it called with minimal key, the pointer to corresponding value is returned
431 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
432 It means that the function gets leftmost leaf of the tree and tries to unlink it.
433 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
434 So, the function returns the item with minimum key at the moment of tree traversing.
436 RCU \p synchronize method can be called. RCU should NOT be locked.
437 The function does not free the item.
438 The deallocator will be implicitly invoked when the returned object is destroyed or when
439 its \p release() member function is called.
441 template <typename Func>
442 exempt_ptr extract_min( Func f )
444 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
447 /// Extracts minimal key key and corresponding value
449 This function is a shortcut for the following call:
452 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
454 \p key_type should be copy-assignable. The copy of minimal key
455 is returned in \p min_key argument.
457 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
458 extract_min_key( key_type& min_key )
460 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
463 /// Extracts a value with maximal key from the tree
465 Returns \p exempt_ptr pointer to the rightmost item.
466 If the set is empty, returns empty \p exempt_ptr.
468 Note that the function returns only the value for maximal key.
469 To retrieve its key use \p extract_max( Func ) member function.
471 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
472 It means that the function gets rightmost leaf of the tree and tries to unlink it.
473 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
474 So, the function returns the item with maximum key at the moment of tree traversing.
476 RCU \p synchronize method can be called. RCU should NOT be locked.
477 The function does not free the item.
478 The deallocator will be implicitly invoked when the returned object is destroyed or when
479 its \p release() is called.
481 exempt_ptr extract_max()
483 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
486 /// Extracts the maximal key and corresponding value
488 Returns \p exempt_ptr pointer to the rightmost item.
489 If the set is empty, returns empty \p exempt_ptr.
491 \p Func functor is used to store maximal key.
492 \p Func has the following signature:
495 void operator()( key_type const& key );
498 If the tree is empty, \p f is not called.
499 Otherwise, is it called with maximal key, the pointer to corresponding value is returned
502 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
503 It means that the function gets rightmost leaf of the tree and tries to unlink it.
504 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
505 So, the function returns the item with maximum key at the moment of tree traversing.
507 RCU \p synchronize method can be called. RCU should NOT be locked.
508 The function does not free the item.
509 The deallocator will be implicitly invoked when the returned object is destroyed or when
510 its \p release() is called.
512 template <typename Func>
513 exempt_ptr extract_max( Func f )
515 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
518 /// Extracts the maximal key and corresponding value
520 This function is a shortcut for the following call:
523 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
525 \p key_type should be copy-assignable. The copy of maximal key
526 is returned in \p max_key argument.
528 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
529 extract_max_key( key_type& max_key )
531 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
534 /// Extracts an item from the map
536 The function searches an item with key equal to \p key in the tree,
537 unlinks it, and returns \p exempt_ptr pointer to a value found.
538 If \p key is not found the function returns an empty \p exempt_ptr.
540 RCU \p synchronize method can be called. RCU should NOT be locked.
541 The function does not destroy the value found.
542 The disposer will be implicitly invoked when the returned object is destroyed or when
543 its \p release() member function is called.
545 template <typename Q>
546 exempt_ptr extract( Q const& key )
548 return exempt_ptr(do_extract( key ));
552 /// Extracts an item from the map using \p pred for searching
554 The function is an analog of \p extract(Q const&)
555 but \p pred is used for key compare.
556 \p Less has the interface like \p std::less.
557 \p pred must imply the same element order as the comparator used for building the tree.
559 template <typename Q, typename Less>
560 exempt_ptr extract_with( Q const& key, Less pred )
562 return exempt_ptr(do_extract_with( key, pred ));
565 /// Find the key \p key
567 The function searches the item with key equal to \p key and calls the functor \p f for item found.
568 The interface of \p Func functor is:
571 void operator()( key_type const& key, mapped_type& item );
574 where \p item is the item found.
575 The functor is called under node-level lock.
577 The function applies RCU lock internally.
579 The function returns \p true if \p key is found, \p false otherwise.
581 template <typename K, typename Func>
582 bool find( K const& key, Func f )
584 return do_find( key, key_comparator(),
585 [&f]( node_type * pNode ) -> bool {
586 assert( pNode != nullptr );
587 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
589 f( pNode->m_key, *pVal );
597 /// Finds the key \p val using \p pred predicate for searching
599 The function is an analog of \p find(K const&, Func)
600 but \p pred is used for key comparing.
601 \p Less functor has the interface like \p std::less.
602 \p Less must imply the same element order as the comparator used for building the map.
604 template <typename K, typename Less, typename Func>
605 bool find_with( K const& key, Less pred, Func f )
608 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
609 [&f]( node_type * pNode ) -> bool {
610 assert( pNode != nullptr );
611 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
613 f( pNode->m_key, *pVal );
621 /// Find the key \p key
623 The function searches the item with key equal to \p key
624 and returns \p true if it is found, and \p false otherwise.
626 The function applies RCU lock internally.
628 template <typename K>
629 bool find( K const& key )
631 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
634 /// Finds the key \p val using \p pred predicate for searching
636 The function is an analog of \p find(K const&)
637 but \p pred is used for key comparing.
638 \p Less functor has the interface like \p std::less.
639 \p Less must imply the same element order as the comparator used for building the map.
641 template <typename K, typename Less>
642 bool find_with( K const& key, Less pred )
645 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
648 /// Clears the tree (thread safe, not atomic)
650 The function unlink all items from the tree.
651 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
655 assert( set.empty() );
657 the assertion could be raised.
659 For each node the \ref disposer will be called after unlinking.
661 RCU \p synchronize method can be called. RCU should not be locked.
665 while ( extract_min() );
668 /// Clears the tree (not thread safe)
670 This function is not thread safe and may be called only when no other thread deals with the tree.
671 The function is used in the tree destructor.
675 clear(); // temp solution
679 /// Checks if the map is empty
682 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
685 /// Returns item count in the map
687 Only leaf nodes containing user data are counted.
689 The value returned depends on item counter type provided by \p Traits template parameter.
690 If it is \p atomicity::empty_item_counter this function always returns 0.
692 The function is not suitable for checking the tree emptiness, use \p empty()
693 member function for this purpose.
697 return m_ItemCounter;
700 /// Returns const reference to internal statistics
701 stat const& statistics() const
706 /// Checks internal consistency (not atomic, not thread-safe)
708 The debugging function to check internal consistency of the tree.
710 bool check_consistency() const
712 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
715 /// Checks internal consistency (not atomic, not thread-safe)
717 The debugging function to check internal consistency of the tree.
718 The functor \p Func is called if a violation of internal tree structure
722 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
726 - \p nLevel - the level where the violation is found
727 - \p hLeft - the height of left subtree
728 - \p hRight - the height of right subtree
730 The functor is called for each violation found.
732 template <typename Func>
733 bool check_consistency( Func f ) const
735 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
738 do_check_consistency( pChild, 1, f, nErrors );
746 template <typename Func>
747 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
750 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
751 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
753 if ( hLeft >= hRight ) {
754 if ( hLeft - hRight > 1 ) {
755 f( nLevel, hLeft, hRight );
761 if ( hRight - hLeft > 1 ) {
762 f( nLevel, hLeft, hRight );
771 template <typename Q, typename Compare, typename Func>
772 bool do_find( Q& key, Compare cmp, Func f ) const
777 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
779 assert( result != find_result::retry );
780 return result == find_result::found;
783 template <typename K, typename Compare, typename Func>
784 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
786 check_deadlock_policy::check();
788 rcu_disposer removed_list;
791 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
795 template <typename K, typename Compare, typename Func>
796 bool do_remove( K const& key, Compare cmp, Func func )
798 // Func must return true if the value was disposed
799 // or false if the value was extracted
801 check_deadlock_policy::check();
803 rcu_disposer removed_list;
806 return try_remove_root( key, cmp, func, removed_list );
810 template <typename Func>
811 mapped_type do_extract_min( Func f )
813 mapped_type pExtracted = nullptr;
816 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
821 template <typename Func>
822 mapped_type do_extract_max( Func f )
824 mapped_type pExtracted = nullptr;
827 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
832 template <typename Func>
833 void do_extract_minmax( int nDir, Func func )
835 check_deadlock_policy::check();
837 rcu_disposer removed_list;
841 int result = update_flags::failed;
843 // get right child of root
844 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
846 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
847 if ( nChildVersion & node_type::shrinking ) {
848 m_stat.onRemoveRootWaitShrinking();
849 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
850 result = update_flags::retry;
852 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
853 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
856 } while ( result == update_flags::retry );
860 template <typename Q>
861 mapped_type do_extract( Q const& key )
863 mapped_type pExtracted = nullptr;
867 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
872 template <typename Q, typename Less>
873 mapped_type do_extract_with( Q const& key, Less pred )
876 mapped_type pExtracted = nullptr;
879 cds::opt::details::make_comparator_from_less<Less>(),
880 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Hand-over-hand, version-validated search (Bronson et al. algorithm).
// pNode is the current node, nDir the direction the caller took from it, and
// nVersion the version of pNode observed by the caller. Any version mismatch
// returns find_result::retry so the caller restarts from a stable ancestor.
// NOTE(review): this fragment is garbled by extraction (lines missing);
// the code lines below are left byte-identical.
889 template <typename Q, typename Compare, typename Func>
890 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
// must run inside an RCU read-side critical section
892 assert( gc::is_locked() );
896 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// no child on this side: re-validate pNode before reporting "not found"
898 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
899 m_stat.onFindRetry();
900 return find_result::retry;
903 m_stat.onFindFailed();
904 return find_result::not_found;
907 int nCmp = cmp( key, pChild->m_key );
// double-checked is_valued: cheap relaxed probe, then re-check under the node lock
909 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
911 node_scoped_lock l( m_Monitor, *pChild );
912 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
914 m_stat.onFindSuccess();
915 return find_result::found;
920 m_stat.onFindFailed();
921 return find_result::not_found;
// descend: a "shrinking" child version means a rotation is in progress
924 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
925 if ( nChildVersion & node_type::shrinking ) {
926 m_stat.onFindWaitShrinking();
927 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
// after waiting, the link we traversed may have changed - re-validate pNode
929 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
930 m_stat.onFindRetry();
931 return find_result::retry;
934 else if ( nChildVersion != node_type::unlinked ) {
// validate the parent link before recursing into the child
936 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
937 m_stat.onFindRetry();
938 return find_result::retry;
941 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
// a retry from the child is absorbed here by re-running the loop
942 if ( found != find_result::retry )
948 template <typename K, typename Compare, typename Func>
949 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
951 assert( gc::is_locked() );
955 // get right child of root
956 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
958 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
959 if ( nChildVersion & node_type::shrinking ) {
960 m_stat.onUpdateRootWaitShrinking();
961 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
962 result = update_flags::retry;
964 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
965 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
970 if ( nFlags & update_flags::allow_insert ) {
971 // insert into tree as right child of the root
973 node_scoped_lock l( m_Monitor, *m_pRoot );
974 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
975 result = result == update_flags::retry;
979 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
980 mapped_type pVal = funcUpdate( pNew );
981 assert( pVal != nullptr );
982 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
984 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
985 m_pRoot->height( 2, memory_model::memory_order_relaxed );
989 m_stat.onInsertSuccess();
990 return update_flags::result_inserted;
993 return update_flags::failed;
995 } while ( result == update_flags::retry );
999 template <typename K, typename Compare, typename Func>
1000 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1002 assert( gc::is_locked() );
1006 // get right child of root
1007 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1009 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
1010 if ( nChildVersion & node_type::shrinking ) {
1011 m_stat.onRemoveRootWaitShrinking();
1012 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1013 result = update_flags::retry;
1015 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1016 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1021 } while ( result == update_flags::retry );
1023 return result == update_flags::result_removed;
// Recursive insert/update step for the subtree rooted at pNode.
// nVersion is the version of pNode the caller observed; a mismatch yields
// update_flags::retry so the caller restarts from a still-valid ancestor.
// NOTE(review): this fragment is garbled by extraction (lines missing);
// the code lines below are left byte-identical.
1026 template <typename K, typename Compare, typename Func>
1027 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// must run inside an RCU read-side critical section
1029 assert( gc::is_locked() );
1030 assert( nVersion != node_type::unlinked );
1031 CDS_UNUSED( pParent );
1033 int nCmp = cmp( key, pNode->m_key );
// key matches this node: update in place if allowed, otherwise report failure
1035 if ( nFlags & update_flags::allow_update ) {
1036 return try_update_node( funcUpdate, pNode, disp );
1038 return update_flags::failed;
1043 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
// validate pNode's version after reading the child link
1044 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1045 m_stat.onUpdateRetry();
1046 return update_flags::retry;
// no child on the search side: insert a new leaf if allowed
1049 if ( pChild == nullptr ) {
1051 if ( nFlags & update_flags::allow_insert )
1052 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1054 result = update_flags::failed;
1058 result = update_flags::retry;
1059 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// a "shrinking" child version means a rotation is in progress - wait and retry
1060 if ( nChildVersion & node_type::shrinking ) {
1061 m_stat.onUpdateWaitShrinking();
1062 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1065 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1066 // this second read is important, because it is protected by nChildVersion
1068 // validate the read that our caller took to get to node
1069 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1070 m_stat.onUpdateRetry();
1071 return update_flags::retry;
1074 // At this point we know that the traversal our parent took to get to node is still valid.
1075 // The recursive implementation will validate the traversal from node to
1076 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1077 // This means that we are no longer vulnerable to node shrinks, and we don't need
1078 // to validate node version any more.
1079 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1082 } while ( result == update_flags::retry );
// Recursive removal step for the subtree rooted at pNode; structurally mirrors
// try_update. nVersion is the caller-observed version of pNode; a mismatch
// yields update_flags::retry so the caller restarts from a valid ancestor.
// NOTE(review): this fragment is garbled by extraction (lines missing);
// the code lines below are left byte-identical.
1086 template <typename K, typename Compare, typename Func>
1087 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// must run inside an RCU read-side critical section
1089 assert( gc::is_locked() );
1090 assert( nVersion != node_type::unlinked );
1092 int nCmp = cmp( key, pNode->m_key );
// key matches this node: unlink/devalue it
1094 return try_remove_node( pParent, pNode, nVersion, func, disp );
1098 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
// validate pNode's version after reading the child link
1099 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1100 m_stat.onRemoveRetry();
1101 return update_flags::retry;
// no child on the search side: the key is not in the tree
1104 if ( pChild == nullptr ) {
1105 return update_flags::failed;
1109 result = update_flags::retry;
1110 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// a "shrinking" child version means a rotation is in progress - wait and retry
1111 if ( nChildVersion & node_type::shrinking ) {
1112 m_stat.onRemoveWaitShrinking();
1113 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1116 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1117 // this second read is important, because it is protected by nChildVersion
1119 // validate the read that our caller took to get to node
1120 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1121 m_stat.onRemoveRetry();
1122 return update_flags::retry;
1125 // At this point we know that the traversal our parent took to get to node is still valid.
1126 // The recursive implementation will validate the traversal from node to
1127 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1128 // This means that we are no longer vulnerable to node shrinks, and we don't need
1129 // to validate node version any more.
1130 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1133 } while ( result == update_flags::retry );
// Removes the extreme (minimum or maximum) node of the subtree rooted at pNode.
// nDir selects the traversal direction — presumably left_child for the minimum
// and right_child for the maximum (TODO confirm against callers). The walk
// descends along nDir until the child pointer is null, then removes that node
// via try_remove_node(), using the same version-validation / retry protocol
// as try_remove(). `func` receives the extracted key/value.
// Preconditions: RCU read-side lock is held; pNode is not unlinked.
// NOTE(review): this excerpt elides some original lines (braces, loop header,
// final return).
1137 template <typename Func>
1138 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1140 assert( gc::is_locked() );
1141 assert( nVersion != node_type::unlinked );
1145 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Version re-check: detect a concurrent shrink of pNode after the child read.
1146 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1147 m_stat.onRemoveRetry();
1148 return update_flags::retry;
// No further child in direction nDir: pNode is the extreme node - remove it.
1151 if ( pChild == nullptr ) {
1153 return try_remove_node( pParent, pNode, nVersion, func, disp );
1156 result = update_flags::retry;
1157 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is mid-rotation: wait for the shrink to finish, then retry the loop.
1158 if ( nChildVersion & node_type::shrinking ) {
1159 m_stat.onRemoveWaitShrinking();
1160 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1163 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1164 // this second read is important, because it is protected by nChildVersion
1166 // validate the read that our caller took to get to node
1167 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1168 m_stat.onRemoveRetry();
1169 return update_flags::retry;
1172 // At this point we know that the traversal our parent took to get to node is still valid.
1173 // The recursive implementation will validate the traversal from node to
1174 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1175 // This means that we are no longer vulnerable to node shrinks, and we don't need
1176 // to validate node version any more.
1177 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1180 } while ( result == update_flags::retry );
// Inserts a brand-new leaf as the nDir child of pNode.
// funcUpdate is called with the freshly allocated node and must return a
// non-null mapped_type pointer, which is stored as the node's value.
// With c_bRelaxedInsert the node is allocated and filled BEFORE taking the
// node lock (optimistic); the pre-lock version/child check cheaply bails out
// with retry when the slot was already taken. Without relaxed insert the
// allocation happens only after the lock is held and validation passed.
// Returns update_flags::retry or update_flags::result_inserted.
// NOTE(review): this excerpt elides some original lines (pNew declaration,
// braces, disposal of the speculatively allocated node on the relaxed-insert
// failure path).
1184 template <typename K, typename Func>
1185 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Fills the new node's value from the user functor; value must not be null.
1189 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1190 mapped_type pVal = funcUpdate( pNew );
1191 assert( pVal != nullptr );
1192 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Optimistic path: pre-check before locking so we can allocate outside the lock.
1195 if ( c_bRelaxedInsert ) {
1196 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1197 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1199 m_stat.onInsertRetry();
1200 return update_flags::retry;
// Speculative allocation: height 1, version 0, parent pNode, no children.
1203 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1206 node_type * pDamaged;
1208 assert( pNode != nullptr );
1209 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and slot still empty.
1211 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1212 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1214 if ( c_bRelaxedInsert ) {
1216 m_stat.onRelaxedInsertFailed();
1219 m_stat.onInsertRetry();
1220 return update_flags::retry;
// Pessimistic path: allocate only now that validation succeeded.
1223 if ( !c_bRelaxedInsert )
1224 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the new leaf and record the node whose height may now be stale.
1226 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1227 pDamaged = fix_height_locked( pNode );
1231 m_stat.onInsertSuccess();
1233 fix_height_and_rebalance( pDamaged, disp );
1235 return update_flags::result_inserted;
// Replaces the value of an existing node in place.
// Locks pNode, retries if it was concurrently unlinked, then swaps in the
// pointer produced by funcUpdate (must be non-null) and hands the old value
// to the RCU disposer for deferred reclamation.
// Returns update_flags::retry or update_flags::result_updated.
// NOTE(review): this excerpt elides some original lines (pOld declaration,
// braces, and a probable old-value null check guarding the dispose).
1238 template <typename Func>
1239 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1242 assert( pNode != nullptr );
1244 node_scoped_lock l( m_Monitor, *pNode );
// The node may have been spliced out between lookup and lock acquisition.
1246 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1247 m_stat.onUpdateUnlinked();
1248 return update_flags::retry;
1251 pOld = pNode->value( memory_model::memory_order_relaxed );
1252 mapped_type pVal = funcUpdate( pNode );
1256 assert( pVal != nullptr );
1257 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Old value cannot be freed immediately: readers may still hold it under RCU.
1262 disp.dispose_value(pOld);
1263 m_stat.onDisposeValue();
1266 m_stat.onUpdateSuccess();
1267 return update_flags::result_updated;
// Removes the value stored at pNode, chosen of two strategies:
//  1) If pNode has at most one child it is physically unlinked: lock the
//     parent, re-validate the parent/child relationship, lock the node,
//     verify its version and splice it out via try_unlink_locked(), then fix
//     heights/rebalance from the parent upward.
//  2) Otherwise pNode has two children and cannot be spliced; only its value
//     pointer is nulled, turning it into a routing node that rebalancing may
//     reclaim later.
// `func(key, pOld, disp)` consumes the extracted value; a true return means
// the value was disposed, false means it was handed out (extract semantics).
// Returns update_flags::retry / failed / result_removed.
// NOTE(review): mixes atomics::memory_order_relaxed and
// memory_model::memory_order_relaxed — presumably aliases of the same type;
// confirm and unify. This excerpt also elides several lines (braces, pOld
// declarations, the final return of the routing-node path).
1270 template <typename Func>
1271 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1273 assert( pParent != nullptr );
1274 assert( pNode != nullptr );
// Already logically empty (routing node): nothing to remove.
1276 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1277 return update_flags::failed;
// Case 1: at most one child - the node can be physically spliced out.
1279 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1280 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1282 node_type * pDamaged;
// Lock order: parent first, then node (consistent with the rest of the tree).
1285 node_scoped_lock lp( m_Monitor, *pParent );
1286 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1287 return update_flags::retry;
1290 node_scoped_lock ln( m_Monitor, *pNode );
1291 pOld = pNode->value( memory_model::memory_order_relaxed );
// Both the version check and the unlink must succeed; otherwise retry.
1292 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1294 && try_unlink_locked( pParent, pNode, disp )))
1296 return update_flags::retry;
1299 pDamaged = fix_height_locked( pParent );
1303 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1304 m_stat.onDisposeValue();
1306 m_stat.onExtractValue();
1308 fix_height_and_rebalance( pDamaged, disp );
1309 return update_flags::result_removed;
// Case 2: two children - only clear the value, leaving a routing node.
1312 int result = update_flags::retry;
1315 node_scoped_lock ln( m_Monitor, *pNode );
1316 pOld = pNode->value( atomics::memory_order_relaxed );
1317 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1318 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1319 result = update_flags::result_removed;
1323 if ( result == update_flags::result_removed ) {
1325 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1326 m_stat.onDisposeValue();
1328 m_stat.onExtractValue();
// Splices pNode (which has at most one child) out of the tree.
// Both pParent and pNode must already be locked by the caller. Fails
// (returns false — the returns are elided in this excerpt) when pNode is no
// longer a child of pParent or has grown a second child, forcing the caller
// to retry. On success: the surviving child (or null) replaces pNode under
// pParent, pNode is version-marked `unlinked`, its value pointer is cleared
// (the caller disposes the value), and the node itself is handed to the RCU
// disposer for deferred reclamation.
1335 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1337 // pParent and pNode must be locked
1338 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1340 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1341 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1342 if ( pNode != pParentLeft && pNode != pParentRight ) {
1343 // node is no longer a child of parent
1347 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1348 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1350 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1351 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// A node with two children cannot be spliced out without relocation.
1352 if ( pLeft != nullptr && pRight != nullptr ) {
1353 // splicing is no longer possible
// pSplice is the single surviving child (or null if pNode is a leaf).
1356 node_type * pSplice = pLeft ? pLeft : pRight;
1358 if ( pParentLeft == pNode )
1359 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1361 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// Release order publishes the new parent link to concurrent readers.
1364 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1366 // Mark the node as unlinked
1367 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1369 // The value will be disposed by calling function
1370 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Node memory is reclaimed only after the current RCU grace period.
1372 disp.dispose( pNode );
1373 m_stat.onDisposeNode();
1380 private: // rotations
// Classifies what maintenance pNode needs, without taking any lock:
//  - unlink_required: a valueless routing node with at most one child;
//  - rebalance_required: AVL balance factor outside [-1, +1];
//  - a positive height value: stored height is stale, returns the new height;
//  - nothing_required: node is consistent.
// All reads are relaxed; callers re-validate under locks before acting.
1382 int estimate_node_condition( node_type * pNode )
1384 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1385 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Valueless node with a missing child is pure routing overhead - remove it.
1387 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1388 return unlink_required;
1390 int h = pNode->height( memory_model::memory_order_relaxed );
1391 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1392 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1394 int hNew = 1 + std::max( hL, hR );
1395 int nBalance = hL - hR;
1397 if ( nBalance < -1 || nBalance > 1 )
1398 return rebalance_required;
// Stale height is reported as the corrected value; equal heights need nothing.
1400 return h != hNew ? hNew : nothing_required;
// Locking wrapper around fix_height_locked(): acquires pNode's monitor lock
// and recomputes its stored height. Returns the next node whose height may
// now be damaged (pNode's parent), or nullptr/pNode per fix_height_locked().
1403 node_type * fix_height( node_type * pNode )
1405 assert( pNode != nullptr );
1406 node_scoped_lock l( m_Monitor, *pNode );
1407 return fix_height_locked( pNode );
// Repairs pNode's cached height; pNode MUST be locked by the caller.
// Dispatches on estimate_node_condition(): if structural work (rebalance or
// unlink) is needed the node itself is the damaged node (return elided in
// this excerpt); if the height is merely stale it is updated and the parent
// is returned as the next candidate for damage propagation.
// NOTE(review): the `switch` header and per-case returns are elided here.
1410 node_type * fix_height_locked( node_type * pNode )
1412 // pNode must be locked!!!
1413 int h = estimate_node_condition( pNode );
1415 case rebalance_required:
1416 case unlink_required:
1418 case nothing_required:
// Default case: h holds the corrected height; store it and bubble upward.
1421 pNode->height( h, memory_model::memory_order_relaxed );
1422 return parent( pNode, memory_model::memory_order_relaxed );
// Damage-propagation loop: starting from a node whose subtree may violate
// AVL invariants, walks toward the root fixing heights and performing
// rotations until no more damage remains. The root sentinel is recognized by
// having a null parent, which terminates the loop. Cheap height-only damage
// is fixed lock-free-ish via fix_height(); structural damage is handled by
// rebalance_locked() under parent+node locks, re-validating the parent link
// after acquiring the parent's lock.
1426 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1428 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1429 int nCond = estimate_node_condition( pNode );
// Node already consistent, or unlinked by a concurrent thread - done here.
1430 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Pure height staleness: fix it and continue with the returned parent.
1433 if ( nCond != unlink_required && nCond != rebalance_required )
1434 pNode = fix_height( pNode );
1436 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1437 assert( pParent != nullptr );
// Structural damage: lock parent, re-validate linkage, then lock node.
1439 node_scoped_lock lp( m_Monitor, *pParent );
1440 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1441 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1443 node_scoped_lock ln( m_Monitor, *pNode );
1444 pNode = rebalance_locked( pParent, pNode, disp );
// Performs one rebalancing step on pNode with pParent and pNode locked.
// Chooses among: unlinking a valueless routing node, rotating right (left
// subtree too tall), rotating left (right subtree too tall), or simply
// repairing a stale height. Returns the next damaged node to process, or
// nullptr when no more rebalancing is necessary.
// NOTE(review): some lines are elided in this excerpt (the `balance > 1`
// condition before the right-rotation call, a retry return, final return).
1451 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1453 // pParent and pNode should be locked.
1454 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1455 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1456 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1457 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1459 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1460 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Valueless routing node with a missing child: splice it out instead of rotating.
1462 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1463 if ( try_unlink_locked( pParent, pNode, disp ))
1464 return fix_height_locked( pParent );
1466 // retry needed for pNode
1471 int h = pNode->height( memory_model::memory_order_relaxed );
1472 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1473 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1474 int hNew = 1 + std::max( hL, hR );
1475 int balance = hL - hR;
// balance > 1: left-heavy (condition line elided); balance < -1: right-heavy.
1478 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1479 else if ( balance < -1 )
1480 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1481 else if ( hNew != h ) {
// Balanced but height cache is stale: repair and propagate to the parent.
1482 pNode->height( hNew, memory_model::memory_order_relaxed );
1484 // pParent is already locked
1485 return fix_height_locked( pParent );
// Handles a left-heavy pNode (pParent and pNode locked): locks pLeft, then
// decides between a single right rotation, a double rotation
// (right-over-left), or recursing to first rebalance pLeft itself.
// Every validation failure returns pNode so the caller retries.
// hR is the height of pNode's right subtree, measured by the caller.
// NOTE(review): several condition lines and returns are elided in this
// excerpt (e.g. the hL-vs-hR re-check guard, the hLL >= hLR single-rotation
// guard).
1491 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1493 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1494 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1495 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1497 // pParent and pNode is locked yet
1498 // pNode->pLeft is too large, we will rotate-right.
1499 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1502 assert( pLeft != nullptr );
1503 node_scoped_lock l( m_Monitor, *pLeft );
// Re-validate the child pointer now that pLeft is locked.
1504 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1505 return pNode; // retry for pNode
1507 int hL = pLeft->height( memory_model::memory_order_relaxed );
1509 return pNode; // retry
1511 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1512 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1513 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1514 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Left-left case (guard elided): a single right rotation suffices.
1518 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Left-right case: must inspect (and lock) pLeft's right child first.
1521 assert( pLRight != nullptr );
1523 node_scoped_lock lr( m_Monitor, *pLRight );
1524 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1525 return pNode; // retry
1527 hLR = pLRight->height( memory_model::memory_order_relaxed );
// Height re-read under lock may flip the decision back to a single rotation.
1529 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1531 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1532 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1533 int balance = hLL - hLRL;
1534 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1535 // nParent.child.left won't be damaged after a double rotation
1536 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1540 // focus on pLeft, if necessary pNode will be balanced later
1541 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): handles a right-heavy pNode
// (pParent and pNode locked). Locks pRight, re-validates, and chooses among
// a single left rotation, a double rotation (left-over-right), or recursing
// to rebalance pRight first. Returns pNode on validation failure (retry).
// hL is the height of pNode's left subtree, measured by the caller.
// NOTE(review): a few condition/guard lines are elided in this excerpt
// (e.g. the hRR >= hRL single-rotation guard).
1546 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1548 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1549 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1550 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1552 // pParent and pNode is locked yet
1554 assert( pRight != nullptr );
1555 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate the child pointer now that pRight is locked.
1556 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1557 return pNode; // retry for pNode
1559 int hR = pRight->height( memory_model::memory_order_relaxed );
// Heights changed since the caller measured: no longer right-heavy enough.
1560 if ( hL - hR >= -1 )
1561 return pNode; // retry
1563 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1564 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1565 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1566 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Right-right case (guard elided): a single left rotation suffices.
1568 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Right-left case: must inspect (and lock) pRight's left child first.
1571 assert( pRLeft != nullptr );
1572 node_scoped_lock lrl( m_Monitor, *pRLeft );
1573 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1574 return pNode; // retry
1576 hRL = pRLeft->height( memory_model::memory_order_relaxed );
// Height re-read under lock may flip the decision back to a single rotation.
1578 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1580 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1581 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1582 int balance = hRR - hRLR;
1583 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1584 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise rebalance pRight first; pNode will be revisited later if needed.
1586 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" (rotation in progress) by OR-ing the shrinking
// flag into its version with release ordering; optimistic readers that
// observe the flag will wait or retry rather than trust pNode's links.
// Must be paired with end_change() after the links are rewritten.
1590 static void begin_change( node_type * pNode, version_type version )
1592 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a rotation started by begin_change(): clears the shrinking and
// unlinked flag bits and bumps the version counter (release), so any reader
// that captured the old version will fail its validation and retry.
1594 static void end_change( node_type * pNode, version_type version )
1596 // Clear shrinking and unlinked flags and increment version
1597 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the subtree root, pNode becomes its
// right child, and pLeft's old right child (pLRight) becomes pNode's left
// child. pParent, pNode and pLeft must be locked. The rewrite is bracketed
// by begin_change()/end_change() on pNode so concurrent optimistic readers
// detect the shrink. Heights are recomputed from the caller-supplied
// hR/hLL/hLR. Returns the next damaged node (several `return` lines on the
// damage-assessment paths are elided in this excerpt).
1600 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1602 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1603 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish "shrinking" before touching any link of pNode.
1605 begin_change( pNode, nodeVersion );
// pLRight moves from pLeft's right slot to pNode's left slot.
1607 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1608 if ( pLRight != nullptr )
1609 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pLeft's right child.
1611 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1612 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// Reconnect pLeft to pParent in whichever slot pNode occupied.
1614 if ( pParentLeft == pNode )
1615 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1617 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1618 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1620 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1622 // fix up heights links
1623 int hNode = 1 + std::max( hLR, hR );
1624 pNode->height( hNode, memory_model::memory_order_relaxed );
1625 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// Bump version / clear shrinking: readers with the old version will retry.
1627 end_change( pNode, nodeVersion );
1628 m_stat.onRotateRight();
1630 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1631 // parent.child). pNode is the deepest. Perform as many fixes as we can
1632 // with the locks we've got.
1634 // We've already fixed the height for pNode, but it might still be outside
1635 // our allowable balance range. In that case a simple fix_height_locked()
1637 int nodeBalance = hLR - hR;
1638 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1639 // we need another rotation at pNode
1643 // we've fixed balance and height damage for pNode, now handle
1644 // extra-routing node damage
1645 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1646 // we need to remove pNode and then repair
1650 // we've already fixed the height at pLeft, do we need a rotation here?
1651 int leftBalance = hLL - hNode;
1652 if ( leftBalance < -1 || leftBalance > 1 )
1655 // pLeft might also have routing node damage (if pLeft.left was null)
1656 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1659 // try to fix the parent height while we've still got the lock
1660 return fix_height_locked( pParent );
// Single left rotation — mirror of rotate_right_locked(): pRight becomes the
// subtree root, pNode becomes its left child, and pRight's old left child
// (pRLeft) becomes pNode's right child. pParent, pNode and pRight must be
// locked; the link rewrite is bracketed by begin_change()/end_change() on
// pNode. Returns the next damaged node (the per-case `return` lines in the
// damage-assessment tail are elided in this excerpt).
1663 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1665 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1666 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish "shrinking" before touching any link of pNode.
1668 begin_change( pNode, nodeVersion );
1670 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1671 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1672 if ( pRLeft != nullptr )
1673 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pRight's left child.
1675 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1676 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Reconnect pRight to pParent in whichever slot pNode occupied.
1678 if ( pParentLeft == pNode )
1679 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1681 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1682 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1684 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up from the caller-supplied subtree heights.
1687 int hNode = 1 + std::max( hL, hRL );
1688 pNode->height( hNode, memory_model::memory_order_relaxed );
1689 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1691 end_change( pNode, nodeVersion );
1692 m_stat.onRotateLeft();
// Damage assessment: does pNode need another rotation?
1694 int nodeBalance = hRL - hL;
1695 if ( nodeBalance < -1 || nodeBalance > 1 )
// Is pNode now a removable routing node?
1698 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
// Does the new subtree root pRight itself need a rotation?
1701 int rightBalance = hRR - hNode;
1702 if ( rightBalance < -1 || rightBalance > 1 )
// Or is pRight a removable routing node?
1705 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
// Otherwise repair the parent's height while its lock is still held.
1708 return fix_height_locked( pParent );
// Double rotation for the left-right case: first conceptually rotates pLeft
// left, then pNode right, so pLRight (pLeft's right child) becomes the new
// subtree root with pLeft as its left child and pNode as its right child.
// pParent, pNode, pLeft and pLRight must be locked. Both pNode and pLeft are
// version-bracketed with begin_change()/end_change() since both are shrunk.
// Returns the next damaged node (some `return` lines in the tail are elided
// in this excerpt).
1711 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1713 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1714 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1716 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1717 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1718 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1719 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// Both pNode and pLeft change shape: mark both shrinking before relinking.
1721 begin_change( pNode, nodeVersion );
1722 begin_change( pLeft, leftVersion );
1724 // fix up pNode links, careful about the order!
// pLRight's right subtree moves under pNode ...
1725 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1726 if ( pLRR != nullptr )
1727 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ... and pLRight's left subtree moves under pLeft.
1729 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1730 if ( pLRL != nullptr )
1731 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the new root of this subtree, adopting pLeft and pNode.
1733 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1734 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1735 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1736 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Reconnect pLRight to pParent (slot selection condition elided here).
1739 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1741 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1742 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1744 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up.
1747 int hNode = 1 + std::max( hLRR, hR );
1748 pNode->height( hNode, memory_model::memory_order_relaxed );
1749 int hLeft = 1 + std::max( hLL, hLRL );
1750 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1751 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1753 end_change( pNode, nodeVersion );
1754 end_change( pLeft, leftVersion );
1755 m_stat.onRotateRightOverLeft();
1757 // caller should have performed only a single rotation if pLeft was going
1758 // to end up damaged
1759 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1760 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1762 // We have damaged pParent, pLR (now parent.child), and pNode (now
1763 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1764 // can with the locks we've got.
1766 // We've already fixed the height for pNode, but it might still be outside
1767 // our allowable balance range. In that case a simple fix_height_locked()
1769 int nodeBalance = hLRR - hR;
1770 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1771 // we need another rotation at pNode
1775 // pNode might also be damaged by being an unnecessary routing node
1776 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1777 // repair involves splicing out pNode and maybe more rotations
1781 // we've already fixed the height at pLRight, do we need a rotation here?
1782 int balanceLR = hLeft - hNode;
1783 if ( balanceLR < -1 || balanceLR > 1 )
1786 // try to fix the parent height while we've still got the lock
1787 return fix_height_locked( pParent );
// Double rotation for the right-left case — mirror of
// rotate_right_over_left_locked(): first conceptually rotates pRight right,
// then pNode left, so pRLeft (pRight's left child) becomes the new subtree
// root with pNode as its left child and pRight as its right child.
// pParent, pNode, pRight and pRLeft must be locked; both pNode and pRight
// are version-bracketed with begin_change()/end_change().
// Returns the next damaged node (some `return` lines and the parent-slot
// selection condition are elided in this excerpt).
1790 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1792 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1793 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1795 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1796 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1797 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1798 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// Both pNode and pRight change shape: mark both shrinking before relinking.
1800 begin_change( pNode, nodeVersion );
1801 begin_change( pRight, rightVersion );
1803 // fix up pNode links, careful about the order!
// pRLeft's left subtree moves under pNode ...
1804 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1805 if ( pRLL != nullptr )
1806 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ... and pRLeft's right subtree moves under pRight.
1808 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1809 if ( pRLR != nullptr )
1810 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the new root of this subtree, adopting pRight and pNode.
1812 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1813 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1814 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1815 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Reconnect pRLeft to pParent (slot selection condition elided here).
1818 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1820 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1821 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1823 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up.
1826 int hNode = 1 + std::max( hL, hRLL );
1827 pNode->height( hNode, memory_model::memory_order_relaxed );
1828 int hRight = 1 + std::max( hRLR, hRR );
1829 pRight->height( hRight, memory_model::memory_order_relaxed );
1830 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1832 end_change( pNode, nodeVersion );
1833 end_change( pRight, rightVersion );
1834 m_stat.onRotateLeftOverRight();
// Caller guarantees pRight would not end up damaged by the double rotation.
1836 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Damage assessment: pNode rotation, pNode routing-node, pRLeft rotation.
1838 int nodeBalance = hRLL - hL;
1839 if ( nodeBalance < -1 || nodeBalance > 1 )
1841 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1844 int balRL = hRight - hNode;
1845 if ( balRL < -1 || balRL > 1 )
// Otherwise repair the parent's height while its lock is still held.
1848 return fix_height_locked( pParent );
1853 }} // namespace cds::container
1855 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H