3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
18 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
19 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
20 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
21 the disposer functor provided by \p Traits template parameter.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
70 # ifdef CDS_DOXYGEN_INVOKED
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
74 typedef cds::urcu::exempt_ptr< gc,
75 typename std::remove_pointer<mapped_type>::type,
76 typename std::remove_pointer<mapped_type>::type,
82 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
86 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
87 typedef typename node_type::version_type version_type;
89 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
90 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
92 enum class find_result
109 result_inserted = allow_insert,
110 result_updated = allow_update,
117 nothing_required = -3,
118 rebalance_required = -2,
127 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Allocates a new tree node via the node allocator, perfect-forwarding the key
// and initializing height, version and parent/left/right links.
132 template <typename K>
133 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
135 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees the node's memory only; the value disposer is deliberately NOT invoked here
// (values are retired separately through rcu_disposer).
138 static void free_node( node_type * pNode )
140 // Free node without disposer
141 cxx_allocator().Delete( pNode );
144 static void free_value( mapped_type pVal )
// Reads pNode's child pointer in direction nDir with the caller-supplied memory order.
149 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
151 return pNode->child( nDir ).load( order );
// Reads pNode's parent pointer with the caller-supplied memory order.
154 static node_type * parent( node_type * pNode, atomics::memory_order order )
156 return pNode->m_pParent.load( order );
// Deferred-reclamation helper: collects nodes (and at most one value) retired
// during a tree operation and hands them to the RCU GC on destruction.
162 node_type * m_pRetiredList; ///< head of retired node list
163 mapped_type m_pRetiredValue; ///< value retired
167 : m_pRetiredList( nullptr )
168 , m_pRetiredValue( nullptr )
// Links an unlinked node into the retired list; the node must already be value-less.
176 void dispose( node_type * pNode )
178 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
179 pNode->m_pNextRemoved = m_pRetiredList;
180 m_pRetiredList = pNode;
// Remembers a single value to retire; asserts at most one value per operation.
183 void dispose_value( mapped_type pVal )
185 assert( m_pRetiredValue == nullptr );
186 m_pRetiredValue = pVal;
// Functor handed to the RCU GC to free node memory after a grace period.
190 struct internal_disposer
192 void operator()( node_type * p ) const
// Retirement must happen outside the RCU read-side critical section.
200 assert( !gc::is_locked() );
202 // TODO: use RCU::batch_retire
// Walk the retired-node list; each node is passed to the GC individually.
205 for ( node_type * p = m_pRetiredList; p; ) {
206 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
207 // Value already disposed
208 gc::template retire_ptr<internal_disposer>( p );
// Retire the (single) value, if any, with the user-provided disposer.
213 if ( m_pRetiredValue )
214 gc::template retire_ptr<disposer>( m_pRetiredValue );
222 typename node_type::base_class m_Root;
224 item_counter m_ItemCounter;
225 mutable sync_monitor m_Monitor;
230 /// Creates empty map
232 : m_pRoot( static_cast<node_type *>( &m_Root ))
243 The \p key_type should be constructible from a value of type \p K.
245 RCU \p synchronize() can be called. RCU should not be locked.
247 Returns \p true if inserting successful, \p false otherwise.
// Inserts (key -> pVal) if the key is not present. The update functor asserts the
// freshly created node has no value yet; returns true only on a real insertion.
249 template <typename K>
250 bool insert( K const& key, mapped_type pVal )
252 return do_update(key, key_comparator(),
253 [pVal]( node_type * pNode ) -> mapped_type
255 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
258 update_flags::allow_insert
259 ) == update_flags::result_inserted;
262 /// Updates the value for \p key
264 The operation performs inserting or updating the value for \p key with lock-free manner.
265 If \p bInsert is \p false, only updating of existing node is possible.
267 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
268 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
269 constructible from type \p K.
270 Otherwise, the value for \p key will be changed to \p pVal.
272 RCU \p synchronize() method can be called. RCU should not be locked.
274 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
275 \p second is \p true if new node has been added or \p false if the node with \p key
// Inserts or updates the value for key. first == operation succeeded,
// second == a new node was inserted (vs. an existing one updated).
278 template <typename K, typename Func>
279 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
281 int result = do_update( key, key_comparator(),
282 [pVal]( node_type * ) -> mapped_type
// allow_insert is granted only when bInsert is true
286 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
288 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
291 /// Delete \p key from the map
293 RCU \p synchronize() method can be called. RCU should not be locked.
295 Return \p true if \p key is found and deleted, \p false otherwise
// Removes key; the value is retired via rcu_disposer (disposed, not extracted).
297 template <typename K>
298 bool erase( K const& key )
303 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
307 /// Deletes the item from the map using \p pred predicate for searching
309 The function is an analog of \p erase(K const&)
310 but \p pred is used for key comparing.
311 \p Less functor has the interface like \p std::less.
312 \p Less must imply the same element order as the comparator used for building the map.
// erase() variant using a Less predicate converted to a comparator;
// Less must induce the same order as the tree's comparator.
314 template <typename K, typename Less>
315 bool erase_with( K const& key, Less pred )
320 cds::opt::details::make_comparator_from_less<Less>(),
321 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
325 /// Delete \p key from the map
327 The function searches an item with key \p key, calls \p f functor
328 and deletes the item. If \p key is not found, the functor is not called.
330 The functor \p Func interface:
333 void operator()(mapped_type& item) { ... }
337 RCU \p synchronize method can be called. RCU should not be locked.
339 Return \p true if key is found and deleted, \p false otherwise
// Removes key, calling f on the value before it is retired for disposal.
341 template <typename K, typename Func>
342 bool erase( K const& key, Func f )
347 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
350 disp.dispose_value(pVal);
356 /// Deletes the item from the map using \p pred predicate for searching
358 The function is an analog of \p erase(K const&, Func)
359 but \p pred is used for key comparing.
360 \p Less functor has the interface like \p std::less.
361 \p Less must imply the same element order as the comparator used for building the map.
// erase(key, f) variant using a Less predicate converted to a comparator.
363 template <typename K, typename Less, typename Func>
364 bool erase_with( K const& key, Less pred, Func f )
369 cds::opt::details::make_comparator_from_less<Less>(),
370 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
373 disp.dispose_value(pVal);
379 /// Extracts a value with minimal key from the map
381 Returns \p exempt_ptr to the leftmost item.
382 If the tree is empty, returns empty \p exempt_ptr.
384 Note that the function returns only the value for minimal key.
385 To retrieve its key use \p extract_min( Func ) member function.
387 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
388 It means that the function gets leftmost leaf of the tree and tries to unlink it.
389 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
390 So, the function returns the item with minimum key at the moment of tree traversing.
392 RCU \p synchronize method can be called. RCU should NOT be locked.
393 The function does not free the item.
394 The deallocator will be implicitly invoked when the returned object is destroyed or when
395 its \p release() member function is called.
// Extracts the (nearly) leftmost item; the key callback is a no-op here.
397 exempt_ptr extract_min()
399 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
402 /// Extracts minimal key key and corresponding value
404 Returns \p exempt_ptr to the leftmost item.
405 If the tree is empty, returns empty \p exempt_ptr.
407 \p Func functor is used to store minimal key.
408 \p Func has the following signature:
411 void operator()( key_type const& key );
414 If the tree is empty, \p f is not called.
415 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
418 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
419 It means that the function gets leftmost leaf of the tree and tries to unlink it.
420 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
421 So, the function returns the item with minimum key at the moment of tree traversing.
423 RCU \p synchronize method can be called. RCU should NOT be locked.
424 The function does not free the item.
425 The deallocator will be implicitly invoked when the returned object is destroyed or when
426 its \p release() member function is called.
// Extracts the (nearly) leftmost item and reports its key through f.
428 template <typename Func>
429 exempt_ptr extract_min( Func f )
431 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
434 /// Extracts minimal key key and corresponding value
436 This function is a shortcut for the following call:
439 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
441 \p key_type should be copy-assignable. The copy of minimal key
442 is returned in \p min_key argument.
// Convenience wrapper: copies the extracted minimal key into min_key.
// Enabled only for copy-assignable key types.
444 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
445 extract_min_key( key_type& min_key )
447 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
450 /// Extracts a value with maximal key from the tree
452 Returns \p exempt_ptr pointer to the rightmost item.
453 If the set is empty, returns empty \p exempt_ptr.
455 Note that the function returns only the value for maximal key.
456 To retrieve its key use \p extract_max( Func ) member function.
458 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
459 It means that the function gets rightmost leaf of the tree and tries to unlink it.
460 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
461 So, the function returns the item with maximum key at the moment of tree traversing.
463 RCU \p synchronize method can be called. RCU should NOT be locked.
464 The function does not free the item.
465 The deallocator will be implicitly invoked when the returned object is destroyed or when
466 its \p release() is called.
// Extracts the (nearly) rightmost item; the key callback is a no-op here.
468 exempt_ptr extract_max()
470 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
473 /// Extracts the maximal key and corresponding value
475 Returns \p exempt_ptr pointer to the rightmost item.
476 If the set is empty, returns empty \p exempt_ptr.
478 \p Func functor is used to store maximal key.
479 \p Func has the following signature:
482 void operator()( key_type const& key );
485 If the tree is empty, \p f is not called.
486 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
489 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
490 It means that the function gets rightmost leaf of the tree and tries to unlink it.
491 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
492 So, the function returns the item with maximum key at the moment of tree traversing.
494 RCU \p synchronize method can be called. RCU should NOT be locked.
495 The function does not free the item.
496 The deallocator will be implicitly invoked when the returned object is destroyed or when
497 its \p release() is called.
// Extracts the (nearly) rightmost item and reports its key through f.
499 template <typename Func>
500 exempt_ptr extract_max( Func f )
502 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
505 /// Extracts the maximal key and corresponding value
507 This function is a shortcut for the following call:
510 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
512 \p key_type should be copy-assignable. The copy of maximal key
513 is returned in \p max_key argument.
// Convenience wrapper: copies the extracted maximal key into max_key.
// Enabled only for copy-assignable key types.
515 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
516 extract_max_key( key_type& max_key )
518 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
521 /// Extracts an item from the map
523 The function searches an item with key equal to \p key in the tree,
524 unlinks it, and returns \p exempt_ptr pointer to a value found.
525 If \p key is not found the function returns an empty \p exempt_ptr.
527 RCU \p synchronize method can be called. RCU should NOT be locked.
528 The function does not destroy the value found.
529 The disposer will be implicitly invoked when the returned object is destroyed or when
530 its \p release() member function is called.
// Unlinks the item with the given key and returns ownership via exempt_ptr.
532 template <typename Q>
533 exempt_ptr extract( Q const& key )
535 return exempt_ptr(do_extract( key ));
539 /// Extracts an item from the map using \p pred for searching
541 The function is an analog of \p extract(Q const&)
542 but \p pred is used for key compare.
543 \p Less has the interface like \p std::less.
544 \p pred must imply the same element order as the comparator used for building the tree.
// extract() variant using a Less predicate for the search.
546 template <typename Q, typename Less>
547 exempt_ptr extract_with( Q const& key, Less pred )
549 return exempt_ptr(do_extract_with( key, pred ));
552 /// Find the key \p key
554 The function searches the item with key equal to \p key and calls the functor \p f for item found.
555 The interface of \p Func functor is:
558 void operator()( key_type const& key, mapped_type& item );
561 where \p item is the item found.
562 The functor is called under node-level lock.
564 The function applies RCU lock internally.
566 The function returns \p true if \p key is found, \p false otherwise.
// Finds key and invokes f(key, value) under node-level lock (see class docs).
// The value is reloaded from the node before f is applied.
568 template <typename K, typename Func>
569 bool find( K const& key, Func f )
571 return do_find( key, key_comparator(),
572 [&f]( node_type * pNode ) -> bool {
573 assert( pNode != nullptr );
574 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
576 f( pNode->m_key, *pVal );
584 /// Finds the key \p val using \p pred predicate for searching
586 The function is an analog of \p find(K const&, Func)
587 but \p pred is used for key comparing.
588 \p Less functor has the interface like \p std::less.
589 \p Less must imply the same element order as the comparator used for building the map.
// find(key, f) variant using a Less predicate converted to a comparator.
591 template <typename K, typename Less, typename Func>
592 bool find_with( K const& key, Less pred, Func f )
595 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
596 [&f]( node_type * pNode ) -> bool {
597 assert( pNode != nullptr );
598 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
600 f( pNode->m_key, *pVal );
608 /// Find the key \p key
610 The function searches the item with key equal to \p key
611 and returns \p true if it is found, and \p false otherwise.
613 The function applies RCU lock internally.
// Presence check only; the per-node functor simply reports "found".
615 template <typename K>
616 bool find( K const& key )
618 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
621 /// Finds the key \p val using \p pred predicate for searching
623 The function is an analog of \p find(K const&)
624 but \p pred is used for key comparing.
625 \p Less functor has the interface like \p std::less.
626 \p Less must imply the same element order as the comparator used for building the map.
// Presence check using a Less predicate converted to a comparator.
628 template <typename K, typename Less>
629 bool find_with( K const& key, Less pred )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
635 /// Clears the tree (thread safe, not atomic)
637 The function unlink all items from the tree.
638 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
642 assert( set.empty() );
644 the assertion could be raised.
646 For each node the \ref disposer will be called after unlinking.
648 RCU \p synchronize method can be called. RCU should not be locked.
652 while ( extract_min() );
655 /// Clears the tree (not thread safe)
657 This function is not thread safe and may be called only when no other thread deals with the tree.
658 The function is used in the tree destructor.
662 clear(); // temp solution
666 /// Checks if the map is empty
669 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
672 /// Returns item count in the map
674 Only leaf nodes containing user data are counted.
676 The value returned depends on item counter type provided by \p Traits template parameter.
677 If it is \p atomicity::empty_item_counter this function always returns 0.
679 The function is not suitable for checking the tree emptiness, use \p empty()
680 member function for this purpose.
684 return m_ItemCounter;
687 /// Returns const reference to internal statistics
688 stat const& statistics() const
693 /// Checks internal consistency (not atomic, not thread-safe)
695 The debugging function to check internal consistency of the tree.
// Consistency check with a no-op violation handler; see check_consistency(Func).
697 bool check_consistency() const
699 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
702 /// Checks internal consistency (not atomic, not thread-safe)
704 The debugging function to check internal consistency of the tree.
705 The functor \p Func is called if a violation of internal tree structure
709 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
713 - \p nLevel - the level where the violation is found
714 - \p hLeft - the height of left subtree
715 - \p hRight - the height of right subtree
717 The functor is called for each violation found.
// Walks the tree from the root's right child (the logical root) collecting
// AVL-balance violations; f is called for every violation found.
719 template <typename Func>
720 bool check_consistency( Func f ) const
722 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
725 do_check_consistency( pChild, 1, f, nErrors );
// Recursive consistency walk: returns subtree height and calls f / bumps
// nErrors whenever left/right subtree heights differ by more than 1.
733 template <typename Func>
734 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
737 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
738 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
// AVL invariant: |hLeft - hRight| <= 1; report the level where it breaks.
740 if ( hLeft >= hRight ) {
741 if ( hLeft - hRight > 1 ) {
742 f( nLevel, hLeft, hRight );
748 if ( hRight - hLeft > 1 ) {
749 f( nLevel, hLeft, hRight );
// Top-level find driver: retries try_find from the root until it does not
// report retry; runs inside an RCU read-side critical section.
758 template <typename Q, typename Compare, typename Func>
759 bool do_find( Q& key, Compare cmp, Func f ) const
764 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
766 assert( result != find_result::retry );
767 return result == find_result::found;
// Top-level update driver: checks RCU deadlock policy, then delegates to
// try_update_root; retired nodes/values go through removed_list (rcu_disposer).
770 template <typename K, typename Compare, typename Func>
771 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
773 check_deadlock_policy::check();
775 rcu_disposer removed_list;
778 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Top-level removal driver. Func's bool return distinguishes disposal (true)
// from extraction (false) of the removed value.
782 template <typename K, typename Compare, typename Func>
783 bool do_remove( K const& key, Compare cmp, Func func )
785 // Func must return true if the value was disposed
786 // or false if the value was extracted
788 check_deadlock_policy::check();
790 rcu_disposer removed_list;
793 return try_remove_root( key, cmp, func, removed_list );
// Extracts the leftmost value: reports the key via f, captures the value
// pointer, and returns false so the value is extracted rather than disposed.
797 template <typename Func>
798 mapped_type do_extract_min( Func f )
800 mapped_type pExtracted = nullptr;
803 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Mirror of do_extract_min for the rightmost value.
808 template <typename Func>
809 mapped_type do_extract_max( Func f )
811 mapped_type pExtracted = nullptr;
814 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Shared driver for min/max extraction (nDir selects the direction).
// Retries from the root while a concurrent shrink invalidates the descent.
819 template <typename Func>
820 void do_extract_minmax( int nDir, Func func )
822 check_deadlock_policy::check();
824 rcu_disposer removed_list;
828 int result = update_flags::failed;
830 // get right child of root
831 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
833 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// Shrinking bit set: a rotation is in flight; wait for it and retry.
834 if ( nChildVersion & node_type::shrinking ) {
835 m_stat.onRemoveRootWaitShrinking();
836 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
837 result = update_flags::retry;
// Re-read confirms the child is stable under nChildVersion; descend.
839 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
840 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
843 } while ( result == update_flags::retry );
// Extracts the value for key: captures the pointer and returns false so the
// removal path extracts (does not dispose) the value.
847 template <typename Q>
848 mapped_type do_extract( Q const& key )
850 mapped_type pExtracted = nullptr;
854 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// do_extract variant using a Less predicate converted to a comparator.
859 template <typename Q, typename Less>
860 mapped_type do_extract_with( Q const& key, Less pred )
863 mapped_type pExtracted = nullptr;
866 cds::opt::details::make_comparator_from_less<Less>(),
867 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
// Hand-over-hand optimistic search (Bronson et al.): descends from pNode in
// direction nDir, validating pNode's version (nVersion) after every racy read.
// Returns found / not_found, or retry to make the caller restart one level up.
876 template <typename Q, typename Compare, typename Func>
877 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
879 assert( gc::is_locked() );
883 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Version changed since the caller read it: the link we followed may be stale.
885 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
886 m_stat.onFindRetry();
887 return find_result::retry;
890 m_stat.onFindFailed();
891 return find_result::not_found;
894 int nCmp = cmp( key, pChild->m_key );
// Key matches: re-check "valued" under the node lock before reporting success
// (the value may be concurrently removed between the two checks).
896 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
898 node_scoped_lock l( m_Monitor, *pChild );
899 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
901 m_stat.onFindSuccess();
902 return find_result::found;
907 m_stat.onFindFailed();
908 return find_result::not_found;
911 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is shrinking: wait out the rotation, then re-validate our own link.
912 if ( nChildVersion & node_type::shrinking ) {
913 m_stat.onFindWaitShrinking();
914 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
916 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
917 m_stat.onFindRetry();
918 return find_result::retry;
// Child still linked: validate our version once more, then recurse into it.
921 else if ( nChildVersion != node_type::unlinked ) {
923 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
924 m_stat.onFindRetry();
925 return find_result::retry;
928 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
// A retry from the child means only the child-level read raced; loop here.
929 if ( found != find_result::retry )
935 template <typename K, typename Compare, typename Func>
936 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
938 assert( gc::is_locked() );
942 // get right child of root
943 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
945 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
946 if ( nChildVersion & node_type::shrinking ) {
947 m_stat.onUpdateRootWaitShrinking();
948 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
949 result = update_flags::retry;
951 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
952 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
957 if ( nFlags & update_flags::allow_insert ) {
958 // insert into tree as right child of the root
960 node_scoped_lock l( m_Monitor, *m_pRoot );
961 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
962 result = result == update_flags::retry;
966 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
967 mapped_type pVal = funcUpdate( pNew );
968 assert( pVal != nullptr );
969 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
971 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
972 m_pRoot->height( 2, memory_model::memory_order_relaxed );
976 m_stat.onInsertSuccess();
977 return update_flags::result_inserted;
980 return update_flags::failed;
982 } while ( result == update_flags::retry );
// Root-level driver of removal: reads the logical root, waits out a concurrent
// shrink, re-validates the link and descends via try_remove; loops on retry.
// Returns true only when try_remove reports result_removed.
986 template <typename K, typename Compare, typename Func>
987 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
989 assert( gc::is_locked() );
993 // get right child of root
994 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
996 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
997 if ( nChildVersion & node_type::shrinking ) {
998 m_stat.onRemoveRootWaitShrinking();
999 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1000 result = update_flags::retry;
1002 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1003 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1008 } while ( result == update_flags::retry );
1010 return result == update_flags::result_removed;
// Recursive insert/update step (Bronson et al. optimistic descent).
// nVersion is the version of pNode the caller observed; every racy read is
// re-validated against it, returning update_flags::retry on mismatch.
1013 template <typename K, typename Compare, typename Func>
1014 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1016 assert( gc::is_locked() );
1017 assert( nVersion != node_type::unlinked );
1018 CDS_UNUSED( pParent );
1020 int nCmp = cmp( key, pNode->m_key );
// Key matches this node: update in place if allowed, otherwise fail.
1022 if ( nFlags & update_flags::allow_update ) {
1023 return try_update_node( funcUpdate, pNode, disp );
1025 return update_flags::failed;
1030 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1031 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1032 m_stat.onUpdateRetry();
1033 return update_flags::retry;
// No child in the search direction: insert a new leaf there if allowed.
1036 if ( pChild == nullptr ) {
1038 if ( nFlags & update_flags::allow_insert )
1039 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1041 result = update_flags::failed;
1045 result = update_flags::retry;
1046 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1047 if ( nChildVersion & node_type::shrinking ) {
1048 m_stat.onUpdateWaitShrinking();
1049 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1052 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1053 // this second read is important, because it is protected by nChildVersion
1055 // validate the read that our caller took to get to node
1056 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1057 m_stat.onUpdateRetry();
1058 return update_flags::retry;
1061 // At this point we know that the traversal our parent took to get to node is still valid.
1062 // The recursive implementation will validate the traversal from node to
1063 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1064 // This means that we are no longer vulnerable to node shrinks, and we don't need
1065 // to validate node version any more.
1066 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1069 } while ( result == update_flags::retry );
// Recursive removal step; mirrors try_update's optimistic version-validated
// descent. When the key matches pNode, delegates to try_remove_node.
1073 template <typename K, typename Compare, typename Func>
1074 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1076 assert( gc::is_locked() );
1077 assert( nVersion != node_type::unlinked );
1079 int nCmp = cmp( key, pNode->m_key );
1081 return try_remove_node( pParent, pNode, nVersion, func, disp );
1085 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1086 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1087 m_stat.onRemoveRetry();
1088 return update_flags::retry;
// No child in the search direction: the key is not in the tree.
1091 if ( pChild == nullptr ) {
1092 return update_flags::failed;
1096 result = update_flags::retry;
1097 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1098 if ( nChildVersion & node_type::shrinking ) {
1099 m_stat.onRemoveWaitShrinking();
1100 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1103 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1104 // this second read is important, because it is protected by nChildVersion
1106 // validate the read that our caller took to get to node
1107 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1108 m_stat.onRemoveRetry();
1109 return update_flags::retry;
1112 // At this point we know that the traversal our parent took to get to node is still valid.
1113 // The recursive implementation will validate the traversal from node to
1114 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1115 // This means that we are no longer vulnerable to node shrinks, and we don't need
1116 // to validate node version any more.
1117 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1120 } while ( result == update_flags::retry );
// try_extract_minmax: recursive step for extract_min / extract_max.
// Descends in the fixed direction nDir (left for minimum, right for
// maximum) until it reaches the edge node (no child in that direction),
// then removes it via try_remove_node(). Uses the same version-validation
// protocol as try_remove(); the loop below repeats on update_flags::retry.
1124 template <typename Func>
1125 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1127 assert( gc::is_locked() );
1128 assert( nVersion != node_type::unlinked );
1132 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// acquire-read: version change means a rotation invalidated the read above
1133 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1134 m_stat.onRemoveRetry();
1135 return update_flags::retry;
// pNode is the extreme (leftmost/rightmost) node - remove it
1138 if ( pChild == nullptr ) {
1140 return try_remove_node( pParent, pNode, nVersion, func, disp );
1143 result = update_flags::retry;
1144 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// child is mid-shrink: wait for the rotation to finish before descending
1145 if ( nChildVersion & node_type::shrinking ) {
1146 m_stat.onRemoveWaitShrinking();
1147 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1150 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1151 // this second read is important, because it is protected by nChildVersion
1153 // validate the read that our caller took to get to node
1154 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1155 m_stat.onRemoveRetry();
1156 return update_flags::retry;
1159 // At this point we know that the traversal our parent took to get to node is still valid.
1160 // The recursive implementation will validate the traversal from node to
1161 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1162 // This means that we are no longer vulnerable to node shrinks, and we don't need
1163 // to validate node version any more.
1164 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1167 } while ( result == update_flags::retry );
// try_insert_node: creates a new leaf for `key` and links it as the nDir
// child of pNode. funcUpdate(pNew) must produce the (non-null) mapped
// value for the new node. With c_bRelaxedInsert the node is allocated
// optimistically BEFORE taking the node lock; otherwise it is created
// under the lock. Returns update_flags::retry when pNode's version moved
// or the child slot got occupied concurrently, update_flags::result_inserted
// on success.
1171 template <typename K, typename Func>
1172 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// fills a freshly allocated node with the user-provided value pointer
1176 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1177 mapped_type pVal = funcUpdate( pNew );
1178 assert( pVal != nullptr );
1179 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// optimistic pre-check before allocating outside the lock
1182 if ( c_bRelaxedInsert ) {
1183 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1184 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1186 m_stat.onInsertRetry();
1187 return update_flags::retry;
// speculative allocation: height 1, version 0, parent pNode, no children
1190 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1193 node_type * pDamaged;
1195 assert( pNode != nullptr );
1196 node_scoped_lock l( m_Monitor, *pNode );
// re-validate under the lock: version unchanged and slot still empty
1198 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1199 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1201 if ( c_bRelaxedInsert ) {
// the speculatively allocated node is wasted here - presumably freed
// by elided code in this extract; TODO confirm against full source
1203 m_stat.onRelaxedInsertFailed();
1206 m_stat.onInsertRetry();
1207 return update_flags::retry;
// pessimistic path: allocate only after validation succeeded
1210 if ( !c_bRelaxedInsert )
1211 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// publish the new leaf, then compute the damaged node for rebalancing
1213 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1214 pDamaged = fix_height_locked( pNode );
1218 m_stat.onInsertSuccess();
// repair heights/balance up the tree, outside pNode's lock
1220 fix_height_and_rebalance( pDamaged, disp );
1222 return update_flags::result_inserted;
// try_update_node: replaces the value pointer of an existing node.
// Locks the node, retries if it was unlinked concurrently, stores the
// new value produced by funcUpdate(pNode) and hands the previous value
// to the RCU disposer (reclaimed after the grace period).
// Returns update_flags::retry or update_flags::result_updated.
1225 template <typename Func>
1226 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1229 assert( pNode != nullptr );
1231 node_scoped_lock l( m_Monitor, *pNode );
// node may have been spliced out between lookup and lock acquisition
1233 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1234 m_stat.onUpdateUnlinked();
1235 return update_flags::retry;
1238 pOld = pNode->value( memory_model::memory_order_relaxed );
1239 mapped_type pVal = funcUpdate( pNode );
1243 assert( pVal != nullptr );
1244 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// old value is disposed via RCU, not freed immediately
1249 disp.dispose_value(pOld);
1250 m_stat.onDisposeValue();
1253 m_stat.onUpdateSuccess();
1254 return update_flags::result_updated;
// try_remove_node: removes the value stored in pNode. Two cases:
//  - pNode has at most one child: lock parent then node (lock order!),
//    re-validate, splice the node out via try_unlink_locked() and
//    rebalance from the parent;
//  - pNode has two children: it must remain as a routing node, so only
//    its value pointer is cleared under the node lock.
// func( key, pOld, disp ) consumes the old value; its boolean result
// distinguishes "disposed" from "extracted" for statistics only.
// Returns update_flags::failed / ::retry / ::result_removed.
// NOTE(review): this function mixes atomics::memory_order_relaxed with
// memory_model::memory_order_relaxed (used everywhere else in this file);
// the atomics:: form bypasses the memory_model trait - confirm intended.
1257 template <typename Func>
1258 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1260 assert( pParent != nullptr );
1261 assert( pNode != nullptr );
// already value-less (pure routing node) => nothing to remove
1263 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1264 return update_flags::failed;
1266 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1267 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1269 node_type * pDamaged;
// lock order: parent first, then node
1272 node_scoped_lock lp( m_Monitor, *pParent );
// parent must still be linked and still be pNode's parent
1273 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1274 return update_flags::retry;
1277 node_scoped_lock ln( m_Monitor, *pNode );
1278 pOld = pNode->value( memory_model::memory_order_relaxed );
// version must be unchanged and the splice must succeed, else retry
1279 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1281 && try_unlink_locked( pParent, pNode, disp )))
1283 return update_flags::retry;
1286 pDamaged = fix_height_locked( pParent );
1290 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1291 m_stat.onDisposeValue();
1293 m_stat.onExtractValue();
// repair heights/balance from the damaged node upward, locks released
1295 fix_height_and_rebalance( pDamaged, disp );
1296 return update_flags::result_removed;
// two-children case: keep pNode as a routing node, just strip the value
1299 int result = update_flags::retry;
1302 node_scoped_lock ln( m_Monitor, *pNode );
1303 pOld = pNode->value( atomics::memory_order_relaxed );
// succeed only if the version is intact and a value is still present
1304 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1305 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1306 result = update_flags::result_removed;
1310 if ( result == update_flags::result_removed ) {
1312 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1313 m_stat.onDisposeValue();
1315 m_stat.onExtractValue();
// try_unlink_locked: splices pNode (which must have at most one child)
// out of the tree. Caller must hold the locks on both pParent and pNode.
// Returns false when the topology changed under us (pNode is no longer a
// child of pParent, or it re-acquired two children); true after a
// successful unlink. The node itself is reclaimed via RCU; its VALUE is
// intentionally not disposed here - the caller owns that (see comment
// at the m_pValue store below).
1322 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1324 // pParent and pNode must be locked
1325 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1327 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1328 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1329 if ( pNode != pParentLeft && pNode != pParentRight ) {
1330 // node is no longer a child of parent
1334 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1335 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1337 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1338 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1339 if ( pLeft != nullptr && pRight != nullptr ) {
1340 // splicing is no longer possible
// pSplice: the single child (or nullptr) that takes pNode's place
1343 node_type * pSplice = pLeft ? pLeft : pRight;
1345 if ( pParentLeft == pNode )
1346 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1348 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// release-store publishes the new parent link to concurrent readers
1351 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1353 // Mark the node as unlinked
1354 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1356 // The value will be disposed by calling function
1357 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// defer node reclamation to the RCU grace period
1359 disp.dispose( pNode );
1360 m_stat.onDisposeNode();
1367 private: // rotations
// estimate_node_condition: inspects pNode and reports what maintenance it
// needs: unlink_required (value-less routing node missing a child),
// rebalance_required (|hL - hR| > 1), nothing_required (cached height is
// already correct), or the corrected height value when only the cached
// height field is stale. Read-only; relies on relaxed loads, so the
// answer is a heuristic that callers re-validate under locks.
1369 int estimate_node_condition( node_type * pNode )
1371 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1372 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// a routing node that lost a child can be spliced out entirely
1374 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1375 return unlink_required;
1377 int h = pNode->height( memory_model::memory_order_relaxed );
1378 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1379 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1381 int hNew = 1 + std::max( hL, hR );
1382 int nBalance = hL - hR;
// AVL invariant violated => a rotation is needed
1384 if ( nBalance < -1 || nBalance > 1 )
1385 return rebalance_required;
1387 return h != hNew ? hNew : nothing_required;
// fix_height: locking wrapper - acquires pNode's monitor lock and
// delegates to fix_height_locked(). Returns the next damaged node.
1390 node_type * fix_height( node_type * pNode )
1392 assert( pNode != nullptr );
1393 node_scoped_lock l( m_Monitor, *pNode );
1394 return fix_height_locked( pNode );
// fix_height_locked: refreshes pNode's cached height based on
// estimate_node_condition(). When a structural fix is required
// (rebalance_required / unlink_required) pNode itself is the damaged
// node to hand back; when nothing is required there is no damage.
// Otherwise h is the corrected height: store it and propagate the
// damage to the parent. (Some switch branches are elided in this
// extract - behavior of the case labels inferred; confirm with source.)
1397 node_type * fix_height_locked( node_type * pNode )
1399 // pNode must be locked!!!
1400 int h = estimate_node_condition( pNode );
1402 case rebalance_required:
1403 case unlink_required:
1405 case nothing_required:
// default: h is the corrected height - store it and continue upward
1408 pNode->height( h, memory_model::memory_order_relaxed );
1409 return parent( pNode, memory_model::memory_order_relaxed );
// fix_height_and_rebalance: walks from pNode toward the root repairing
// stale heights and performing rotations until no damage remains.
// Terminates at the root (node with no parent), when the node needs
// nothing, or when the node was unlinked concurrently. For a structural
// fix it locks parent first, then node, and re-validates the parent link
// under the parent's lock before rebalancing.
1413 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1415 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1416 int nCond = estimate_node_condition( pNode );
1417 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// only a stale height: cheap path under a single node lock
1420 if ( nCond != unlink_required && nCond != rebalance_required )
1421 pNode = fix_height( pNode );
1423 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1424 assert( pParent != nullptr );
1426 node_scoped_lock lp( m_Monitor, *pParent );
// re-validate: parent still linked and still pNode's parent
1427 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1428 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1430 node_scoped_lock ln( m_Monitor, *pNode );
// rebalance_locked returns the next damaged node (or nullptr)
1431 pNode = rebalance_locked( pParent, pNode, disp );
// rebalance_locked: repairs pNode, known to be damaged. Either unlinks a
// value-less semi-leaf, rotates right/left when the balance factor
// exceeds +/-1, or simply refreshes the cached height. Returns the next
// damaged node to process, or nullptr when the subtree is clean.
1438 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1440 // pParent and pNode should be locked.
1441 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1442 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1443 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1444 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1446 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1447 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// value-less node missing a child: splice it out instead of rotating
1449 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1450 if ( try_unlink_locked( pParent, pNode, disp ))
1451 return fix_height_locked( pParent );
1453 // retry needed for pNode
1458 int h = pNode->height( memory_model::memory_order_relaxed );
1459 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1460 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1461 int hNew = 1 + std::max( hL, hR );
1462 int balance = hL - hR;
// left-heavy => rotate right; right-heavy => rotate left
1465 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1466 else if ( balance < -1 )
1467 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1468 else if ( hNew != h ) {
// balanced but cached height stale: update and propagate to parent
1469 pNode->height( hNew, memory_model::memory_order_relaxed );
1471 // pParent is already locked
1472 return fix_height_locked( pParent );
// rebalance_to_right_locked: pNode's left subtree is too tall. Locks
// pLeft, re-validates the link and heights, and chooses between a single
// right rotation and a double (left-over-right) rotation depending on
// the relative heights of pLeft's children. Returns the next node that
// needs attention (pNode itself signals "retry for pNode").
1478 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1480 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1481 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1482 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1484 // pParent and pNode is locked yet
1485 // pNode->pLeft is too large, we will rotate-right.
1486 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1489 assert( pLeft != nullptr );
1490 node_scoped_lock l( m_Monitor, *pLeft );
// left child changed while we were acquiring its lock - retry
1491 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1492 return pNode; // retry for pNode
1494 int hL = pLeft->height( memory_model::memory_order_relaxed );
1496 return pNode; // retry
1498 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1499 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1500 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1501 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// single rotation suffices when pLeft is not right-heavy
1505 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1508 assert( pLRight != nullptr );
// lock the grandchild that would be lifted by a double rotation
1510 node_scoped_lock lr( m_Monitor, *pLRight );
1511 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1512 return pNode; // retry
// re-read height under the lock; it may have changed
1514 hLR = pLRight->height( memory_model::memory_order_relaxed );
1516 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1518 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1519 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1520 int balance = hLL - hLRL;
1521 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1522 // nParent.child.left won't be damaged after a double rotation
1523 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1527 // focus on pLeft, if necessary pNode will be balanced later
1528 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// rebalance_to_left_locked: mirror of rebalance_to_right_locked().
// pNode's right subtree is too tall: lock pRight, re-validate, and pick
// a single left rotation or a double (right-over-left via
// rotate_left_over_right_locked) rotation based on pRight's children.
// Returns the next node that needs attention (pNode signals retry).
1533 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1535 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1536 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1537 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1539 // pParent and pNode is locked yet
1541 assert( pRight != nullptr );
1542 node_scoped_lock l( m_Monitor, *pRight );
// right child changed while we were acquiring its lock - retry
1543 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1544 return pNode; // retry for pNode
1546 int hR = pRight->height( memory_model::memory_order_relaxed );
// imbalance disappeared under the lock - nothing to rotate here
1547 if ( hL - hR >= -1 )
1548 return pNode; // retry
1550 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1551 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1552 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1553 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// single rotation suffices when pRight is not left-heavy
1555 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1558 assert( pRLeft != nullptr );
// lock the grandchild that would be lifted by a double rotation
1559 node_scoped_lock lrl( m_Monitor, *pRLeft );
1560 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1561 return pNode; // retry
// re-read height under the lock; it may have changed
1563 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1565 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1567 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1568 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1569 int balance = hRR - hRLR;
1570 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1571 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// focus on pRight; pNode will be balanced later if still needed
1573 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// begin_change: publishes the 'shrinking' flag in the node's version so
// optimistic readers traversing through pNode know a rotation is in
// progress and must wait/retry. Paired with end_change().
1577 static void begin_change( node_type * pNode, version_type version )
1579 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// end_change: completes a shrink started by begin_change(). Setting all
// flag bits then adding 1 carries out of the flag field, yielding a new
// version with the flags cleared - readers that sampled the old version
// will fail validation and retry.
1581 static void end_change( node_type * pNode, version_type version )
1583 // Clear shrinking and unlinked flags and increment version
1584 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// rotate_right_locked: single right rotation lifting pLeft above pNode.
// The rotation is bracketed by begin_change/end_change on pNode so
// optimistic readers passing through pNode retry. Links are rewritten in
// an order safe for concurrent traversal; heights are recomputed from
// the hint heights (hR, hLL, hLR) passed by the caller. Returns the next
// damaged node, or the result of fixing pParent's height.
// Caller must hold the locks on pParent, pNode and pLeft.
1587 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1589 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1590 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// mark pNode as shrinking for the duration of the relink
1592 begin_change( pNode, nodeVersion );
// pLeft's right subtree becomes pNode's left subtree
1594 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1595 if ( pLRight != nullptr )
1596 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pLeft's right child
1598 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1599 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// attach pLeft to pParent in pNode's former slot
1601 if ( pParentLeft == pNode )
1602 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1604 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1605 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1607 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1609 // fix up heights links
1610 int hNode = 1 + std::max( hLR, hR );
1611 pNode->height( hNode, memory_model::memory_order_relaxed );
1612 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// publish the finished rotation (bumps pNode's version)
1614 end_change( pNode, nodeVersion );
1615 m_stat.onRotateRight();
1617 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1618 // parent.child). pNode is the deepest. Perform as many fixes as we can
1619 // with the locks we've got.
1621 // We've already fixed the height for pNode, but it might still be outside
1622 // our allowable balance range. In that case a simple fix_height_locked()
1624 int nodeBalance = hLR - hR;
1625 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1626 // we need another rotation at pNode
1630 // we've fixed balance and height damage for pNode, now handle
1631 // extra-routing node damage
1632 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1633 // we need to remove pNode and then repair
1637 // we've already fixed the height at pLeft, do we need a rotation here?
1638 int leftBalance = hLL - hNode;
1639 if ( leftBalance < -1 || leftBalance > 1 )
1642 // pLeft might also have routing node damage (if pLeft.left was null)
1643 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1646 // try to fix the parent height while we've still got the lock
1647 return fix_height_locked( pParent );
// rotate_left_locked: mirror of rotate_right_locked() - single left
// rotation lifting pRight above pNode, bracketed by begin_change /
// end_change on pNode. Heights are recomputed from the hint heights
// (hL, hRL, hRR). Returns the next damaged node, or the result of
// fixing pParent's height. Caller holds locks on pParent, pNode, pRight.
1650 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1652 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1653 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// mark pNode as shrinking for the duration of the relink
1655 begin_change( pNode, nodeVersion );
1657 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1658 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1659 if ( pRLeft != nullptr )
1660 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pRight's left child
1662 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1663 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// attach pRight to pParent in pNode's former slot
1665 if ( pParentLeft == pNode )
1666 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1668 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1669 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1671 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1674 int hNode = 1 + std::max( hL, hRL );
1675 pNode->height( hNode, memory_model::memory_order_relaxed );
1676 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
// publish the finished rotation (bumps pNode's version)
1678 end_change( pNode, nodeVersion );
1679 m_stat.onRotateLeft();
// post-rotation damage checks, same structure as rotate_right_locked()
1681 int nodeBalance = hRL - hL;
1682 if ( nodeBalance < -1 || nodeBalance > 1 )
1685 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1688 int rightBalance = hRR - hNode;
1689 if ( rightBalance < -1 || rightBalance > 1 )
1692 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1695 return fix_height_locked( pParent );
// rotate_right_over_left_locked: double rotation. pLRight (left child's
// right child) is lifted into pNode's place; pLeft becomes its left
// child and pNode its right child. BOTH pNode and pLeft are marked as
// shrinking (begin_change/end_change) for the duration. Heights are
// recomputed from the hint heights (hR, hLL, hLRL). Returns the next
// damaged node or the result of fixing pParent's height.
// Caller must hold locks on pParent, pNode, pLeft and pLRight.
1698 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1700 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1701 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1703 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1704 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1705 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1706 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// both rotated nodes are marked shrinking before any link changes
1708 begin_change( pNode, nodeVersion );
1709 begin_change( pLeft, leftVersion );
1711 // fix up pNode links, careful about the order!
1712 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1713 if ( pLRR != nullptr )
1714 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pLRight's left subtree moves under pLeft
1716 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1717 if ( pLRL != nullptr )
1718 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight takes pNode's place with pLeft / pNode as its children
1720 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1721 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1722 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1723 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// attach pLRight to pParent in pNode's former slot
1726 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1728 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1729 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1731 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1734 int hNode = 1 + std::max( hLRR, hR );
1735 pNode->height( hNode, memory_model::memory_order_relaxed );
1736 int hLeft = 1 + std::max( hLL, hLRL );
1737 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1738 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
// publish both finished shrinks (bumps versions, clears flags)
1740 end_change( pNode, nodeVersion );
1741 end_change( pLeft, leftVersion );
1742 m_stat.onRotateRightOverLeft();
1744 // caller should have performed only a single rotation if pLeft was going
1745 // to end up damaged
1746 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1747 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1749 // We have damaged pParent, pLR (now parent.child), and pNode (now
1750 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1751 // can with the locks we've got.
1753 // We've already fixed the height for pNode, but it might still be outside
1754 // our allowable balance range. In that case a simple fix_height_locked()
1756 int nodeBalance = hLRR - hR;
1757 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1758 // we need another rotation at pNode
1762 // pNode might also be damaged by being an unnecessary routing node
1763 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1764 // repair involves splicing out pNode and maybe more rotations
1768 // we've already fixed the height at pLRight, do we need a rotation here?
1769 int balanceLR = hLeft - hNode;
1770 if ( balanceLR < -1 || balanceLR > 1 )
1773 // try to fix the parent height while we've still got the lock
1774 return fix_height_locked( pParent );
// rotate_left_over_right_locked: mirror of rotate_right_over_left_locked.
// pRLeft (right child's left child) is lifted into pNode's place; pNode
// becomes its left child and pRight its right child. Both pNode and
// pRight are marked as shrinking for the duration. Heights are recomputed
// from the hint heights (hL, hRR, hRLR). Returns the next damaged node or
// the result of fixing pParent's height.
// Caller must hold locks on pParent, pNode, pRight and pRLeft.
1777 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1779 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1780 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1782 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1783 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1784 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1785 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// both rotated nodes are marked shrinking before any link changes
1787 begin_change( pNode, nodeVersion );
1788 begin_change( pRight, rightVersion );
1790 // fix up pNode links, careful about the order!
1791 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1792 if ( pRLL != nullptr )
1793 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pRLeft's right subtree moves under pRight
1795 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1796 if ( pRLR != nullptr )
1797 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft takes pNode's place with pNode / pRight as its children
1799 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1800 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1801 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1802 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// attach pRLeft to pParent in pNode's former slot
1805 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1807 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1808 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1810 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1813 int hNode = 1 + std::max( hL, hRLL );
1814 pNode->height( hNode, memory_model::memory_order_relaxed );
1815 int hRight = 1 + std::max( hRLR, hRR );
1816 pRight->height( hRight, memory_model::memory_order_relaxed );
1817 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
// publish both finished shrinks (bumps versions, clears flags)
1819 end_change( pNode, nodeVersion );
1820 end_change( pRight, rightVersion );
1821 m_stat.onRotateLeftOverRight();
// caller should have done a single rotation if pRight would end damaged
1823 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// post-rotation damage checks, mirror of rotate_right_over_left_locked
1825 int nodeBalance = hRLL - hL;
1826 if ( nodeBalance < -1 || nodeBalance > 1 )
1828 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1831 int balRL = hRight - hNode;
1832 if ( balRL < -1 || balRL > 1 )
1835 return fix_height_locked( pParent );
1840 }} // namespace cds::container
1842 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H