3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
10 namespace cds { namespace container {
12 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
13 /** @ingroup cds_nonintrusive_map
14 @ingroup cds_nonintrusive_tree
15 @headerfile cds/container/bronson_avltree_map_rcu.h
17 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
18 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
19 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
20 the disposer functor provided by \p Traits template parameter.
22 <b>Template arguments</b>:
23 - \p RCU - one of \ref cds_urcu_gc "RCU type"
25 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
27 - \p Traits - tree traits, default is \p bronson_avltree::traits
28 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
29 instead of \p Traits template argument.
31 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
32 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
38 # ifdef CDS_DOXYGEN_INVOKED
39 typename Traits = bronson_avltree::traits
44 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
47 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
48 typedef Key key_type; ///< type of a key stored in the map
49 typedef T * mapped_type; ///< type of value stored in the map
50 typedef Traits traits; ///< Traits template parameter
52 # ifdef CDS_DOXYGEN_INVOKED
53 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
55 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
57 typedef typename traits::item_counter item_counter; ///< Item counting policy
58 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
59 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
60 typedef typename traits::stat stat; ///< internal statistics
61 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
62 typedef typename traits::back_off back_off; ///< Back-off strategy
63 typedef typename traits::disposer disposer; ///< Value disposer
64 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
66 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
67 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
69 /// Returned pointer to \p mapped_type of extracted node
70 typedef std::unique_ptr< T, disposer > unique_ptr;
72 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
76 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
77 typedef typename node_type::version_type version_type;
79 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
80 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
82 enum class find_result
99 result_inserted = allow_insert,
100 result_updated = allow_update,
107 nothing_required = -3,
108 rebalance_required = -2,
117 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// Allocates a new tree node via the Traits-provided node allocator, forwarding the key.
// Height/version/parent/child links are set at construction; the value pointer is published later.
122 template <typename K>
123 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
125 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Deallocates the node itself; the node's value must already have been disposed elsewhere.
128 static void free_node( node_type * pNode )
130 // Free node without disposer
131 cxx_allocator().Delete( pNode );
// Disposes a user value. NOTE(review): body elided in this listing - presumably invokes
// the Traits::disposer functor on pVal; confirm against the full source.
134 static void free_value( mapped_type pVal )
// Atomically reads pNode's child link in direction nDir with the given memory order.
139 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
141 return pNode->child( nDir ).load( order );
// Atomically reads pNode's parent link with the given memory order.
144 static node_type * parent( node_type * pNode, atomics::memory_order order )
146 return pNode->m_pParent.load( order );
149 // RCU safe disposer for node
// Collects nodes unlinked during an RCU-protected operation and hands them to the
// garbage collector for deferred reclamation when this object goes out of scope.
152 node_type * m_pRetiredList; ///< head of retired list
156 : m_pRetiredList( nullptr )
// Pushes an unlinked node onto the intrusive retired list.
// Precondition: the node's value has already been disposed (asserted below).
164 void dispose( node_type * pNode )
166 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
167 pNode->m_pNextRemoved = m_pRetiredList;
168 m_pRetiredList = pNode;
// Functor handed to gc::retire_ptr to free the node memory after a grace period.
172 struct internal_disposer
174 void operator()( node_type * p ) const
// Destructor: must run OUTSIDE the RCU read-side lock (asserted); walks the retired
// list and retires each node to the GC. Values were disposed before retirement.
182 assert( !gc::is_locked() );
184 for ( node_type * p = m_pRetiredList; p; ) {
185 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
186 // Value already disposed
187 gc::template retire_ptr<internal_disposer>( p );
// Sentinel root node: the real tree hangs off m_Root's right child (see empty()).
197 typename node_type::base_class m_Root;
// Item counter per Traits::item_counter (may be a no-op counter).
199 item_counter m_ItemCounter;
// Node-level synchronization monitor; mutable so const searches can lock nodes.
200 mutable sync_monitor m_Monitor;
205 /// Creates empty map
// m_pRoot caches the sentinel as a node_type* for the traversal helpers.
207 : m_pRoot( static_cast<node_type *>( &m_Root ))
218 The \p key_type should be constructible from a value of type \p K.
220 RCU \p synchronize() can be called. RCU should not be locked.
222 Returns \p true if inserting successful, \p false otherwise.
// insert() is do_update() restricted to allow_insert: the functor publishes pVal into the
// freshly created (or empty) node; the assert guards that no value is overwritten.
224 template <typename K>
225 bool insert( K const& key, mapped_type pVal )
227 return do_update(key, key_comparator(),
228 [pVal]( node_type * pNode ) -> mapped_type
230 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
233 update_flags::allow_insert
234 ) == update_flags::result_inserted;
237 /// Updates the value for \p key
239 The operation performs inserting or updating the value for \p key with lock-free manner.
240 If \p bInsert is \p false, only updating of existing node is possible.
242 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
243 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
244 constructible from type \p K.
245 Otherwise, the value is changed to \p pVal.
247 RCU \p synchronize() method can be called. RCU should not be locked.
249 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
250 \p second is \p true if new node has been added or \p false if the node with \p key
251 already is in the tree.
253 template <typename K, typename Func>
254 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
256 int result = do_update( key, key_comparator(),
257 [pVal]( node_type * ) -> mapped_type
261 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
263 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
266 /// Delete \p key from the map
268 RCU \p synchronize() method can be called. RCU should not be locked.
270 Return \p true if \p key is found and deleted, \p false otherwise
// Erases the item: the lambda frees the value and returns true ("value was disposed"),
// which is the contract do_remove() expects (see do_remove's comment).
272 template <typename K>
273 bool erase( K const& key )
278 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
282 /// Deletes the item from the map using \p pred predicate for searching
284 The function is an analog of \p erase(K const&)
285 but \p pred is used for key comparing.
286 \p Less functor has the interface like \p std::less.
287 \p Less must imply the same element order as the comparator used for building the map.
// Same as erase(K const&) but the comparator is built from the user-supplied Less predicate.
289 template <typename K, typename Less>
290 bool erase_with( K const& key, Less pred )
295 cds::opt::details::make_comparator_from_less<Less>(),
296 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
300 /// Delete \p key from the map
302 The function searches an item with key \p key, calls \p f functor
303 and deletes the item. If \p key is not found, the functor is not called.
305 The functor \p Func interface:
308 void operator()(mapped_type& item) { ... }
312 RCU \p synchronize method can be called. RCU should not be locked.
314 Return \p true if key is found and deleted, \p false otherwise
// Erase with a user functor: f is applied to the value before it is disposed
// (functor invocation and free_value call are elided in this listing).
316 template <typename K, typename Func>
317 bool erase( K const& key, Func f )
322 [&f]( mapped_type pVal ) -> bool {
331 /// Deletes the item from the map using \p pred predicate for searching
333 The function is an analog of \p erase(K const&, Func)
334 but \p pred is used for key comparing.
335 \p Less functor has the interface like \p std::less.
336 \p Less must imply the same element order as the comparator used for building the map.
// Same as erase(K const&, Func) with the comparator derived from Less.
338 template <typename K, typename Less, typename Func>
339 bool erase_with( K const& key, Less pred, Func f )
344 cds::opt::details::make_comparator_from_less<Less>(),
345 [&f]( mapped_type pVal ) -> bool {
354 /// Extracts an item with minimal key from the map
356 Returns \p std::unique_ptr to the leftmost item.
357 If the set is empty, returns empty \p std::unique_ptr.
359 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
360 It means that the function gets leftmost leaf of the tree and tries to unlink it.
361 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
362 So, the function returns the item with minimum key at the moment of tree traversing.
364 RCU \p synchronize method can be called. RCU should NOT be locked.
365 The function does not free the item.
366 The deallocator will be implicitly invoked when the returned object is destroyed or when
367 its \p reset(nullptr) member function is called.
369 unique_ptr extract_min()
375 /// Extracts an item with maximal key from the map
377 Returns \p std::unique_ptr pointer to the rightmost item.
378 If the set is empty, returns empty \p std::unique_ptr.
380 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
381 It means that the function gets rightmost leaf of the tree and tries to unlink it.
382 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
383 So, the function returns the item with maximum key at the moment of tree traversing.
385 RCU \p synchronize method can be called. RCU should NOT be locked.
386 The function does not free the item.
387 The deallocator will be implicitly invoked when the returned object is destroyed or when
388 its \p reset(nullptr) is called.
389 @note Before reusing \p result object you should call its \p release() method.
391 unique_ptr extract_max()
397 /// Extracts an item from the map
399 The function searches an item with key equal to \p key in the tree,
400 unlinks it, and returns \p std::unique_ptr pointer to a value found.
401 If \p key is not found the function returns an empty \p std::unique_ptr.
403 RCU \p synchronize method can be called. RCU should NOT be locked.
404 The function does not destroy the value found.
405 The disposer will be implicitly invoked when the returned object is destroyed or when
406 its \p reset(nullptr) member function is called.
// Extraction reuses the removal path: the lambda captures the value into the unique_ptr
// and returns false, signalling do_remove() that the value was extracted, not disposed.
408 template <typename Q>
409 unique_ptr extract( Q const& key )
411 unique_ptr pExtracted;
416 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
421 /// Extracts an item from the map using \p pred for searching
423 The function is an analog of \p extract(Q const&)
424 but \p pred is used for key compare.
425 \p Less has the interface like \p std::less.
426 \p pred must imply the same element order as the comparator used for building the tree.
// Same as extract(Q const&) with the comparator derived from Less.
428 template <typename Q, typename Less>
429 unique_ptr extract_with( Q const& key, Less pred )
432 unique_ptr pExtracted;
436 cds::opt::details::make_comparator_from_less<Less>(),
437 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
442 /// Find the key \p key
444 The function searches the item with key equal to \p key and calls the functor \p f for item found.
445 The interface of \p Func functor is:
448 void operator()( key_type const& key, mapped_type& item );
451 where \p item is the item found.
452 The functor is called under node-level lock.
454 The function applies RCU lock internally.
456 The function returns \p true if \p key is found, \p false otherwise.
// Finds key and applies f(key, value) to the item found.
// do_find()'s inner functor runs under the node-level lock taken in try_find().
458 template <typename K, typename Func>
459 bool find( K const& key, Func f )
461 return do_find( key, key_comparator(),
462 [&f]( node_type * pNode ) -> bool {
463 assert( pNode != nullptr );
464 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
466 f( pNode->m_key, *pVal );
474 /// Finds the key \p val using \p pred predicate for searching
476 The function is an analog of \p find(K const&, Func)
477 but \p pred is used for key comparing.
478 \p Less functor has the interface like \p std::less.
479 \p Less must imply the same element order as the comparator used for building the map.
// Same as find(K const&, Func) with the comparator derived from Less.
481 template <typename K, typename Less, typename Func>
482 bool find_with( K const& key, Less pred, Func f )
485 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
486 [&f]( node_type * pNode ) -> bool {
487 assert( pNode != nullptr );
488 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
490 f( pNode->m_key, *pVal );
498 /// Find the key \p key
500 The function searches the item with key equal to \p key
501 and returns \p true if it is found, and \p false otherwise.
503 The function applies RCU lock internally.
// Presence check only: the no-op functor just confirms the node was found.
505 template <typename K>
506 bool find( K const& key )
508 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
511 /// Finds the key \p val using \p pred predicate for searching
513 The function is an analog of \p find(K const&)
514 but \p pred is used for key comparing.
515 \p Less functor has the interface like \p std::less.
516 \p Less must imply the same element order as the comparator used for building the map.
// Presence check with the comparator derived from Less.
518 template <typename K, typename Less>
519 bool find_with( K const& key, Less pred )
522 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
525 /// Clears the tree (thread safe, not atomic)
527 The function unlink all items from the tree.
528 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
532 assert( set.empty() );
534 the assertion could be raised.
536 For each node the \ref disposer will be called after unlinking.
538 RCU \p synchronize method can be called. RCU should not be locked.
542 while ( extract_min() );
545 /// Clears the tree (not thread safe)
547 This function is not thread safe and may be called only when no other thread deals with the tree.
548 The function is used in the tree destructor.
555 /// Checks if the map is empty
// The map is empty iff the sentinel root has no right child (the whole tree hangs there).
558 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
561 /// Returns item count in the map
563 Only leaf nodes containing user data are counted.
565 The value returned depends on item counter type provided by \p Traits template parameter.
566 If it is \p atomicity::empty_item_counter this function always returns 0.
568 The function is not suitable for checking the tree emptiness, use \p empty()
569 member function for this purpose.
573 return m_ItemCounter;
576 /// Returns const reference to internal statistics
577 stat const& statistics() const
582 /// Checks internal consistency (not atomic, not thread-safe)
584 The debugging function to check internal consistency of the tree.
586 bool check_consistency() const
// Common search driver: runs try_find from the sentinel root (direction 1 = right,
// version 0) and maps the result to bool. NOTE(review): the RCU read lock and the
// retry loop around try_find are elided in this listing - confirm in full source.
594 template <typename Q, typename Compare, typename Func>
595 bool do_find( Q& key, Compare cmp, Func f ) const
600 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
602 assert( result != find_result::retry );
603 return result == find_result::found;
// Common insert/update driver. Verifies RCU is not already locked (deadlock check),
// then runs try_update_root; removed_list retires any unlinked nodes on scope exit,
// after the RCU critical section ends.
606 template <typename K, typename Compare, typename Func>
607 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
609 check_deadlock_policy::check();
611 rcu_disposer removed_list;
614 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
// Common removal driver for erase()/extract(). The functor decides the value's fate
// (disposed vs extracted); nodes unlinked along the way are retired by removed_list.
618 template <typename K, typename Compare, typename Func>
619 bool do_remove( K const& key, Compare cmp, Func func )
621 // Func must return true if the value was disposed
622 // or false if the value was extracted
624 check_deadlock_policy::check();
626 rcu_disposer removed_list;
629 return try_remove_root( key, cmp, func, removed_list );
// Optimistic hand-over-hand search (Bronson et al.): descend from pNode in direction
// nDir, re-validating pNode's version after each unprotected child read. A version
// mismatch or an unlinked child returns retry so the caller restarts at its level;
// a child marked "shrinking" (mid-rotation) is waited out first.
// Must be called inside the RCU read-side lock (asserted).
637 template <typename Q, typename Compare, typename Func>
638 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
640 assert( gc::is_locked() );
644 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Validate that pNode did not change while we read the child link.
646 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
647 m_stat.onFindRetry();
648 return find_result::retry;
651 m_stat.onFindFailed();
652 return find_result::not_found;
655 int nCmp = cmp( key, pChild->m_key );
// Key matched: double-check "valued" under the node lock before reporting found,
// because a concurrent remove may have cleared the value after the first check.
657 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
659 node_scoped_lock l( m_Monitor, *pChild );
660 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
662 m_stat.onFindSuccess();
663 return find_result::found;
668 m_stat.onFindFailed();
669 return find_result::not_found;
672 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
673 if ( nChildVersion & node_type::shrinking ) {
// Child is being rotated; wait until the shrink finishes, then re-validate pNode.
674 m_stat.onFindWaitShrinking();
675 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
677 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
678 m_stat.onFindRetry();
679 return find_result::retry;
682 else if ( nChildVersion != node_type::unlinked ) {
684 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
685 m_stat.onFindRetry();
686 return find_result::retry;
// Recurse one level down; only a retry at the child's level bubbles back here.
689 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
690 if ( found != find_result::retry )
696 template <typename K, typename Compare, typename Func>
697 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
699 assert( gc::is_locked() );
703 // get right child of root
704 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
706 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
707 if ( nChildVersion & node_type::shrinking ) {
708 m_stat.onUpdateRootWaitShrinking();
709 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
710 result = update_flags::retry;
712 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
713 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
718 if ( nFlags & update_flags::allow_insert ) {
719 // insert into tree as right child of the root
721 node_scoped_lock l( m_Monitor, *m_pRoot );
722 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
723 result = result == update_flags::retry;
727 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
728 mapped_type pVal = funcUpdate( pNew );
729 assert( pVal != nullptr );
730 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
732 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
733 m_pRoot->height( 2, memory_model::memory_order_relaxed );
737 m_stat.onInsertSuccess();
738 return update_flags::result_inserted;
741 return update_flags::failed;
743 } while ( result == update_flags::retry );
// Entry point of removal: same root-level protocol as try_update_root - wait out a
// shrinking child, re-validate the root's right-child link, then descend via try_remove,
// looping while the result is retry. Must run under the RCU read lock.
747 template <typename K, typename Compare, typename Func>
748 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
750 assert( gc::is_locked() );
754 // get right child of root
755 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
757 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
758 if ( nChildVersion & node_type::shrinking ) {
759 m_stat.onUpdateRootWaitShrinking();
760 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
761 result = update_flags::retry;
763 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
764 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
769 } while ( result == update_flags::retry );
771 return result == update_flags::result_removed;
// Recursive insert/update step at pNode (reached from pParent at version nVersion).
// On key match: update in place (or fail if updates are disallowed). Otherwise descend
// toward the key, re-validating pNode's version around the unprotected child read;
// a null child means insert here (if allowed), a shrinking child is waited out.
774 template <typename K, typename Compare, typename Func>
775 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
777 assert( gc::is_locked() );
778 assert( nVersion != node_type::unlinked );
779 CDS_UNUSED( pParent );
781 int nCmp = cmp( key, pNode->m_key );
783 if ( nFlags & update_flags::allow_update ) {
784 return try_update_node( funcUpdate, pNode );
786 return update_flags::failed;
791 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
792 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
793 return update_flags::retry;
795 if ( pChild == nullptr ) {
797 if ( nFlags & update_flags::allow_insert )
798 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
800 result = update_flags::failed;
804 result = update_flags::retry;
805 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
806 if ( nChildVersion & node_type::shrinking ) {
807 m_stat.onUpdateWaitShrinking();
808 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
811 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
812 // this second read is important, because it is protected by nChildVersion
814 // validate the read that our caller took to get to node
815 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
816 m_stat.onUpdateRetry();
817 return update_flags::retry;
820 // At this point we know that the traversal our parent took to get to node is still valid.
821 // The recursive implementation will validate the traversal from node to
822 // child, so just prior to the node nVersion validation both traversals were definitely okay.
823 // This means that we are no longer vulnerable to node shrinks, and we don't need
824 // to validate node version any more.
825 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
828 } while ( result == update_flags::retry );
// Recursive removal step, mirroring try_update's descent protocol: on key match
// delegate to try_remove_node; otherwise descend with version re-validation,
// waiting out shrinking children and retrying on version mismatch.
832 template <typename K, typename Compare, typename Func>
833 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
835 assert( gc::is_locked() );
836 assert( nVersion != node_type::unlinked );
838 int nCmp = cmp( key, pNode->m_key );
840 return try_remove_node( pParent, pNode, nVersion, func, disp );
844 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
845 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
846 return update_flags::retry;
848 if ( pChild == nullptr ) {
849 return update_flags::failed;
853 result = update_flags::retry;
854 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
855 if ( nChildVersion & node_type::shrinking ) {
856 m_stat.onUpdateWaitShrinking();
857 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
860 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
861 // this second read is important, because it is protected by nChildVersion
863 // validate the read that our caller took to get to node
864 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
865 m_stat.onUpdateRetry();
866 return update_flags::retry;
869 // At this point we know that the traversal our parent took to get to node is still valid.
870 // The recursive implementation will validate the traversal from node to
871 // child, so just prior to the node nVersion validation both traversals were definitely okay.
872 // This means that we are no longer vulnerable to node shrinks, and we don't need
873 // to validate node version any more.
874 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
877 } while ( result == update_flags::retry );
// Links a new leaf as pNode's nDir child. With relaxed insert enabled the node is
// allocated BEFORE taking the node lock (shorter critical section); the insertion
// slot is then re-checked under the lock and the speculative node is discarded on
// conflict. Without relaxed insert the node is allocated under the lock.
881 template <typename K, typename Func>
882 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: run the user functor and publish the returned value into the new node.
886 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
887 mapped_type pVal = funcUpdate( pNew );
888 assert( pVal != nullptr );
889 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
892 if ( c_bRelaxedInsert ) {
// Cheap pre-check before allocating: bail out early if the slot is already stale.
893 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
894 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
896 m_stat.onInsertRetry();
897 return update_flags::retry;
900 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
903 node_type * pDamaged;
905 assert( pNode != nullptr );
906 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and the slot still free.
908 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
909 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
911 if ( c_bRelaxedInsert ) {
913 m_stat.onRelaxedInsertFailed();
916 m_stat.onInsertRetry();
917 return update_flags::retry;
920 if ( !c_bRelaxedInsert )
921 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
923 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
924 pDamaged = fix_height_locked( pNode );
928 m_stat.onInsertSuccess();
// Rebalance upward starting at the first height-damaged ancestor.
930 fix_height_and_rebalance( pDamaged, disp );
932 return update_flags::result_inserted;
// In-place value replacement under the node lock. Retries if the node was unlinked
// concurrently; otherwise swaps in the functor's new value and disposes the old one
// (disposal statements partially elided in this listing).
935 template <typename Func>
936 int try_update_node( Func funcUpdate, node_type * pNode )
939 assert( pNode != nullptr );
941 node_scoped_lock l( m_Monitor, *pNode );
943 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
944 m_stat.onUpdateUnlinked();
945 return update_flags::retry;
948 pOld = pNode->value( memory_model::memory_order_relaxed );
949 mapped_type pVal = funcUpdate( pNode );
953 assert( pVal != nullptr );
954 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
960 m_stat.onDisposeValue();
963 m_stat.onUpdateSuccess();
964 return update_flags::result_updated;
// Removes the value of pNode. Two routes (Bronson et al.):
//  - node with at most one child: lock parent then node, validate versions/links,
//    physically unlink the node (try_unlink_locked) and fix heights upward;
//  - interior routing node (two children): cannot be unlinked safely, so only its
//    value pointer is cleared, leaving it as a routing node.
// func(pOld) decides the old value's fate: true = disposed, false = extracted.
967 template <typename Func>
968 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
970 assert( pParent != nullptr );
971 assert( pNode != nullptr );
973 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
974 return update_flags::failed;
976 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
977 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
// Route 1: leaf or single-child node - physical unlink under parent+node locks.
979 node_type * pDamaged;
982 node_scoped_lock lp( m_Monitor, *pParent );
983 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
984 return update_flags::retry;
987 node_scoped_lock ln( m_Monitor, *pNode );
988 pOld = pNode->value( memory_model::memory_order_relaxed );
989 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
991 && try_unlink_locked( pParent, pNode, disp )))
993 return update_flags::retry;
996 pDamaged = fix_height_locked( pParent );
1000 if ( func( pOld ) ) // calls pOld disposer inside
1001 m_stat.onDisposeValue();
1003 m_stat.onExtractValue();
1005 fix_height_and_rebalance( pDamaged, disp );
1006 return update_flags::result_removed;
// Route 2: two children - just clear the value under the node lock.
1009 int result = update_flags::retry;
1012 node_scoped_lock ln( m_Monitor, *pNode );
1013 pOld = pNode->value( atomics::memory_order_relaxed );
1014 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1015 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1016 result = update_flags::result_removed;
1020 if ( result == update_flags::result_removed ) {
1022 if ( func( pOld )) // calls pOld disposer inside
1023 m_stat.onDisposeValue();
1025 m_stat.onExtractValue();
// Physically unlinks pNode (which has at most one child) by splicing its single
// child - or nullptr - into pParent's slot. Both nodes must be locked by the caller.
// Returns false if the topology changed and splicing is no longer possible.
1032 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1034 // pParent and pNode must be locked
1035 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1037 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1038 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1039 if ( pNode != pParentLeft && pNode != pParentRight ) {
1040 // node is no longer a child of parent
1044 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1045 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1047 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1048 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1049 if ( pLeft != nullptr && pRight != nullptr ) {
1050 // splicing is no longer possible
1053 node_type * pSplice = pLeft ? pLeft : pRight;
1055 if ( pParentLeft == pNode )
1056 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1058 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1061 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1063 // Mark the node as unlinked
1064 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1066 // The value will be disposed by calling function
1067 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Retire the node; memory is reclaimed after the RCU grace period.
1069 disp.dispose( pNode );
1070 m_stat.onDisposeNode();
1077 private: // rotations
1079 int estimate_node_condition( node_type * pNode )
1081 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1082 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1084 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1085 return unlink_required;
1087 int h = pNode->height( memory_model::memory_order_relaxed );
1088 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1089 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1091 int hNew = 1 + std::max( hL, hR );
1092 int nBalance = hL - hR;
1094 if ( nBalance < -1 || nBalance > 1 )
1095 return rebalance_required;
1097 return h != hNew ? hNew : nothing_required;
// Locks pNode and corrects its stored height; returns the next damaged ancestor.
1100 node_type * fix_height( node_type * pNode )
1102 assert( pNode != nullptr );
1103 node_scoped_lock l( m_Monitor, *pNode );
1104 return fix_height_locked( pNode );
// Same as fix_height() but pNode is already locked by the caller. If only the height
// is stale, writes the corrected height and returns the parent (next node to repair);
// rebalance/unlink conditions are handled by the (elided) switch branches.
1107 node_type * fix_height_locked( node_type * pNode )
1109 // pNode must be locked!!!
1110 int h = estimate_node_condition( pNode );
1112 case rebalance_required:
1113 case unlink_required:
1115 case nothing_required:
1118 pNode->height( h, memory_model::memory_order_relaxed );
1119 return parent( pNode, memory_model::memory_order_relaxed );
// Repair loop: walks from pNode toward the root (stops at the sentinel, whose parent
// is null), fixing stale heights and performing rotations where needed. Each step
// locks parent then node and re-validates the link before rebalancing.
1123 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1125 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1126 int nCond = estimate_node_condition( pNode );
1127 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
1130 if ( nCond != unlink_required && nCond != rebalance_required )
1131 pNode = fix_height( pNode );
1133 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1134 assert( pParent != nullptr );
1136 node_scoped_lock lp( m_Monitor, *pParent );
1137 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1138 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1140 node_scoped_lock ln( m_Monitor, *pNode );
1141 pNode = rebalance_locked( pParent, pNode, disp );
// Rebalances pNode with both pParent and pNode locked. Either unlinks a valueless
// semi-leaf, rotates right/left when the balance factor exceeds 1, or just fixes
// the height. Returns the next damaged node to repair, or nullptr when done.
1148 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1150 // pParent and pNode should be locked.
1151 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1152 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1153 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1154 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1156 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1157 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// A valueless node with a missing child can simply be spliced out.
1159 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1160 if ( try_unlink_locked( pParent, pNode, disp ))
1161 return fix_height_locked( pParent );
1163 // retry needed for pNode
1168 int h = pNode->height( memory_model::memory_order_relaxed );
1169 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1170 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1171 int hNew = 1 + std::max( hL, hR );
1172 int balance = hL - hR;
// Left-heavy -> rotate right; right-heavy -> rotate left; else fix height only.
1175 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1176 else if ( balance < -1 )
1177 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1178 else if ( hNew != h ) {
1179 pNode->height( hNew, memory_model::memory_order_relaxed );
1181 // pParent is already locked
1182 return fix_height_locked( pParent );
1188 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
// pNode's left subtree is too tall. Acquires pLeft's lock, re-validates
// the snapshot, and chooses between a single right rotation, a double
// (right-over-left) rotation, or descending to rebalance pLeft first.
// Returns the next damaged node or a node to retry.
// NOTE(review): some condition headers (e.g. the `if` before line 1215
// selecting the single rotation, and the height re-check before 1206)
// are not visible in this listing — confirm against the full file.
1190 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1191 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1192 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1194 // pParent and pNode is locked yet
1195 // pNode->pLeft is too large, we will rotate-right.
1196 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1199 assert( pLeft != nullptr );
1200 node_scoped_lock l( m_Monitor, *pLeft );
// Validate the left-child link under pLeft's lock; a mismatch means a
// concurrent modification — caller must retry at pNode.
1201 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1202 return pNode; // retry for pNode
1204 int hL = pLeft->height( memory_model::memory_order_relaxed );
1206 return pNode; // retry
// Inspect pLeft's children to decide single vs. double rotation.
1208 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1209 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1210 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1211 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Simple case: single right rotation suffices.
1215 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Otherwise pLeft's right subtree dominates: lock it and consider a
// double rotation.
1218 assert( pLRight != nullptr );
1220 node_scoped_lock lr( m_Monitor, *pLRight );
1221 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1222 return pNode; // retry
// Re-read the height under the lock; it may have changed since sampled.
1224 hLR = pLRight->height( memory_model::memory_order_relaxed );
1226 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1228 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1229 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1230 int balance = hLL - hLRL;
// Double rotation is safe only if it leaves pLeft balanced and does not
// create an empty (valueless, one-child) routing node at pLeft.
1231 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1232 // nParent.child.left won't be damaged after a double rotation
1233 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1237 // focus on pLeft, if necessary pNode will be balanced later
1238 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1243 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
// Mirror image of rebalance_to_right_locked(): pNode's right subtree is
// too tall. Locks pRight, re-validates, then performs a single left
// rotation, a double (left-over-right) rotation, or descends to
// rebalance pRight first. Returns the next damaged node or a retry node.
// NOTE(review): a few condition headers (e.g. the `if` before line 1265
// selecting the single rotation) are not visible in this listing —
// confirm against the full file.
1245 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1246 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1247 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1249 // pParent and pNode is locked yet
1251 assert( pRight != nullptr );
1252 node_scoped_lock l( m_Monitor, *pRight );
// Validate the right-child link under the lock; mismatch → concurrent
// change, caller retries at pNode.
1253 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1254 return pNode; // retry for pNode
1256 int hR = pRight->height( memory_model::memory_order_relaxed );
// If the imbalance has already been corrected concurrently, retry.
1257 if ( hL - hR >= -1 )
1258 return pNode; // retry
1260 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1261 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1262 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1263 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Simple case: single left rotation suffices.
1265 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Otherwise pRight's left subtree dominates: lock it for a possible
// double rotation.
1268 assert( pRLeft != nullptr );
1269 node_scoped_lock lrl( m_Monitor, *pRLeft );
1270 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1271 return pNode; // retry
// Re-read the height under the lock.
1273 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1275 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1277 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1278 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1279 int balance = hRR - hRLR;
// Double rotation only if it leaves pRight balanced and does not create
// an empty routing node at pRight.
1280 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1281 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise rebalance pRight first; pNode will be handled later.
1283 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1287 static void begin_change( node_type * pNode, version_type version )
1289 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1291 static void end_change( node_type * pNode, version_type version )
1293 // Clear shrinking and unlinked flags and increment version
1294 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1297 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
// Single right rotation: pLeft replaces pNode as pParent's child, pNode
// becomes pLeft's right child, and pLRight (pLeft's old right subtree)
// becomes pNode's left subtree. The begin_change/end_change version
// stores bracket the rewiring so optimistic readers detect the shrink.
// Returns the next damaged node to repair (or nullptr via
// fix_height_locked when nothing remains).
// NOTE(review): the bodies of the damage-check branches below (their
// `return` statements) are not visible in this listing — confirm
// against the full file before editing.
1299 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1300 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish the shrinking flag before touching any links.
1302 begin_change( pNode, nodeVersion );
// Rewire: pLRight moves under pNode...
1304 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1305 if ( pLRight != nullptr )
1306 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ...pNode moves under pLeft...
1308 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1309 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// ...and pLeft takes pNode's slot under pParent (left or right side).
1311 if ( pParentLeft == pNode )
1312 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1314 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1315 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1317 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1319 // fix up heights links
1320 int hNode = 1 + std::max( hLR, hR );
1321 pNode->height( hNode, memory_model::memory_order_relaxed );
1322 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
// Clear the shrinking flag and bump the version: rotation is complete.
1324 end_change( pNode, nodeVersion );
1325 m_stat.onRotateRight();
1327 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1328 // parent.child). pNode is the deepest. Perform as many fixes as we can
1329 // with the locks we've got.
1331 // We've already fixed the height for pNode, but it might still be outside
1332 // our allowable balance range. In that case a simple fix_height_locked()
1334 int nodeBalance = hLR - hR;
1335 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1336 // we need another rotation at pNode
1340 // we've fixed balance and height damage for pNode, now handle
1341 // extra-routing node damage
1342 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1343 // we need to remove pNode and then repair
1347 // we've already fixed the height at pLeft, do we need a rotation here?
1348 int leftBalance = hLL - hNode;
1349 if ( leftBalance < -1 || leftBalance > 1 )
1352 // pLeft might also have routing node damage (if pLeft.left was null)
1353 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1356 // try to fix the parent height while we've still got the lock
1357 return fix_height_locked( pParent );
1360 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
// Mirror of rotate_right_locked(): single left rotation. pRight replaces
// pNode as pParent's child, pNode becomes pRight's left child, and
// pRLeft becomes pNode's right subtree. Version stores bracket the
// rewiring for optimistic readers. Returns the next damaged node.
// NOTE(review): the `return` bodies of the damage-check branches below
// are not visible in this listing — confirm against the full file.
1362 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1363 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish the shrinking flag before rewiring.
1365 begin_change( pNode, nodeVersion );
1367 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1368 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1369 if ( pRLeft != nullptr )
1370 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode moves under pRight...
1372 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1373 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// ...and pRight takes pNode's slot under pParent (left or right side).
1375 if ( pParentLeft == pNode )
1376 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1378 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1379 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1381 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Refresh cached heights bottom-up: pNode first, then its new parent.
1384 int hNode = 1 + std::max( hL, hRL );
1385 pNode->height( hNode, memory_model::memory_order_relaxed );
1386 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
// Rotation complete: clear the shrinking flag and bump the version.
1388 end_change( pNode, nodeVersion );
1389 m_stat.onRotateLeft();
// Post-rotation damage checks (mirrors of rotate_right_locked's):
// pNode may still be unbalanced or an empty routing node; pRight may
// need another rotation or be an empty routing node itself.
1391 int nodeBalance = hRL - hL;
1392 if ( nodeBalance < -1 || nodeBalance > 1 )
1395 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1398 int rightBalance = hRR - hNode;
1399 if ( rightBalance < -1 || rightBalance > 1 )
1402 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
// Otherwise just refresh the parent's height while it is still locked.
1405 return fix_height_locked( pParent );
1408 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
// Double rotation (left-right case): pLRight is hoisted above both pLeft
// and pNode, taking pNode's slot under pParent; pLeft keeps pLRight's
// left subtree and pNode receives pLRight's right subtree. Both pNode's
// and pLeft's versions are marked shrinking for the duration of the
// rewiring. Returns the next damaged node.
// NOTE(review): the `return` bodies of the damage-check branches near
// the end are not visible in this listing — confirm against the full file.
1410 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1411 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1413 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1414 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1415 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1416 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
// Both nodes whose subtrees shrink must advertise the change.
1418 begin_change( pNode, nodeVersion );
1419 begin_change( pLeft, leftVersion );
1421 // fix up pNode links, careful about the order!
// pLRight's right subtree becomes pNode's left subtree...
1422 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1423 if ( pLRR != nullptr )
1424 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ...pLRight's left subtree becomes pLeft's right subtree...
1426 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1427 if ( pLRL != nullptr )
1428 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// ...and pLRight adopts pLeft (left) and pNode (right) as children.
1430 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1431 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1432 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1433 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// pLRight takes pNode's slot under pParent (left or right side).
1436 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1438 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1439 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1441 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Refresh cached heights bottom-up: the two demoted nodes, then pLRight.
1444 int hNode = 1 + std::max( hLRR, hR );
1445 pNode->height( hNode, memory_model::memory_order_relaxed );
1446 int hLeft = 1 + std::max( hLL, hLRL );
1447 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1448 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
// Rewiring done: clear flags / bump versions for both changed nodes.
1450 end_change( pNode, nodeVersion );
1451 end_change( pLeft, leftVersion );
1452 m_stat.onRotateRightOverLeft();
1454 // caller should have performed only a single rotation if pLeft was going
1455 // to end up damaged
1456 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1457 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1459 // We have damaged pParent, pLR (now parent.child), and pNode (now
1460 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1461 // can with the locks we've got.
1463 // We've already fixed the height for pNode, but it might still be outside
1464 // our allowable balance range. In that case a simple fix_height_locked()
1466 int nodeBalance = hLRR - hR;
1467 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1468 // we need another rotation at pNode
1472 // pNode might also be damaged by being an unnecessary routing node
1473 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1474 // repair involves splicing out pNode and maybe more rotations
1478 // we've already fixed the height at pLRight, do we need a rotation here?
1479 int balanceLR = hLeft - hNode;
1480 if ( balanceLR < -1 || balanceLR > 1 )
1483 // try to fix the parent height while we've still got the lock
1484 return fix_height_locked( pParent );
1487 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
// Mirror of rotate_right_over_left_locked(): double rotation for the
// right-left case. pRLeft is hoisted above both pNode and pRight, taking
// pNode's slot under pParent; pNode receives pRLeft's left subtree and
// pRight receives pRLeft's right subtree. Returns the next damaged node.
// NOTE(review): the `return` bodies of the damage-check branches at the
// end are not visible in this listing — confirm against the full file.
1489 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1490 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1492 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1493 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1494 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1495 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
// Both nodes whose subtrees shrink must advertise the change.
1497 begin_change( pNode, nodeVersion );
1498 begin_change( pRight, rightVersion );
1500 // fix up pNode links, careful about the order!
// pRLeft's left subtree becomes pNode's right subtree...
1501 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1502 if ( pRLL != nullptr )
1503 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ...pRLeft's right subtree becomes pRight's left subtree...
1505 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1506 if ( pRLR != nullptr )
1507 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// ...and pRLeft adopts pRight (right) and pNode (left) as children.
1509 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1510 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1511 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1512 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// pRLeft takes pNode's slot under pParent (left or right side).
1515 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1517 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1518 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1520 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Refresh cached heights bottom-up: the two demoted nodes, then pRLeft.
1523 int hNode = 1 + std::max( hL, hRLL );
1524 pNode->height( hNode, memory_model::memory_order_relaxed );
1525 int hRight = 1 + std::max( hRLR, hRR );
1526 pRight->height( hRight, memory_model::memory_order_relaxed );
1527 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
// Rewiring done: clear flags / bump versions for both changed nodes.
1529 end_change( pNode, nodeVersion );
1530 end_change( pRight, rightVersion );
1531 m_stat.onRotateLeftOverRight();
// Caller guarantees pRight ends up within balance (see the mirror
// function's comment); the assert enforces it.
1533 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks: pNode may still be unbalanced or an empty
// routing node; pRLeft may need another rotation.
1535 int nodeBalance = hRLL - hL;
1536 if ( nodeBalance < -1 || nodeBalance > 1 )
1538 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1541 int balRL = hRight - hNode;
1542 if ( balRL < -1 || balRL > 1 )
// Otherwise just refresh the parent's height while it is still locked.
1545 return fix_height_locked( pParent );
1550 }} // namespace cds::container
1552 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H