3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <memory> // unique_ptr
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/details/binary_functor_wrapper.h>
12 namespace cds { namespace container {
14 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
15 /** @ingroup cds_nonintrusive_map
16 @ingroup cds_nonintrusive_tree
17 @headerfile cds/container/bronson_avltree_map_rcu.h
This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef std::unique_ptr< T, disposer > unique_ptr;
74 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
78 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
79 typedef typename node_type::version_type version_type;
81 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
82 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
84 enum class find_result
101 result_inserted = allow_insert,
102 result_updated = allow_update,
109 nothing_required = -3,
110 rebalance_required = -2,
119 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
124 template <typename K>
125 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
127 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
130 static void free_node( node_type * pNode )
132 // Free node without disposer
133 cxx_allocator().Delete( pNode );
136 static void free_value( mapped_type pVal )
141 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
143 return pNode->child( nDir ).load( order );
146 static node_type * parent( node_type * pNode, atomics::memory_order order )
148 return pNode->m_pParent.load( order );
151 // RCU safe disposer for node
154 node_type * m_pRetiredList; ///< head of retired list
158 : m_pRetiredList( nullptr )
166 void dispose( node_type * pNode )
168 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
169 pNode->m_pNextRemoved = m_pRetiredList;
170 m_pRetiredList = pNode;
174 struct internal_disposer
176 void operator()( node_type * p ) const
184 assert( !gc::is_locked() );
186 for ( node_type * p = m_pRetiredList; p; ) {
187 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
188 // Value already disposed
189 gc::template retire_ptr<internal_disposer>( p );
199 typename node_type::base_class m_Root;
201 item_counter m_ItemCounter;
202 mutable sync_monitor m_Monitor;
207 /// Creates empty map
209 : m_pRoot( static_cast<node_type *>( &m_Root ))
220 The \p key_type should be constructible from a value of type \p K.
222 RCU \p synchronize() can be called. RCU should not be locked.
224 Returns \p true if inserting successful, \p false otherwise.
226 template <typename K>
227 bool insert( K const& key, mapped_type pVal )
229 return do_update(key, key_comparator(),
230 [pVal]( node_type * pNode ) -> mapped_type
232 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
235 update_flags::allow_insert
236 ) == update_flags::result_inserted;
239 /// Updates the value for \p key
241 The operation performs inserting or updating the value for \p key with lock-free manner.
242 If \p bInsert is \p false, only updating of existing node is possible.
244 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
245 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
246 constructible from type \p K.
247 Otherwise, the value is changed to \p pVal.
249 RCU \p synchronize() method can be called. RCU should not be locked.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
\p second is \p true if a new node has been added or \p false if a node with \p key
is already in the tree.
255 template <typename K, typename Func>
256 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
258 int result = do_update( key, key_comparator(),
259 [pVal]( node_type * ) -> mapped_type
263 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
265 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
268 /// Delete \p key from the map
270 RCU \p synchronize() method can be called. RCU should not be locked.
272 Return \p true if \p key is found and deleted, \p false otherwise
274 template <typename K>
275 bool erase( K const& key )
280 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
284 /// Deletes the item from the map using \p pred predicate for searching
286 The function is an analog of \p erase(K const&)
287 but \p pred is used for key comparing.
288 \p Less functor has the interface like \p std::less.
289 \p Less must imply the same element order as the comparator used for building the map.
291 template <typename K, typename Less>
292 bool erase_with( K const& key, Less pred )
297 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
298 []( mapped_type pVal ) -> bool { free_value( pVal ); return true; }
302 /// Delete \p key from the map
304 The function searches an item with key \p key, calls \p f functor
305 and deletes the item. If \p key is not found, the functor is not called.
307 The functor \p Func interface:
310 void operator()(mapped_type& item) { ... }
314 RCU \p synchronize method can be called. RCU should not be locked.
316 Return \p true if key is found and deleted, \p false otherwise
318 template <typename K, typename Func>
319 bool erase( K const& key, Func f )
324 [&f]( mapped_type pVal ) -> bool {
333 /// Deletes the item from the map using \p pred predicate for searching
335 The function is an analog of \p erase(K const&, Func)
336 but \p pred is used for key comparing.
337 \p Less functor has the interface like \p std::less.
338 \p Less must imply the same element order as the comparator used for building the map.
340 template <typename K, typename Less, typename Func>
341 bool erase_with( K const& key, Less pred, Func f )
346 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
347 [&f]( mapped_type pVal ) -> bool {
356 /// Extracts an item with minimal key from the map
358 Returns \p std::unique_ptr to the leftmost item.
359 If the set is empty, returns empty \p std::unique_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
It means that the function gets the leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
So, the function returns the item with the minimal key at the moment of tree traversal.
366 RCU \p synchronize method can be called. RCU should NOT be locked.
367 The function does not free the item.
368 The deallocator will be implicitly invoked when the returned object is destroyed or when
369 its \p reset(nullptr) member function is called.
371 unique_ptr extract_min()
377 /// Extracts an item with maximal key from the map
379 Returns \p std::unique_ptr pointer to the rightmost item.
380 If the set is empty, returns empty \p std::unique_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
It means that the function gets the rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
So, the function returns the item with the maximal key at the moment of tree traversal.
387 RCU \p synchronize method can be called. RCU should NOT be locked.
388 The function does not free the item.
389 The deallocator will be implicitly invoked when the returned object is destroyed or when
390 its \p reset(nullptr) is called.
391 @note Before reusing \p result object you should call its \p release() method.
393 unique_ptr extract_max()
399 /// Extracts an item from the map
401 The function searches an item with key equal to \p key in the tree,
402 unlinks it, and returns \p std::unique_ptr pointer to a value found.
403 If \p key is not found the function returns an empty \p std::unique_ptr.
405 RCU \p synchronize method can be called. RCU should NOT be locked.
406 The function does not destroy the value found.
407 The disposer will be implicitly invoked when the returned object is destroyed or when
408 its \p reset(nullptr) member function is called.
410 template <typename Q>
411 unique_ptr extract( Q const& key )
413 unique_ptr pExtracted;
418 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
423 /// Extracts an item from the map using \p pred for searching
425 The function is an analog of \p extract(Q const&)
426 but \p pred is used for key compare.
427 \p Less has the interface like \p std::less.
428 \p pred must imply the same element order as the comparator used for building the tree.
430 template <typename Q, typename Less>
431 unique_ptr extract_with( Q const& key, Less pred )
434 unique_ptr pExtracted;
438 cds::details::predicate_wrapper<key_type, Less, cds::details::trivial_accessor >(),
439 [&pExtracted]( mapped_type pVal ) -> bool { pExtracted.reset( pVal ); return false; }
444 /// Find the key \p key
446 The function searches the item with key equal to \p key and calls the functor \p f for item found.
447 The interface of \p Func functor is:
450 void operator()( key_type const& key, mapped_type& item );
453 where \p item is the item found.
454 The functor is called under node-level lock.
456 The function applies RCU lock internally.
458 The function returns \p true if \p key is found, \p false otherwise.
460 template <typename K, typename Func>
461 bool find( K const& key, Func f )
463 return do_find( key, key_comparator(),
464 [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
465 assert( pNode != nullptr );
466 node_scoped_lock l( monitor, *pNode );
467 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
469 f( pNode->m_key, *pVal );
477 /// Finds the key \p val using \p pred predicate for searching
479 The function is an analog of \p find(K const&, Func)
480 but \p pred is used for key comparing.
481 \p Less functor has the interface like \p std::less.
482 \p Less must imply the same element order as the comparator used for building the map.
484 template <typename K, typename Less, typename Func>
485 bool find_with( K const& key, Less pred, Func f )
488 return do_find( key, opt::details::make_comparator_from_less<Less>(),
489 [&f]( sync_monitor& monitor, node_type * pNode ) -> bool {
490 assert( pNode != nullptr );
491 node_scoped_lock l( monitor, *pNode );
492 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
494 f( pNode->m_key, *pVal );
502 /// Find the key \p key
504 The function searches the item with key equal to \p key
505 and returns \p true if it is found, and \p false otherwise.
507 The function applies RCU lock internally.
509 template <typename K>
510 bool find( K const& key )
512 return do_find( key, key_comparator(), []( sync_monitor&, node_type * ) -> bool { return true; });
515 /// Finds the key \p val using \p pred predicate for searching
517 The function is an analog of \p find(K const&)
518 but \p pred is used for key comparing.
519 \p Less functor has the interface like \p std::less.
520 \p Less must imply the same element order as the comparator used for building the map.
522 template <typename K, typename Less>
523 bool find_with( K const& key, Less pred )
526 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( sync_monitor&, node_type * ) -> bool { return true; } );
529 /// Clears the tree (thread safe, not atomic)
531 The function unlink all items from the tree.
532 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
536 assert( set.empty() );
538 the assertion could be raised.
540 For each node the \ref disposer will be called after unlinking.
542 RCU \p synchronize method can be called. RCU should not be locked.
546 while ( extract_min() );
549 /// Clears the tree (not thread safe)
551 This function is not thread safe and may be called only when no other thread deals with the tree.
552 The function is used in the tree destructor.
559 /// Checks if the map is empty
562 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
565 /// Returns item count in the map
567 Only leaf nodes containing user data are counted.
569 The value returned depends on item counter type provided by \p Traits template parameter.
570 If it is \p atomicity::empty_item_counter this function always returns 0.
572 The function is not suitable for checking the tree emptiness, use \p empty()
573 member function for this purpose.
577 return m_ItemCounter;
580 /// Returns const reference to internal statistics
581 stat const& statistics() const
586 /// Checks internal consistency (not atomic, not thread-safe)
588 The debugging function to check internal consistency of the tree.
590 bool check_consistency() const
598 template <typename Q, typename Compare, typename Func>
599 bool do_find( Q& key, Compare cmp, Func f ) const
604 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
606 assert( result != find_result::retry );
607 return result == find_result::found;
610 template <typename K, typename Compare, typename Func>
611 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
613 check_deadlock_policy::check();
615 rcu_disposer removed_list;
618 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
622 template <typename K, typename Compare, typename Func>
623 bool do_remove( K const& key, Compare cmp, Func func )
625 // Func must return true if the value was disposed
626 // or false if the value was extracted
628 check_deadlock_policy::check();
630 rcu_disposer removed_list;
633 return try_remove_root( key, cmp, func, removed_list );
641 template <typename Q, typename Compare, typename Func>
642 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
644 assert( gc::is_locked() );
648 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
650 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
651 m_stat.onFindRetry();
652 return find_result::retry;
655 m_stat.onFindFailed();
656 return find_result::not_found;
659 int nCmp = cmp( key, pChild->m_key );
661 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
663 node_scoped_lock l( m_Monitor, *pChild );
664 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
665 if ( f( m_Monitor, pChild ) ) {
666 m_stat.onFindSuccess();
667 return find_result::found;
672 m_stat.onFindFailed();
673 return find_result::not_found;
676 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
677 if ( nChildVersion & node_type::shrinking ) {
678 m_stat.onFindWaitShrinking();
679 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
681 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
682 m_stat.onFindRetry();
683 return find_result::retry;
686 else if ( nChildVersion != node_type::unlinked ) {
688 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
689 m_stat.onFindRetry();
690 return find_result::retry;
693 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
694 if ( found != find_result::retry )
700 template <typename K, typename Compare, typename Func>
701 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
703 assert( gc::is_locked() );
707 // get right child of root
708 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
710 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
711 if ( nChildVersion & node_type::shrinking ) {
712 m_stat.onUpdateRootWaitShrinking();
713 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
714 result = update_flags::retry;
716 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
717 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
722 if ( nFlags & update_flags::allow_insert ) {
723 // insert into tree as right child of the root
725 node_scoped_lock l( m_Monitor, *m_pRoot );
726 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
727 result = result == update_flags::retry;
731 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
732 mapped_type pVal = funcUpdate( pNew );
733 assert( pVal != nullptr );
734 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
736 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
737 m_pRoot->height( 2, memory_model::memory_order_relaxed );
741 m_stat.onInsertSuccess();
742 return update_flags::result_inserted;
745 return update_flags::failed;
747 } while ( result == update_flags::retry );
751 template <typename K, typename Compare, typename Func>
752 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
754 assert( gc::is_locked() );
758 // get right child of root
759 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
761 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
762 if ( nChildVersion & node_type::shrinking ) {
763 m_stat.onUpdateRootWaitShrinking();
764 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
765 result = update_flags::retry;
767 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
768 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
773 } while ( result == update_flags::retry );
775 return result == update_flags::result_removed;
778 template <typename K, typename Compare, typename Func>
779 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
781 assert( gc::is_locked() );
782 assert( nVersion != node_type::unlinked );
783 CDS_UNUSED( pParent );
785 int nCmp = cmp( key, pNode->m_key );
787 if ( nFlags & update_flags::allow_update ) {
788 return try_update_node( funcUpdate, pNode );
790 return update_flags::failed;
795 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
796 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
797 return update_flags::retry;
799 if ( pChild == nullptr ) {
801 if ( nFlags & update_flags::allow_insert )
802 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
804 result = update_flags::failed;
808 result = update_flags::retry;
809 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
810 if ( nChildVersion & node_type::shrinking ) {
811 m_stat.onUpdateWaitShrinking();
812 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
815 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
816 // this second read is important, because it is protected by nChildVersion
818 // validate the read that our caller took to get to node
819 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
820 m_stat.onUpdateRetry();
821 return update_flags::retry;
824 // At this point we know that the traversal our parent took to get to node is still valid.
825 // The recursive implementation will validate the traversal from node to
826 // child, so just prior to the node nVersion validation both traversals were definitely okay.
827 // This means that we are no longer vulnerable to node shrinks, and we don't need
828 // to validate node version any more.
829 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
832 } while ( result == update_flags::retry );
836 template <typename K, typename Compare, typename Func>
837 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
839 assert( gc::is_locked() );
840 assert( nVersion != node_type::unlinked );
842 int nCmp = cmp( key, pNode->m_key );
844 return try_remove_node( pParent, pNode, nVersion, func, disp );
848 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
849 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
850 return update_flags::retry;
852 if ( pChild == nullptr ) {
853 return update_flags::failed;
857 result = update_flags::retry;
858 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
859 if ( nChildVersion & node_type::shrinking ) {
860 m_stat.onUpdateWaitShrinking();
861 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
864 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
865 // this second read is important, because it is protected by nChildVersion
867 // validate the read that our caller took to get to node
868 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
869 m_stat.onUpdateRetry();
870 return update_flags::retry;
873 // At this point we know that the traversal our parent took to get to node is still valid.
874 // The recursive implementation will validate the traversal from node to
875 // child, so just prior to the node nVersion validation both traversals were definitely okay.
876 // This means that we are no longer vulnerable to node shrinks, and we don't need
877 // to validate node version any more.
878 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
881 } while ( result == update_flags::retry );
885 template <typename K, typename Func>
886 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
890 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
891 mapped_type pVal = funcUpdate( pNew );
892 assert( pVal != nullptr );
893 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
896 if ( c_bRelaxedInsert ) {
897 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
898 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
900 m_stat.onInsertRetry();
901 return update_flags::retry;
904 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
907 node_type * pDamaged;
909 assert( pNode != nullptr );
910 node_scoped_lock l( m_Monitor, *pNode );
912 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
913 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
915 if ( c_bRelaxedInsert ) {
917 m_stat.onRelaxedInsertFailed();
920 m_stat.onInsertRetry();
921 return update_flags::retry;
924 if ( !c_bRelaxedInsert )
925 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
927 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
928 pDamaged = fix_height_locked( pNode );
932 m_stat.onInsertSuccess();
934 fix_height_and_rebalance( pDamaged, disp );
936 return update_flags::result_inserted;
939 template <typename Func>
940 int try_update_node( Func funcUpdate, node_type * pNode )
944 assert( pNode != nullptr );
945 node_scoped_lock l( m_Monitor, *pNode );
947 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
948 m_stat.onUpdateUnlinked();
949 return update_flags::retry;
952 pOld = pNode->value( memory_model::memory_order_relaxed );
953 mapped_type pVal = funcUpdate( pNode );
954 assert( pVal != nullptr );
955 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
960 m_stat.onDisposeValue();
963 m_stat.onUpdateSuccess();
964 return update_flags::result_updated;
967 template <typename Func>
968 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
970 assert( pParent != nullptr );
971 assert( pNode != nullptr );
973 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
974 return update_flags::failed;
976 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
977 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
979 node_type * pDamaged;
982 node_scoped_lock lp( m_Monitor, *pParent );
983 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
984 return update_flags::retry;
987 node_scoped_lock ln( m_Monitor, *pNode );
988 pOld = pNode->value( memory_model::memory_order_relaxed );
989 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
991 && try_unlink_locked( pParent, pNode, disp )))
993 return update_flags::retry;
996 pDamaged = fix_height_locked( pParent );
1000 if ( func( pOld ) ) // calls pOld disposer inside
1001 m_stat.onDisposeValue();
1003 m_stat.onExtractValue();
1005 fix_height_and_rebalance( pDamaged, disp );
1006 return update_flags::result_removed;
1009 int result = update_flags::retry;
1012 node_scoped_lock ln( m_Monitor, *pNode );
1013 pOld = pNode->value( atomics::memory_order_relaxed );
1014 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1015 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1016 result = update_flags::result_removed;
1020 if ( result == update_flags::result_removed ) {
1022 if ( func( pOld )) // calls pOld disposer inside
1023 m_stat.onDisposeValue();
1025 m_stat.onExtractValue();
1032 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1034 // pParent and pNode must be locked
1035 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1037 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1038 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1039 if ( pNode != pParentLeft && pNode != pParentRight ) {
1040 // node is no longer a child of parent
1044 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1045 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1047 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1048 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1049 if ( pLeft != nullptr && pRight != nullptr ) {
1050 // splicing is no longer possible
1053 node_type * pSplice = pLeft ? pLeft : pRight;
1055 if ( pParentLeft == pNode )
1056 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1058 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1061 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1063 // Mark the node as unlinked
1064 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1065 mapped_type pVal = pNode->value( memory_model::memory_order_relaxed );
1068 m_stat.onDisposeValue();
1069 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1071 disp.dispose( pNode );
1072 m_stat.onDisposeNode();
1079 private: // rotations
1081 int estimate_node_condition( node_type * pNode )
1083 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1084 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1086 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1087 return unlink_required;
1089 int h = pNode->height( memory_model::memory_order_relaxed );
1090 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1091 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1093 int hNew = 1 + std::max( hL, hR );
1094 int nBalance = hL - hR;
1096 if ( nBalance < -1 || nBalance > 1 )
1097 return rebalance_required;
1099 return h != hNew ? hNew : nothing_required;
1102 node_type * fix_height( node_type * pNode )
1104 assert( pNode != nullptr );
1105 node_scoped_lock l( m_Monitor, *pNode );
1106 return fix_height_locked( pNode );
1109 node_type * fix_height_locked( node_type * pNode )
1111 // pNode must be locked!!!
1112 int h = estimate_node_condition( pNode );
1114 case rebalance_required:
1115 case unlink_required:
1117 case nothing_required:
1120 pNode->height( h, memory_model::memory_order_relaxed );
1121 return parent( pNode, memory_model::memory_order_relaxed );
// Walks upward from pNode toward the root, fixing stale heights and
// performing rotations until no damage remains. Removed nodes are handed
// to disp for deferred (RCU) disposal. The loop stops at the root
// (a node whose parent is null) or when pNode becomes null.
1125 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1127 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1128 int nCond = estimate_node_condition( pNode );
// Node is already consistent, or was unlinked concurrently: done here.
1129 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Pure height damage: fix it and continue with the returned parent.
1132 if ( nCond != unlink_required && nCond != rebalance_required )
1133 pNode = fix_height( pNode );
1135 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1136 assert( pParent != nullptr );
// Lock order: parent first, then child.
1138 node_scoped_lock lp( m_Monitor, *pParent );
// Re-validate the parent link under the lock: a concurrent rotation
// may have moved pNode, in which case the loop simply retries.
1139 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1140 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1142 node_scoped_lock ln( m_Monitor, *pNode );
1143 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs the damage at pNode (unlink, rotation, or height fix) while
// both pParent and pNode are locked by the caller. Returns the next
// damaged node to process, or nullptr when rebalancing is complete.
1150 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1152 // pParent and pNode should be locked.
1153 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1154 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1155 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1156 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1158 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1159 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Childless (or single-child) routing node without a value: splice it
// out; on success the parent's height may now be stale.
1161 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1162 if ( try_unlink_locked( pParent, pNode, disp ))
1163 return fix_height_locked( pParent );
1165 // retry needed for pNode
1170 int h = pNode->height( memory_model::memory_order_relaxed );
1171 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1172 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1173 int hNew = 1 + std::max( hL, hR );
1174 int balance = hL - hR;
// Left subtree too tall (balance > 1; guard elided in this excerpt):
// rotate toward the right.
1177 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
// Right subtree too tall: rotate toward the left.
1178 else if ( balance < -1 )
1179 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
// Balanced but cached height is stale: store the new height and let
// the (already locked) parent be checked next.
1180 else if ( hNew != h ) {
1181 pNode->height( hNew, memory_model::memory_order_relaxed );
1183 // pParent is already locked
1184 return fix_height_locked( pParent );
// pNode's left subtree is too tall: perform either a single right
// rotation or a left-right double rotation. pParent and pNode are locked
// by the caller; pLeft (and possibly pLeft->right) is locked here.
// Returns pNode to signal "retry", or the next damaged node.
1190 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1192 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1193 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1194 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1196 // pParent and pNode is locked yet
1197 // pNode->pLeft is too large, we will rotate-right.
1198 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1201 assert( pLeft != nullptr );
1202 node_scoped_lock l( m_Monitor, *pLeft );
// Re-validate the child link under the new lock; a concurrent change
// means the caller must retry with pNode.
1203 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1204 return pNode; // retry for pNode
1206 int hL = pLeft->height( memory_model::memory_order_relaxed );
1208 return pNode; // retry
1210 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1211 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1212 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1213 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Left-left case: a single right rotation suffices.
1217 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Left-right case: pLeft->right must be inspected (and locked) to
// decide between a double rotation and rebalancing pLeft first.
1220 assert( pLRight != nullptr );
1222 node_scoped_lock lr( m_Monitor, *pLRight );
1223 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1224 return pNode; // retry
// Height may have changed since it was sampled; re-read under lock.
1226 hLR = pLRight->height( memory_model::memory_order_relaxed );
1228 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1230 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1231 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1232 int balance = hLL - hLRL;
// Double rotation is safe only if it cannot leave pLeft damaged
// (balanced result and pLeft not reduced to an empty routing node).
1233 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1234 // nParent.child.left won't be damaged after a double rotation
1235 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1239 // focus on pLeft, if necessary pNode will be balanced later
1240 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): pNode's right subtree is
// too tall, so perform a single left rotation or a right-left double
// rotation. pParent and pNode are locked by the caller; pRight (and
// possibly pRight->left) is locked here. Returns pNode to signal
// "retry", or the next damaged node.
1245 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1247 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1248 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1249 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1251 // pParent and pNode is locked yet
1253 assert( pRight != nullptr );
1254 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate the child link under the new lock; retry on mismatch.
1255 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1256 return pNode; // retry for pNode
1258 int hR = pRight->height( memory_model::memory_order_relaxed );
// The imbalance has disappeared in the meantime: let the caller retry.
1259 if ( hL - hR >= -1 )
1260 return pNode; // retry
1262 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1263 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1264 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1265 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Right-right case: a single left rotation suffices.
1267 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Right-left case: inspect (and lock) pRight->left to decide between a
// double rotation and rebalancing pRight first.
1270 assert( pRLeft != nullptr );
1271 node_scoped_lock lrl( m_Monitor, *pRLeft );
1272 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1273 return pNode; // retry
// Re-read the height under the lock; it may have changed.
1275 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1277 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1279 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1280 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1281 int balance = hRR - hRLR;
// Double rotation is safe only if it cannot leave pRight damaged
// (balanced result and pRight not reduced to an empty routing node).
1282 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1283 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise rebalance pRight first; pNode is handled later if needed.
1285 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" by setting the shrinking flag in its
// version word, so concurrent lock-free readers detect the in-progress
// rotation and wait/retry.
1289 static void begin_change( node_type * pNode, version_type version )
1291 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a structural change started by begin_change(): clears the
// flag bits and bumps the version so optimistic readers that sampled
// the old version know to retry.
1293 static void end_change( node_type * pNode, version_type version )
1295 // Clear shrinking and unlinked flags and increment version
1296 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the parent of pNode.
// pParent, pNode and pLeft are locked by the caller. The rotation is
// bracketed by begin_change()/end_change() on pNode so optimistic
// readers can detect it. Returns the next damaged node (or the result
// of fixing pParent's height).
1299 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1301 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1302 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1304 begin_change( pNode, nodeVersion );
// pLeft's right subtree is transferred to pNode's left slot.
1306 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1307 if ( pLRight != nullptr )
1308 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pLeft's right child.
1310 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1311 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// Re-link pParent to pLeft on whichever side held pNode.
1313 if ( pParentLeft == pNode )
1314 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1316 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1317 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1319 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1321 // fix up heights links
1322 int hNode = 1 + std::max( hLR, hR );
1323 pNode->height( hNode, memory_model::memory_order_relaxed );
1324 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1326 end_change( pNode, nodeVersion );
1327 m_stat.onRotateRight();
1329 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1330 // parent.child). pNode is the deepest. Perform as many fixes as we can
1331 // with the locks we've got.
1333 // We've already fixed the height for pNode, but it might still be outside
1334 // our allowable balance range. In that case a simple fix_height_locked()
// (return paths for these damage cases are elided in this excerpt)
1336 int nodeBalance = hLR - hR;
1337 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1338 // we need another rotation at pNode
1342 // we've fixed balance and height damage for pNode, now handle
1343 // extra-routing node damage
1344 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1345 // we need to remove pNode and then repair
1349 // we've already fixed the height at pLeft, do we need a rotation here?
1350 int leftBalance = hLL - hNode;
1351 if ( leftBalance < -1 || leftBalance > 1 )
1354 // pLeft might also have routing node damage (if pLeft.left was null)
1355 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1358 // try to fix the parent height while we've still got the lock
1359 return fix_height_locked( pParent );
// Single left rotation (mirror of rotate_right_locked()): pRight becomes
// the parent of pNode. pParent, pNode and pRight are locked by the
// caller; the change is bracketed by begin_change()/end_change() on
// pNode for optimistic readers. Returns the next damaged node (or the
// result of fixing pParent's height).
1362 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1364 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1365 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1367 begin_change( pNode, nodeVersion );
1369 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
// pRight's left subtree moves into pNode's right slot.
1370 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1371 if ( pRLeft != nullptr )
1372 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// pNode becomes pRight's left child.
1374 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1375 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Re-link pParent to pRight on whichever side held pNode.
1377 if ( pParentLeft == pNode )
1378 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1380 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1381 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1383 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: pNode first, then pRight.
1386 int hNode = 1 + std::max( hL, hRL );
1387 pNode->height( hNode, memory_model::memory_order_relaxed );
1388 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1390 end_change( pNode, nodeVersion );
1391 m_stat.onRotateLeft();
// Post-rotation damage checks, mirroring rotate_right_locked()
// (return paths for these cases are elided in this excerpt).
1393 int nodeBalance = hRL - hL;
1394 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode may have become an unnecessary routing node.
1397 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1400 int rightBalance = hRR - hNode;
1401 if ( rightBalance < -1 || rightBalance > 1 )
// pRight may have routing-node damage as well.
1404 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1407 return fix_height_locked( pParent );
// Left-right double rotation: pLRight (pLeft's right child) is lifted
// above both pLeft and pNode. pParent, pNode, pLeft and pLRight are
// locked by the caller. Both pNode and pLeft are versioned via
// begin_change()/end_change() since both shrink. Returns the next
// damaged node (or the result of fixing pParent's height).
1410 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1412 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1413 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1415 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1416 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1417 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1418 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1420 begin_change( pNode, nodeVersion );
1421 begin_change( pLeft, leftVersion );
1423 // fix up pNode links, careful about the order!
// pLRight's subtrees are distributed: right one to pNode's left slot,
// left one to pLeft's right slot.
1424 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1425 if ( pLRR != nullptr )
1426 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1428 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1429 if ( pLRL != nullptr )
1430 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight adopts pLeft (left) and pNode (right) as its children.
1432 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1433 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1434 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1435 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Re-link pParent to pLRight on whichever side held pNode.
1438 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1440 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1441 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1443 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: pNode and pLeft, then pLRight.
1446 int hNode = 1 + std::max( hLRR, hR );
1447 pNode->height( hNode, memory_model::memory_order_relaxed );
1448 int hLeft = 1 + std::max( hLL, hLRL );
1449 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1450 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1452 end_change( pNode, nodeVersion );
1453 end_change( pLeft, leftVersion );
1454 m_stat.onRotateRightOverLeft();
1456 // caller should have performed only a single rotation if pLeft was going
1457 // to end up damaged
1458 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1459 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1461 // We have damaged pParent, pLR (now parent.child), and pNode (now
1462 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1463 // can with the locks we've got.
1465 // We've already fixed the height for pNode, but it might still be outside
1466 // our allowable balance range. In that case a simple fix_height_locked()
// (return paths for these damage cases are elided in this excerpt)
1468 int nodeBalance = hLRR - hR;
1469 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1470 // we need another rotation at pNode
1474 // pNode might also be damaged by being an unnecessary routing node
1475 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1476 // repair involves splicing out pNode and maybe more rotations
1480 // we've already fixed the height at pLRight, do we need a rotation here?
1481 int balanceLR = hLeft - hNode;
1482 if ( balanceLR < -1 || balanceLR > 1 )
1485 // try to fix the parent height while we've still got the lock
1486 return fix_height_locked( pParent );
// Right-left double rotation (mirror of rotate_right_over_left_locked()):
// pRLeft (pRight's left child) is lifted above both pRight and pNode.
// pParent, pNode, pRight and pRLeft are locked by the caller. Both
// pNode and pRight are versioned via begin_change()/end_change().
// Returns the next damaged node (or the result of fixing pParent's
// height).
1489 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1491 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1492 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1494 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1495 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1496 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1497 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1499 begin_change( pNode, nodeVersion );
1500 begin_change( pRight, rightVersion );
1502 // fix up pNode links, careful about the order!
// pRLeft's subtrees are distributed: left one to pNode's right slot,
// right one to pRight's left slot.
1503 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1504 if ( pRLL != nullptr )
1505 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1507 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1508 if ( pRLR != nullptr )
1509 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft adopts pNode (left) and pRight (right) as its children.
1511 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1512 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1513 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1514 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Re-link pParent to pRLeft on whichever side held pNode.
1517 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1519 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1520 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1522 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute cached heights bottom-up: pNode and pRight, then pRLeft.
1525 int hNode = 1 + std::max( hL, hRLL );
1526 pNode->height( hNode, memory_model::memory_order_relaxed );
1527 int hRight = 1 + std::max( hRLR, hRR );
1528 pRight->height( hRight, memory_model::memory_order_relaxed );
1529 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1531 end_change( pNode, nodeVersion );
1532 end_change( pRight, rightVersion );
1533 m_stat.onRotateLeftOverRight();
// Caller should have chosen a single rotation if pRight was going to
// end up damaged.
1535 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks, mirroring the right-over-left case
// (return paths for these cases are elided in this excerpt).
1537 int nodeBalance = hRLL - hL;
1538 if ( nodeBalance < -1 || nodeBalance > 1 )
// pNode might be an unnecessary routing node now.
1540 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1543 int balRL = hRight - hNode;
1544 if ( balRL < -1 || balRL > 1 )
1547 return fix_height_locked( pParent );
1552 }} // namespace cds::container
1554 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H