3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 # ifdef CDS_DOXYGEN_INVOKED
72 /// Returned pointer to \p mapped_type of extracted node
73 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
75 typedef cds::urcu::exempt_ptr< gc,
76 typename std::remove_pointer<mapped_type>::type,
77 typename std::remove_pointer<mapped_type>::type,
83 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
87 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
88 typedef typename node_type::version_type version_type;
90 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
91 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
93 enum class find_result
110 result_inserted = allow_insert,
111 result_updated = allow_update,
118 nothing_required = -3,
119 rebalance_required = -2,
128 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
133 template <typename K>
// Allocates a new tree node, forwarding \p key into the node's key field.
134 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
136 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Deallocates the node object itself; the stored value pointer is NOT passed to the disposer here.
139 static void free_node( node_type * pNode )
141 // Free node without disposer
142 cxx_allocator().Delete( pNode );
// Frees a user value (body not visible in this view - presumably invokes the disposer; TODO confirm).
145 static void free_value( mapped_type pVal )
// Atomically loads the child of \p pNode selected by \p nDir with the given memory order.
150 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
152 return pNode->child( nDir ).load( order );
// Atomically loads the parent pointer of \p pNode with the given memory order.
155 static node_type * parent( node_type * pNode, atomics::memory_order order )
157 return pNode->m_pParent.load( order );
163 node_type * m_pRetiredList; ///< head of retired node list
164 mapped_type m_pRetiredValue; ///< value retired
// Starts with an empty retired-node list and no retired value.
168 : m_pRetiredList( nullptr )
169 , m_pRetiredValue( nullptr )
// Links an unlinked (value-less) node into the retired list; actual reclamation is deferred.
177 void dispose( node_type * pNode )
179 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
180 pNode->m_pNextRemoved = m_pRetiredList;
181 m_pRetiredList = pNode;
// Records a single value pointer to retire; at most one value per disposer instance (see assert).
184 void dispose_value( mapped_type pVal )
186 assert( m_pRetiredValue == nullptr );
187 m_pRetiredValue = pVal;
// Functor used to reclaim node memory via RCU once a grace period has elapsed.
191 struct internal_disposer
193 void operator()( node_type * p ) const
// Retires the accumulated nodes and the retired value; must run outside an RCU read-side section.
201 assert( !gc::is_locked() );
203 // TODO: use RCU::batch_retire
206 for ( node_type * p = m_pRetiredList; p; ) {
207 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
208 // Value already disposed
209 gc::template retire_ptr<internal_disposer>( p );
214 if ( m_pRetiredValue )
215 gc::template retire_ptr<disposer>( m_pRetiredValue );
223 typename node_type::base_class m_Root;
225 item_counter m_ItemCounter;
226 mutable sync_monitor m_Monitor;
231 /// Creates empty map
233 : m_pRoot( static_cast<node_type *>( &m_Root ))
244 The \p key_type should be constructible from a value of type \p K.
246 RCU \p synchronize() can be called. RCU should not be locked.
248 Returns \p true if inserting successful, \p false otherwise.
250 template <typename K>
251 bool insert( K const& key, mapped_type pVal )
// Insert-only path (allow_insert flag); the functor returns the value pointer to store in the new node.
253 return do_update(key, key_comparator(),
254 [pVal]( node_type * pNode ) -> mapped_type
// The target node must not already carry a value when inserting.
256 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
259 update_flags::allow_insert
260 ) == update_flags::result_inserted;
263 /// Updates the value for \p key
265 The operation performs inserting or updating the value for \p key with lock-free manner.
266 If \p bInsert is \p false, only updating of existing node is possible.
268 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
269 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
270 constructible from type \p K.
271 Otherwise, the value for \p key will be changed to \p pVal.
273 RCU \p synchronize() method can be called. RCU should not be locked.
275 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
276 \p second is \p true if new node has been added or \p false if the node with \p key
279 template <typename K, typename Func>
// NOTE(review): template parameter \p Func appears unused by this overload - verify against upstream libcds.
280 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
282 int result = do_update( key, key_comparator(),
283 [pVal]( node_type * ) -> mapped_type
// allow_update is always set; allow_insert only when \p bInsert is true.
287 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
// first: operation succeeded; second: a new node was inserted (vs existing node updated).
289 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
292 /// Delete \p key from the map
294 RCU \p synchronize() method can be called. RCU should not be locked.
296 Return \p true if \p key is found and deleted, \p false otherwise
298 template <typename K>
299 bool erase( K const& key )
// The lambda marks the value for deferred disposal and returns true ("value was disposed").
304 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
308 /// Deletes the item from the map using \p pred predicate for searching
310 The function is an analog of \p erase(K const&)
311 but \p pred is used for key comparing.
312 \p Less functor has the interface like \p std::less.
313 \p Less must imply the same element order as the comparator used for building the map.
315 template <typename K, typename Less>
316 bool erase_with( K const& key, Less pred )
// Builds a comparator from \p Less; the removal lambda matches erase(K const&).
321 cds::opt::details::make_comparator_from_less<Less>(),
322 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
326 /// Delete \p key from the map
328 The function searches an item with key \p key, calls \p f functor
329 and deletes the item. If \p key is not found, the functor is not called.
331 The functor \p Func interface:
334 void operator()(mapped_type& item) { ... }
338 RCU \p synchronize method can be called. RCU should not be locked.
340 Return \p true if key is found and deleted, \p false otherwise
342 template <typename K, typename Func>
343 bool erase( K const& key, Func f )
// Calls user functor \p f on the value before scheduling it for deferred disposal.
348 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
351 disp.dispose_value(pVal);
357 /// Deletes the item from the map using \p pred predicate for searching
359 The function is an analog of \p erase(K const&, Func)
360 but \p pred is used for key comparing.
361 \p Less functor has the interface like \p std::less.
362 \p Less must imply the same element order as the comparator used for building the map.
364 template <typename K, typename Less, typename Func>
365 bool erase_with( K const& key, Less pred, Func f )
// Same as erase(K, Func) but ordering comes from \p Less instead of the default comparator.
370 cds::opt::details::make_comparator_from_less<Less>(),
371 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
374 disp.dispose_value(pVal);
380 /// Extracts a value with minimal key from the map
382 Returns \p exempt_ptr to the leftmost item.
383 If the tree is empty, returns empty \p exempt_ptr.
385 Note that the function returns only the value for minimal key.
386 To retrieve its key use \p extract_min( Func ) member function.
388 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
389 It means that the function gets leftmost leaf of the tree and tries to unlink it.
390 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
391 So, the function returns the item with minimum key at the moment of tree traversing.
393 RCU \p synchronize method can be called. RCU should NOT be locked.
394 The function does not free the item.
395 The deallocator will be implicitly invoked when the returned object is destroyed or when
396 its \p release() member function is called.
398 exempt_ptr extract_min()
// Key consumer is a no-op: only the value is wanted.
400 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
403 /// Extracts minimal key key and corresponding value
405 Returns \p exempt_ptr to the leftmost item.
406 If the tree is empty, returns empty \p exempt_ptr.
408 \p Func functor is used to store minimal key.
409 \p Func has the following signature:
412 void operator()( key_type const& key );
415 If the tree is empty, \p f is not called.
416 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
419 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
420 It means that the function gets leftmost leaf of the tree and tries to unlink it.
421 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
422 So, the function returns the item with minimum key at the moment of tree traversing.
424 RCU \p synchronize method can be called. RCU should NOT be locked.
425 The function does not free the item.
426 The deallocator will be implicitly invoked when the returned object is destroyed or when
427 its \p release() member function is called.
429 template <typename Func>
430 exempt_ptr extract_min( Func f )
// Forwards the extracted node's key to the user functor \p f.
432 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
435 /// Extracts minimal key key and corresponding value
437 This function is a shortcut for the following call:
440 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
442 \p key_type should be copy-assignable. The copy of minimal key
443 is returned in \p min_key argument.
// SFINAE-guarded: enabled only for copy-assignable key types.
445 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
446 extract_min_key( key_type& min_key )
// Copy-assigns the minimal key into the caller-provided \p min_key.
448 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
451 /// Extracts a value with maximal key from the tree
453 Returns \p exempt_ptr pointer to the rightmost item.
454 If the set is empty, returns empty \p exempt_ptr.
456 Note that the function returns only the value for maximal key.
457 To retrieve its key use \p extract_max( Func ) member function.
459 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
460 It means that the function gets rightmost leaf of the tree and tries to unlink it.
461 During unlinking, a concurrent thread may insert an item with key greater than leftmost item's key.
462 So, the function returns the item with maximum key at the moment of tree traversing.
464 RCU \p synchronize method can be called. RCU should NOT be locked.
465 The function does not free the item.
466 The deallocator will be implicitly invoked when the returned object is destroyed or when
467 its \p release() is called.
469 exempt_ptr extract_max()
// Key consumer is a no-op: only the value is wanted.
471 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
474 /// Extracts the maximal key and corresponding value
476 Returns \p exempt_ptr pointer to the rightmost item.
477 If the set is empty, returns empty \p exempt_ptr.
479 \p Func functor is used to store maximal key.
480 \p Func has the following signature:
483 void operator()( key_type const& key );
486 If the tree is empty, \p f is not called.
487 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
490 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
491 It means that the function gets rightmost leaf of the tree and tries to unlink it.
492 During unlinking, a concurrent thread may insert an item with key greater than leftmost item's key.
493 So, the function returns the item with maximum key at the moment of tree traversing.
495 RCU \p synchronize method can be called. RCU should NOT be locked.
496 The function does not free the item.
497 The deallocator will be implicitly invoked when the returned object is destroyed or when
498 its \p release() is called.
500 template <typename Func>
501 exempt_ptr extract_max( Func f )
// Forwards the extracted node's key to the user functor \p f.
503 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
506 /// Extracts the maximal key and corresponding value
508 This function is a shortcut for the following call:
511 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
513 \p key_type should be copy-assignable. The copy of maximal key
514 is returned in \p max_key argument.
// SFINAE-guarded: enabled only for copy-assignable key types.
516 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
517 extract_max_key( key_type& max_key )
// Copy-assigns the maximal key into the caller-provided \p max_key.
519 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
522 /// Extracts an item from the map
524 The function searches an item with key equal to \p key in the tree,
525 unlinks it, and returns \p exempt_ptr pointer to a value found.
526 If \p key is not found the function returns an empty \p exempt_ptr.
528 RCU \p synchronize method can be called. RCU should NOT be locked.
529 The function does not destroy the value found.
530 The disposer will be implicitly invoked when the returned object is destroyed or when
531 its \p release() member function is called.
533 template <typename Q>
534 exempt_ptr extract( Q const& key )
// Ownership of the found value passes to the returned exempt_ptr; empty if key is absent.
536 return exempt_ptr(do_extract( key ));
540 /// Extracts an item from the map using \p pred for searching
542 The function is an analog of \p extract(Q const&)
543 but \p pred is used for key compare.
544 \p Less has the interface like \p std::less.
545 \p pred must imply the same element order as the comparator used for building the tree.
547 template <typename Q, typename Less>
548 exempt_ptr extract_with( Q const& key, Less pred )
// Same as extract(Q const&) but ordering comes from \p pred.
550 return exempt_ptr(do_extract_with( key, pred ));
553 /// Find the key \p key
555 The function searches the item with key equal to \p key and calls the functor \p f for item found.
556 The interface of \p Func functor is:
559 void operator()( key_type const& key, mapped_type& item );
562 where \p item is the item found.
563 The functor is called under node-level lock.
565 The function applies RCU lock internally.
567 The function returns \p true if \p key is found, \p false otherwise.
569 template <typename K, typename Func>
570 bool find( K const& key, Func f )
572 return do_find( key, key_comparator(),
573 [&f]( node_type * pNode ) -> bool {
574 assert( pNode != nullptr );
// Relaxed load is sufficient; per the doc above, the functor runs under the node-level lock.
575 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
577 f( pNode->m_key, *pVal );
585 /// Finds the key \p val using \p pred predicate for searching
587 The function is an analog of \p find(K const&, Func)
588 but \p pred is used for key comparing.
589 \p Less functor has the interface like \p std::less.
590 \p Less must imply the same element order as the comparator used for building the map.
592 template <typename K, typename Less, typename Func>
593 bool find_with( K const& key, Less pred, Func f )
// Same as find(K, Func) but the comparator is built from the user-supplied \p Less.
596 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
597 [&f]( node_type * pNode ) -> bool {
598 assert( pNode != nullptr );
599 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
601 f( pNode->m_key, *pVal );
609 /// Find the key \p key
611 The function searches the item with key equal to \p key
612 and returns \p true if it is found, and \p false otherwise.
614 The function applies RCU lock internally.
616 template <typename K>
617 bool find( K const& key )
// Presence check only: the node functor does nothing but report success.
619 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
622 /// Finds the key \p val using \p pred predicate for searching
624 The function is an analog of \p find(K const&)
625 but \p pred is used for key comparing.
626 \p Less functor has the interface like \p std::less.
627 \p Less must imply the same element order as the comparator used for building the map.
629 template <typename K, typename Less>
630 bool find_with( K const& key, Less pred )
// Presence check with user-supplied ordering; node functor only reports success.
633 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
636 /// Clears the tree (thread safe, not atomic)
638 The function unlink all items from the tree.
639 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
643 assert( set.empty() );
645 the assertion could be raised.
647 For each node the \ref disposer will be called after unlinking.
649 RCU \p synchronize method can be called. RCU should not be locked.
653 while ( extract_min() );
656 /// Clears the tree (not thread safe)
658 This function is not thread safe and may be called only when no other thread deals with the tree.
659 The function is used in the tree destructor.
663 clear(); // temp solution
667 /// Checks if the map is empty
670 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
673 /// Returns item count in the map
675 Only leaf nodes containing user data are counted.
677 The value returned depends on item counter type provided by \p Traits template parameter.
678 If it is \p atomicity::empty_item_counter this function always returns 0.
680 The function is not suitable for checking the tree emptiness, use \p empty()
681 member function for this purpose.
685 return m_ItemCounter;
688 /// Returns const reference to internal statistics
689 stat const& statistics() const
694 /// Checks internal consistency (not atomic, not thread-safe)
696 The debugging function to check internal consistency of the tree.
698 bool check_consistency() const
700 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
703 /// Checks internal consistency (not atomic, not thread-safe)
705 The debugging function to check internal consistency of the tree.
706 The functor \p Func is called if a violation of internal tree structure
710 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
714 - \p nLevel - the level where the violation is found
715 - \p hLeft - the height of left subtree
716 - \p hRight - the height of right subtree
718 The functor is called for each violation found.
720 template <typename Func>
721 bool check_consistency( Func f ) const
// The walk starts from the right child of the root sentinel (the real tree root).
723 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
726 do_check_consistency( pChild, 1, f, nErrors );
734 template <typename Func>
735 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
// Recursively measures subtree heights; reports any AVL balance violation (height diff > 1) via \p f.
738 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
739 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
741 if ( hLeft >= hRight ) {
742 if ( hLeft - hRight > 1 ) {
743 f( nLevel, hLeft, hRight );
// Symmetric case: right subtree taller than left by more than one level.
749 if ( hRight - hLeft > 1 ) {
750 f( nLevel, hLeft, hRight );
759 template <typename Q, typename Compare, typename Func>
760 bool do_find( Q& key, Compare cmp, Func f ) const
// Retries try_find until it returns a definite found / not_found answer.
765 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
767 assert( result != find_result::retry );
768 return result == find_result::found;
771 template <typename K, typename Compare, typename Func>
772 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
// Deadlock check first: RCU must not be locked because synchronize() may be called.
774 check_deadlock_policy::check();
// Retired nodes/values accumulate here and are reclaimed when removed_list goes out of scope.
776 rcu_disposer removed_list;
779 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
783 template <typename K, typename Compare, typename Func>
784 bool do_remove( K const& key, Compare cmp, Func func )
786 // Func must return true if the value was disposed
787 // or false if the value was extracted
// RCU must not be locked here because synchronize() may be called during removal.
789 check_deadlock_policy::check();
791 rcu_disposer removed_list;
794 return try_remove_root( key, cmp, func, removed_list );
798 template <typename Func>
799 mapped_type do_extract_min( Func f )
801 mapped_type pExtracted = nullptr;
// Lambda returns false: the value is extracted (ownership to caller), not disposed.
804 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
809 template <typename Func>
810 mapped_type do_extract_max( Func f )
812 mapped_type pExtracted = nullptr;
// Lambda returns false: the value is extracted (ownership to caller), not disposed.
815 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
820 template <typename Func>
821 void do_extract_minmax( int nDir, Func func )
823 check_deadlock_policy::check();
825 rcu_disposer removed_list;
829 int result = update_flags::failed;
831 // get right child of root
832 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
// A child in "shrinking" state is mid-rotation: wait for it to finish, then retry from the root.
834 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
835 if ( nChildVersion & node_type::shrinking ) {
836 m_stat.onRemoveRootWaitShrinking();
837 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
838 result = update_flags::retry;
// Re-read to make sure the root's child did not change between the version read and the descent.
840 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
841 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
844 } while ( result == update_flags::retry );
848 template <typename Q>
849 mapped_type do_extract( Q const& key )
851 mapped_type pExtracted = nullptr;
// Lambda returns false: the value is extracted for the caller, not disposed.
855 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
860 template <typename Q, typename Less>
861 mapped_type do_extract_with( Q const& key, Less pred )
864 mapped_type pExtracted = nullptr;
// Same as do_extract but with a comparator built from \p Less.
867 cds::opt::details::make_comparator_from_less<Less>(),
868 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
877 template <typename Q, typename Compare, typename Func>
878 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
// Hand-over-hand optimistic search: descend by comparison result, re-validating pNode's
// version after each child read; a version change signals a concurrent rotation -> retry.
880 assert( gc::is_locked() );
884 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
886 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
887 m_stat.onFindRetry();
888 return find_result::retry;
891 m_stat.onFindFailed();
892 return find_result::not_found;
895 int nCmp = cmp( key, pChild->m_key );
// Exact match: lock the node and re-check it still holds a value before reporting success.
897 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
899 node_scoped_lock l( m_Monitor, *pChild );
900 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
902 m_stat.onFindSuccess();
903 return find_result::found;
908 m_stat.onFindFailed();
909 return find_result::not_found;
// Child is being shrunk (rotated): wait, then re-validate pNode's version.
912 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
913 if ( nChildVersion & node_type::shrinking ) {
914 m_stat.onFindWaitShrinking();
915 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
917 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
918 m_stat.onFindRetry();
919 return find_result::retry;
922 else if ( nChildVersion != node_type::unlinked ) {
924 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
925 m_stat.onFindRetry();
926 return find_result::retry;
// Recurse into the child; only a non-retry result is propagated upward.
929 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
930 if ( found != find_result::retry )
936 template <typename K, typename Compare, typename Func>
937 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
// Insert/update entry point: descends from the root sentinel's right child,
// retrying while concurrent rotations invalidate the observed versions.
939 assert( gc::is_locked() );
943 // get right child of root
944 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
946 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
947 if ( nChildVersion & node_type::shrinking ) {
948 m_stat.onUpdateRootWaitShrinking();
949 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
950 result = update_flags::retry;
952 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
953 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
// Tree is empty: insert the new node directly as the right child of the root sentinel.
958 if ( nFlags & update_flags::allow_insert ) {
959 // insert into tree as right child of the root
961 node_scoped_lock l( m_Monitor, *m_pRoot );
962 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
// NOTE(review): this assigns the *boolean* result of a comparison to `result`;
// it looks like `result = update_flags::retry;` was intended - verify against upstream libcds.
963 result = result == update_flags::retry;
967 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
968 mapped_type pVal = funcUpdate( pNew );
969 assert( pVal != nullptr );
// Publish the value with release order so readers that find the node see it fully built.
970 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
972 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
973 m_pRoot->height( 2, memory_model::memory_order_relaxed );
977 m_stat.onInsertSuccess();
978 return update_flags::result_inserted;
981 return update_flags::failed;
983 } while ( result == update_flags::retry );
987 template <typename K, typename Compare, typename Func>
988 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
// Removal entry point: same root-descent/retry protocol as try_update_root.
990 assert( gc::is_locked() );
994 // get right child of root
995 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
997 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
998 if ( nChildVersion & node_type::shrinking ) {
999 m_stat.onRemoveRootWaitShrinking();
1000 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1001 result = update_flags::retry;
1003 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1004 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1009 } while ( result == update_flags::retry );
1011 return result == update_flags::result_removed;
1014 template <typename K, typename Compare, typename Func>
1015 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1017 assert( gc::is_locked() );
1018 assert( nVersion != node_type::unlinked );
1019 CDS_UNUSED( pParent );
// Key matches this node: update in place if allowed, otherwise fail.
1021 int nCmp = cmp( key, pNode->m_key );
1023 if ( nFlags & update_flags::allow_update ) {
1024 return try_update_node( funcUpdate, pNode, disp );
1026 return update_flags::failed;
// Descend toward the key; validate pNode's version after reading the child pointer.
1031 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1032 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1033 m_stat.onUpdateRetry();
1034 return update_flags::retry;
// No child on the search path: the key is absent - insert a leaf here if permitted.
1037 if ( pChild == nullptr ) {
1039 if ( nFlags & update_flags::allow_insert )
1040 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1042 result = update_flags::failed;
1046 result = update_flags::retry;
1047 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1048 if ( nChildVersion & node_type::shrinking ) {
1049 m_stat.onUpdateWaitShrinking();
1050 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1053 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1054 // this second read is important, because it is protected by nChildVersion
1056 // validate the read that our caller took to get to node
1057 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1058 m_stat.onUpdateRetry();
1059 return update_flags::retry;
1062 // At this point we know that the traversal our parent took to get to node is still valid.
1063 // The recursive implementation will validate the traversal from node to
1064 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1065 // This means that we are no longer vulnerable to node shrinks, and we don't need
1066 // to validate node version any more.
1067 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1070 } while ( result == update_flags::retry );
1074 template <typename K, typename Compare, typename Func>
1075 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1077 assert( gc::is_locked() );
1078 assert( nVersion != node_type::unlinked );
// Key matches this node: attempt the actual unlink/dispose.
1080 int nCmp = cmp( key, pNode->m_key );
1082 return try_remove_node( pParent, pNode, nVersion, func, disp );
// Descend toward the key; validate pNode's version after reading the child pointer.
1086 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1087 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1088 m_stat.onRemoveRetry();
1089 return update_flags::retry;
// No child on the search path: the key is not in the tree.
1092 if ( pChild == nullptr ) {
1093 return update_flags::failed;
1097 result = update_flags::retry;
1098 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1099 if ( nChildVersion & node_type::shrinking ) {
1100 m_stat.onRemoveWaitShrinking();
1101 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1104 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1105 // this second read is important, because it is protected by nChildVersion
1107 // validate the read that our caller took to get to node
1108 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1109 m_stat.onRemoveRetry();
1110 return update_flags::retry;
1113 // At this point we know that the traversal our parent took to get to node is still valid.
1114 // The recursive implementation will validate the traversal from node to
1115 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1116 // This means that we are no longer vulnerable to node shrinks, and we don't need
1117 // to validate node version any more.
1118 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1121 } while ( result == update_flags::retry );
// Recursive step for extract_min()/extract_max(): walks the extreme branch
// selected by \p nDir until there is no further child in that direction,
// then removes the node found via try_remove_node(). Uses the same
// version-validation protocol as try_remove().
1125 template <typename Func>
1126 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1128 assert( gc::is_locked() );
1129 assert( nVersion != node_type::unlinked );
1133 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// pNode was rotated after the caller snapshotted nVersion - restart
1134 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1135 m_stat.onRemoveRetry();
1136 return update_flags::retry;
// no child in the chosen direction: pNode is the extreme node - remove it
1139 if ( pChild == nullptr ) {
1141 return try_remove_node( pParent, pNode, nVersion, func, disp );
1144 result = update_flags::retry;
1145 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// wait out a concurrent rotation of pChild before retrying
1146 if ( nChildVersion & node_type::shrinking ) {
1147 m_stat.onRemoveWaitShrinking();
1148 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1151 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1152 // this second read is important, because it is protected by nChildVersion
1154 // validate the read that our caller took to get to node
1155 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1156 m_stat.onRemoveRetry();
1157 return update_flags::retry;
1160 // At this point we know that the traversal our parent took to get to node is still valid.
1161 // The recursive implementation will validate the traversal from node to
1162 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1163 // This means that we are no longer vulnerable to node shrinks, and we don't need
1164 // to validate node version any more.
1165 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1168 } while ( result == update_flags::retry );
// Creates a new leaf under \p pNode in direction \p nDir and links it in.
// With c_bRelaxedInsert the node (and its value, via funcUpdate) is
// allocated BEFORE taking pNode's lock; the version/child slot is then
// re-validated under the lock and the speculative allocation is discarded
// on conflict. funcUpdate must produce a non-null mapped_type pointer.
1172 template <typename K, typename Func>
1173 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// fills the freshly allocated node's value from the user functor
1177 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1178 mapped_type pVal = funcUpdate( pNew );
1179 assert( pVal != nullptr );
1180 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// relaxed insert: pre-check without the lock, then allocate speculatively
1183 if ( c_bRelaxedInsert ) {
1184 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1185 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1187 m_stat.onInsertRetry();
1188 return update_flags::retry;
1191 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1194 node_type * pDamaged;
1196 assert( pNode != nullptr );
1197 node_scoped_lock l( m_Monitor, *pNode );
// re-validate under the lock: version unchanged and the slot still empty
1199 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1200 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1202 if ( c_bRelaxedInsert ) {
1204 m_stat.onRelaxedInsertFailed();
1207 m_stat.onInsertRetry();
1208 return update_flags::retry;
// non-relaxed path allocates only now, safely under the lock
1211 if ( !c_bRelaxedInsert )
1212 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1214 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1215 pDamaged = fix_height_locked( pNode );
1219 m_stat.onInsertSuccess();
// repair heights/balance from the first damaged ancestor upwards
1221 fix_height_and_rebalance( pDamaged, disp );
1223 return update_flags::result_inserted;
// Replaces the value pointer of an existing \p pNode under its lock.
// The old value (if disposed - see the dispose_value call) is passed to
// the RCU disposer rather than freed immediately. Returns retry if pNode
// was unlinked concurrently, result_updated on success.
1226 template <typename Func>
1227 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1230 assert( pNode != nullptr );
1232 node_scoped_lock l( m_Monitor, *pNode );
// node removed while we were reaching it - the caller must restart
1234 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1235 m_stat.onUpdateUnlinked();
1236 return update_flags::retry;
1239 pOld = pNode->value( memory_model::memory_order_relaxed );
// user functor builds the replacement value; must not return nullptr
1240 mapped_type pVal = funcUpdate( pNode );
1244 assert( pVal != nullptr );
1245 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// defer destruction of the displaced value to RCU reclamation
1250 disp.dispose_value(pOld);
1251 m_stat.onDisposeValue();
1254 m_stat.onUpdateSuccess();
1255 return update_flags::result_updated;
// Removes \p pNode (the node whose key matched). Two cases:
//  - pNode has at most one child: physically unlink it from pParent under
//    both parent and node locks, then fix heights/rebalance;
//  - pNode has two children: only clear its value pointer, leaving the
//    node behind as a valueless routing node.
// In both cases func( key, pOld, disp ) consumes the old value (it calls
// the value disposer itself - see the comment at the call sites), and
// nVersion guards against a concurrent rotation of pNode.
// NOTE: memory ordering is routed through the memory_model trait (not raw
// atomics::) for consistency with the rest of the class, so that a
// sequential_consistent memory model is honored here as well.
1258 template <typename Func>
1259 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1261 assert( pParent != nullptr );
1262 assert( pNode != nullptr );
// already logically removed - nothing to do
1264 if ( !pNode->is_valued( memory_model::memory_order_relaxed ) )
1265 return update_flags::failed;
// unlink is possible only when pNode has at most one child
1267 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1268 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1270 node_type * pDamaged;
// lock order: parent first, then node
1273 node_scoped_lock lp( m_Monitor, *pParent );
1274 if ( pParent->is_unlinked( memory_model::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1275 return update_flags::retry;
1278 node_scoped_lock ln( m_Monitor, *pNode );
1279 pOld = pNode->value( memory_model::memory_order_relaxed );
// version must still match and the physical unlink must succeed
1280 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1282 && try_unlink_locked( pParent, pNode, disp )))
1284 return update_flags::retry;
1287 pDamaged = fix_height_locked( pParent );
1291 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1292 m_stat.onDisposeValue();
1294 m_stat.onExtractValue();
1296 fix_height_and_rebalance( pDamaged, disp );
1297 return update_flags::result_removed;
// two-children case: logical removal - just null out the value pointer
1300 int result = update_flags::retry;
1303 node_scoped_lock ln( m_Monitor, *pNode );
1304 pOld = pNode->value( memory_model::memory_order_relaxed );
1305 if ( pNode->version( memory_model::memory_order_relaxed ) == nVersion && pOld ) {
1306 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1307 result = update_flags::result_removed;
1311 if ( result == update_flags::result_removed ) {
1313 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1314 m_stat.onDisposeValue();
1316 m_stat.onExtractValue();
// Physically splices \p pNode out of the tree: its single child (or
// nullptr) replaces it under pParent, the node is version-marked as
// unlinked, and the node itself is handed to the RCU disposer.
// Preconditions: both pParent and pNode are locked by the caller, and
// pNode has at most one child (checked below).
1323 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1325 // pParent and pNode must be locked
1326 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1328 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1329 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1330 if ( pNode != pParentLeft && pNode != pParentRight ) {
1331 // node is no longer a child of parent
1335 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1336 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1338 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1339 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// both children present - cannot splice, caller must fall back
1340 if ( pLeft != nullptr && pRight != nullptr ) {
1341 // splicing is no longer possible
// the surviving subtree (possibly nullptr) takes pNode's slot in pParent
1344 node_type * pSplice = pLeft ? pLeft : pRight;
1346 if ( pParentLeft == pNode )
1347 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1349 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1352 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1354 // Mark the node as unlinked
1355 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1357 // The value will be disposed by calling function
1358 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// defer freeing the node structure until RCU grace period ends
1360 disp.dispose( pNode );
1361 m_stat.onDisposeNode();
1368 private: // rotations
// Classifies what maintenance \p pNode needs, using unlocked relaxed
// reads (the result is therefore only a hint that callers re-check under
// locks). Returns one of:
//   unlink_required    - valueless routing node with <= 1 child,
//   rebalance_required - AVL balance factor outside [-1, +1],
//   nothing_required   - height is already correct,
//   otherwise the corrected height value to store.
1370 int estimate_node_condition( node_type * pNode )
1372 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1373 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1375 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1376 return unlink_required;
1378 int h = pNode->height( memory_model::memory_order_relaxed );
1379 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1380 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1382 int hNew = 1 + std::max( hL, hR );
1383 int nBalance = hL - hR;
1385 if ( nBalance < -1 || nBalance > 1 )
1386 return rebalance_required;
// stale height: report the value to fix; nothing_required if up-to-date
1388 return h != hNew ? hNew : nothing_required;
// Locking wrapper around fix_height_locked(): acquires pNode's monitor
// lock for the duration of the height repair.
1391 node_type * fix_height( node_type * pNode )
1393 assert( pNode != nullptr );
1394 node_scoped_lock l( m_Monitor, *pNode );
1395 return fix_height_locked( pNode );
// Repairs pNode's cached height according to estimate_node_condition().
// Must be called with pNode locked. When the height was stale it is
// updated and pNode's parent is returned as the next damaged candidate;
// rebalance/unlink conditions are left for the caller's machinery.
1398 node_type * fix_height_locked( node_type * pNode )
1400 // pNode must be locked!!!
1401 int h = estimate_node_condition( pNode );
1403 case rebalance_required:
1404 case unlink_required:
1406 case nothing_required:
// default: h is the corrected height - store it and propagate upwards
1409 pNode->height( h, memory_model::memory_order_relaxed );
1410 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from \p pNode repairing heights and performing rotations until
// no more damage is found. Stops at the root sentinel (node with no
// parent) or when pNode needs nothing / was unlinked concurrently.
1414 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1416 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1417 int nCond = estimate_node_condition( pNode );
1418 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// only a stale height: cheap fix, then continue with the parent
1421 if ( nCond != unlink_required && nCond != rebalance_required )
1422 pNode = fix_height( pNode );
1424 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1425 assert( pParent != nullptr );
// lock order: parent before node; re-validate the link under the lock
1427 node_scoped_lock lp( m_Monitor, *pParent );
1428 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1429 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1431 node_scoped_lock ln( m_Monitor, *pNode );
1432 pNode = rebalance_locked( pParent, pNode, disp );
// Performs one rebalancing action at \p pNode (unlink of a valueless
// routing node, right/left rotation, or height fix) and returns the next
// damaged node to process, or nullptr when rebalancing is done.
1439 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1441 // pParent and pNode should be locked.
1442 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1443 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1444 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1445 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1447 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1448 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// valueless routing node with <= 1 child: try to splice it out first
1450 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1451 if ( try_unlink_locked( pParent, pNode, disp ))
1452 return fix_height_locked( pParent );
1454 // retry needed for pNode
1459 int h = pNode->height( memory_model::memory_order_relaxed );
1460 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1461 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1462 int hNew = 1 + std::max( hL, hR );
1463 int balance = hL - hR;
// left-heavy -> rotate right; right-heavy -> rotate left
1466 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1467 else if ( balance < -1 )
1468 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1469 else if ( hNew != h ) {
1470 pNode->height( hNew, memory_model::memory_order_relaxed );
1472 // pParent is already locked
1473 return fix_height_locked( pParent );
// Fixes a left-heavy pNode: chooses between a single right rotation and a
// double (left-then-right) rotation, depending on the relative heights of
// pLeft's subtrees. May instead recurse to balance pLeft first.
1479 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1481 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1482 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1483 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1485 // pParent and pNode is locked yet
1486 // pNode->pLeft is too large, we will rotate-right.
1487 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1490 assert( pLeft != nullptr );
1491 node_scoped_lock l( m_Monitor, *pLeft );
// pLeft changed under us before we got its lock - retry at pNode
1492 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1493 return pNode; // retry for pNode
1495 int hL = pLeft->height( memory_model::memory_order_relaxed );
1497 return pNode; // retry
1499 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1500 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1501 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1502 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// simple case: single right rotation suffices
1506 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1509 assert( pLRight != nullptr );
1511 node_scoped_lock lr( m_Monitor, *pLRight );
1512 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1513 return pNode; // retry
1515 hLR = pLRight->height( memory_model::memory_order_relaxed );
1517 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1519 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1520 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1521 int balance = hLL - hLRL;
1522 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1523 // nParent.child.left won't be damaged after a double rotation
1524 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1528 // focus on pLeft, if necessary pNode will be balanced later
1529 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked(): fixes a right-heavy pNode via a
// single left rotation, a double (right-then-left) rotation, or by first
// recursing into pRight.
1534 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1536 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1537 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1538 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1540 // pParent and pNode is locked yet
1542 assert( pRight != nullptr );
1543 node_scoped_lock l( m_Monitor, *pRight );
// right link changed before we acquired pRight's lock - retry at pNode
1544 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1545 return pNode; // retry for pNode
1547 int hR = pRight->height( memory_model::memory_order_relaxed );
// no longer right-heavy enough - retry
1548 if ( hL - hR >= -1 )
1549 return pNode; // retry
1551 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1552 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1553 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1554 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// simple case: single left rotation suffices
1556 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1559 assert( pRLeft != nullptr );
1560 node_scoped_lock lrl( m_Monitor, *pRLeft );
1561 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1562 return pNode; // retry
1564 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1566 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1568 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1569 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1570 int balance = hRR - hRLR;
1571 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1572 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// otherwise balance pRight first; pNode will be revisited later if needed
1574 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Publishes the 'shrinking' flag in pNode's version word so concurrent
// optimistic readers know a rotation is in progress and must wait/retry.
1578 static void begin_change( node_type * pNode, version_type version )
1580 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a rotation started with begin_change(): clears the flag bits
// and bumps the version so every reader that validated against the old
// version detects the change.
1582 static void end_change( node_type * pNode, version_type version )
1584 // Clear shrinking and unlinked flags and increment version
1585 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the subtree root, pNode becomes
// pLeft's right child, pLRight (pLeft's old right subtree) becomes
// pNode's left child. pParent, pNode, pLeft are locked by the caller;
// pNode's version is marked shrinking for the duration of the relinking.
// Returns the next damaged node for the rebalancing loop.
1588 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1590 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1591 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1593 begin_change( pNode, nodeVersion );
// relink: pNode adopts pLRight as its left subtree
1595 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1596 if ( pLRight != nullptr )
1597 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1599 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1600 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLeft replaces pNode in whichever slot of pParent held it
1602 if ( pParentLeft == pNode )
1603 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1605 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1606 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1608 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1610 // fix up heights links
1611 int hNode = 1 + std::max( hLR, hR );
1612 pNode->height( hNode, memory_model::memory_order_relaxed );
1613 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1615 end_change( pNode, nodeVersion );
1616 m_stat.onRotateRight();
1618 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1619 // parent.child). pNode is the deepest. Perform as many fixes as we can
1620 // with the locks we've got.
1622 // We've already fixed the height for pNode, but it might still be outside
1623 // our allowable balance range. In that case a simple fix_height_locked()
1625 int nodeBalance = hLR - hR;
1626 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1627 // we need another rotation at pNode
1631 // we've fixed balance and height damage for pNode, now handle
1632 // extra-routing node damage
1633 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1634 // we need to remove pNode and then repair
1638 // we've already fixed the height at pLeft, do we need a rotation here?
1639 int leftBalance = hLL - hNode;
1640 if ( leftBalance < -1 || leftBalance > 1 )
1643 // pLeft might also have routing node damage (if pLeft.left was null)
1644 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1647 // try to fix the parent height while we've still got the lock
1648 return fix_height_locked( pParent );
// Single left rotation, mirror of rotate_right_locked(): pRight becomes
// the subtree root, pNode becomes its left child, pRLeft moves under
// pNode. Caller holds the necessary locks; pNode's version is marked
// shrinking across the relink. Returns the next damaged node.
1651 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1653 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1654 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1656 begin_change( pNode, nodeVersion );
1658 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1659 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1660 if ( pRLeft != nullptr )
1661 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1663 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1664 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRight replaces pNode in whichever slot of pParent held it
1666 if ( pParentLeft == pNode )
1667 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1669 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1670 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1672 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1675 int hNode = 1 + std::max( hL, hRL );
1676 pNode->height( hNode, memory_model::memory_order_relaxed );
1677 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1679 end_change( pNode, nodeVersion );
1680 m_stat.onRotateLeft();
// post-rotation damage checks (mirror the comments in rotate_right_locked)
1682 int nodeBalance = hRL - hL;
1683 if ( nodeBalance < -1 || nodeBalance > 1 )
1686 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1689 int rightBalance = hRR - hNode;
1690 if ( rightBalance < -1 || rightBalance > 1 )
1693 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1696 return fix_height_locked( pParent );
// Double rotation (left-right case): pLRight (pLeft's right child) is
// lifted to be the subtree root, with pLeft as its left and pNode as its
// right child. Both pNode and pLeft are version-marked shrinking during
// the relink. Returns the next damaged node for the rebalancing loop.
1699 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1701 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1702 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1704 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1705 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1706 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1707 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1709 begin_change( pNode, nodeVersion );
1710 begin_change( pLeft, leftVersion );
1712 // fix up pNode links, careful about the order!
1713 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1714 if ( pLRR != nullptr )
1715 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1717 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1718 if ( pLRL != nullptr )
1719 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the new subtree root over pLeft and pNode
1721 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1722 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1723 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1724 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1727 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1729 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1730 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1732 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1735 int hNode = 1 + std::max( hLRR, hR );
1736 pNode->height( hNode, memory_model::memory_order_relaxed );
1737 int hLeft = 1 + std::max( hLL, hLRL );
1738 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1739 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1741 end_change( pNode, nodeVersion );
1742 end_change( pLeft, leftVersion );
1743 m_stat.onRotateRightOverLeft();
1745 // caller should have performed only a single rotation if pLeft was going
1746 // to end up damaged
1747 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1748 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1750 // We have damaged pParent, pLR (now parent.child), and pNode (now
1751 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1752 // can with the locks we've got.
1754 // We've already fixed the height for pNode, but it might still be outside
1755 // our allowable balance range. In that case a simple fix_height_locked()
1757 int nodeBalance = hLRR - hR;
1758 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1759 // we need another rotation at pNode
1763 // pNode might also be damaged by being an unnecessary routing node
1764 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1765 // repair involves splicing out pNode and maybe more rotations
1769 // we've already fixed the height at pLRight, do we need a rotation here?
1770 int balanceLR = hLeft - hNode;
1771 if ( balanceLR < -1 || balanceLR > 1 )
1774 // try to fix the parent height while we've still got the lock
1775 return fix_height_locked( pParent );
// Double rotation (right-left case), mirror of
// rotate_right_over_left_locked(): pRLeft is lifted to be the subtree
// root, with pNode as its left and pRight as its right child. Both pNode
// and pRight are version-marked shrinking during the relink. Returns the
// next damaged node for the rebalancing loop.
1778 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1780 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1781 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1783 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1784 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1785 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1786 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1788 begin_change( pNode, nodeVersion );
1789 begin_change( pRight, rightVersion );
1791 // fix up pNode links, careful about the order!
1792 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1793 if ( pRLL != nullptr )
1794 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1796 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1797 if ( pRLR != nullptr )
1798 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the new subtree root over pNode and pRight
1800 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1801 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1802 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1803 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1806 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1808 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1809 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1811 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1814 int hNode = 1 + std::max( hL, hRLL );
1815 pNode->height( hNode, memory_model::memory_order_relaxed );
1816 int hRight = 1 + std::max( hRLR, hRR );
1817 pRight->height( hRight, memory_model::memory_order_relaxed );
1818 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1820 end_change( pNode, nodeVersion );
1821 end_change( pRight, rightVersion );
1822 m_stat.onRotateLeftOverRight();
// caller should have done a single rotation if pRight would end up damaged
1824 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// post-rotation damage checks (mirror rotate_right_over_left_locked)
1826 int nodeBalance = hRLL - hL;
1827 if ( nodeBalance < -1 || nodeBalance > 1 )
1829 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1832 int balRL = hRight - hNode;
1833 if ( balRL < -1 || balRL > 1 )
1836 return fix_height_locked( pParent );
1841 }} // namespace cds::container
1843 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H