3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by the \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 cxx_allocator().Delete( pNode );
149 static void free_value( mapped_type pVal )
154 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
156 return pNode->child( nDir ).load( order );
159 static node_type * parent( node_type * pNode, atomics::memory_order order )
161 return pNode->m_pParent.load( order );
167 node_type * m_pRetiredList; ///< head of retired node list
168 mapped_type m_pRetiredValue; ///< value retired
172 : m_pRetiredList( nullptr )
173 , m_pRetiredValue( nullptr )
181 void dispose( node_type * pNode )
183 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
184 pNode->m_pNextRemoved = m_pRetiredList;
185 m_pRetiredList = pNode;
188 void dispose_value( mapped_type pVal )
190 assert( m_pRetiredValue == nullptr );
191 m_pRetiredValue = pVal;
195 struct internal_disposer
197 void operator()( node_type * p ) const
205 assert( !gc::is_locked() );
207 // TODO: use RCU::batch_retire
210 for ( node_type * p = m_pRetiredList; p; ) {
211 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
212 // Value already disposed
213 gc::template retire_ptr<internal_disposer>( p );
218 if ( m_pRetiredValue )
219 gc::template retire_ptr<disposer>( m_pRetiredValue );
227 typename node_type::base_class m_Root;
229 item_counter m_ItemCounter;
230 mutable sync_monitor m_Monitor;
235 /// Creates empty map
237 : m_pRoot( static_cast<node_type *>( &m_Root ))
248 The \p key_type should be constructible from a value of type \p K.
250 RCU \p synchronize() can be called. RCU should not be locked.
252 Returns \p true if inserting successful, \p false otherwise.
254 template <typename K>
255 bool insert( K const& key, mapped_type pVal )
257 return do_update(key, key_comparator(),
258 [pVal]( node_type * pNode ) -> mapped_type
260 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
264 update_flags::allow_insert
265 ) == update_flags::result_inserted;
268 /// Updates the value for \p key
270 The operation performs inserting or updating the value for \p key with lock-free manner.
271 If \p bInsert is \p false, only updating of existing node is possible.
273 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
274 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
275 constructible from type \p K.
276 Otherwise, the value for \p key will be changed to \p pVal.
278 RCU \p synchronize() method can be called. RCU should not be locked.
280 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
281 \p second is \p true if new node has been added or \p false if the node with \p key
284 template <typename K>
285 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
287 int result = do_update( key, key_comparator(),
288 [pVal]( node_type * ) -> mapped_type
292 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
294 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
298 template <typename K>
299 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
301 return update( key, pVal, true );
306 /// Delete \p key from the map
308 RCU \p synchronize() method can be called. RCU should not be locked.
310 Return \p true if \p key is found and deleted, \p false otherwise
312 template <typename K>
313 bool erase( K const& key )
318 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
322 /// Deletes the item from the map using \p pred predicate for searching
324 The function is an analog of \p erase(K const&)
325 but \p pred is used for key comparing.
326 \p Less functor has the interface like \p std::less.
327 \p Less must imply the same element order as the comparator used for building the map.
329 template <typename K, typename Less>
330 bool erase_with( K const& key, Less pred )
335 cds::opt::details::make_comparator_from_less<Less>(),
336 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
340 /// Delete \p key from the map
342 The function searches an item with key \p key, calls \p f functor
343 and deletes the item. If \p key is not found, the functor is not called.
345 The functor \p Func interface:
348 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
352 RCU \p synchronize method can be called. RCU should not be locked.
354 Return \p true if key is found and deleted, \p false otherwise
356 template <typename K, typename Func>
357 bool erase( K const& key, Func f )
362 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
365 disp.dispose_value(pVal);
371 /// Deletes the item from the map using \p pred predicate for searching
373 The function is an analog of \p erase(K const&, Func)
374 but \p pred is used for key comparing.
375 \p Less functor has the interface like \p std::less.
376 \p Less must imply the same element order as the comparator used for building the map.
378 template <typename K, typename Less, typename Func>
379 bool erase_with( K const& key, Less pred, Func f )
384 cds::opt::details::make_comparator_from_less<Less>(),
385 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
388 disp.dispose_value(pVal);
394 /// Extracts a value with minimal key from the map
396 Returns \p exempt_ptr to the leftmost item.
397 If the tree is empty, returns empty \p exempt_ptr.
399 Note that the function returns only the value for minimal key.
400 To retrieve its key use \p extract_min( Func ) member function.
402 @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> minimal key.
403 It means that the function gets leftmost leaf of the tree and tries to unlink it.
404 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
405 So, the function returns the item with minimum key at the moment of tree traversing.
407 RCU \p synchronize method can be called. RCU should NOT be locked.
408 The function does not free the item.
409 The deallocator will be implicitly invoked when the returned object is destroyed or when
410 its \p release() member function is called.
412 exempt_ptr extract_min()
414 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
417 /// Extracts the minimal key and corresponding value
419 Returns \p exempt_ptr to the leftmost item.
420 If the tree is empty, returns empty \p exempt_ptr.
422 \p Func functor is used to store minimal key.
423 \p Func has the following signature:
426 void operator()( key_type const& key );
429 If the tree is empty, \p f is not called.
430 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
433 @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> minimal key.
434 It means that the function gets leftmost leaf of the tree and tries to unlink it.
435 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
436 So, the function returns the item with minimum key at the moment of tree traversing.
438 RCU \p synchronize method can be called. RCU should NOT be locked.
439 The function does not free the item.
440 The deallocator will be implicitly invoked when the returned object is destroyed or when
441 its \p release() member function is called.
443 template <typename Func>
444 exempt_ptr extract_min( Func f )
446 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
449 /// Extracts the minimal key and corresponding value
451 This function is a shortcut for the following call:
454 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
456 \p key_type should be copy-assignable. The copy of minimal key
457 is returned in \p min_key argument.
459 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
460 extract_min_key( key_type& min_key )
462 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
465 /// Extracts a value with maximal key from the tree
467 Returns \p exempt_ptr pointer to the rightmost item.
468 If the set is empty, returns empty \p exempt_ptr.
470 Note that the function returns only the value for maximal key.
471 To retrieve its key use \p extract_max( Func ) member function.
473 @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> maximal key.
474 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
475 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
476 So, the function returns the item with maximum key at the moment of tree traversing.
478 RCU \p synchronize method can be called. RCU should NOT be locked.
479 The function does not free the item.
480 The deallocator will be implicitly invoked when the returned object is destroyed or when
481 its \p release() is called.
483 exempt_ptr extract_max()
485 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
488 /// Extracts the maximal key and corresponding value
490 Returns \p exempt_ptr pointer to the rightmost item.
491 If the set is empty, returns empty \p exempt_ptr.
493 \p Func functor is used to store maximal key.
494 \p Func has the following signature:
497 void operator()( key_type const& key );
500 If the tree is empty, \p f is not called.
501 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
504 @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> maximal key.
505 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
506 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
507 So, the function returns the item with maximum key at the moment of tree traversing.
509 RCU \p synchronize method can be called. RCU should NOT be locked.
510 The function does not free the item.
511 The deallocator will be implicitly invoked when the returned object is destroyed or when
512 its \p release() is called.
514 template <typename Func>
515 exempt_ptr extract_max( Func f )
517 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
520 /// Extracts the maximal key and corresponding value
522 This function is a shortcut for the following call:
525 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
527 \p key_type should be copy-assignable. The copy of maximal key
528 is returned in \p max_key argument.
530 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
531 extract_max_key( key_type& max_key )
533 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
536 /// Extracts an item from the map
538 The function searches an item with key equal to \p key in the tree,
539 unlinks it, and returns \p exempt_ptr pointer to a value found.
540 If \p key is not found the function returns an empty \p exempt_ptr.
542 RCU \p synchronize method can be called. RCU should NOT be locked.
543 The function does not destroy the value found.
544 The disposer will be implicitly invoked when the returned object is destroyed or when
545 its \p release() member function is called.
547 template <typename Q>
548 exempt_ptr extract( Q const& key )
550 return exempt_ptr(do_extract( key ));
554 /// Extracts an item from the map using \p pred for searching
556 The function is an analog of \p extract(Q const&)
557 but \p pred is used for key compare.
558 \p Less has the interface like \p std::less.
559 \p pred must imply the same element order as the comparator used for building the tree.
561 template <typename Q, typename Less>
562 exempt_ptr extract_with( Q const& key, Less pred )
564 return exempt_ptr(do_extract_with( key, pred ));
567 /// Find the key \p key
569 The function searches the item with key equal to \p key and calls the functor \p f for item found.
570 The interface of \p Func functor is:
573 void operator()( key_type const& key, mapped_type& item );
576 where \p item is the item found.
577 The functor is called under node-level lock.
579 The function applies RCU lock internally.
581 The function returns \p true if \p key is found, \p false otherwise.
583 template <typename K, typename Func>
584 bool find( K const& key, Func f )
586 return do_find( key, key_comparator(),
587 [&f]( node_type * pNode ) -> bool {
588 assert( pNode != nullptr );
589 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
591 f( pNode->m_key, *pVal );
599 /// Finds the key \p val using \p pred predicate for searching
601 The function is an analog of \p find(K const&, Func)
602 but \p pred is used for key comparing.
603 \p Less functor has the interface like \p std::less.
604 \p Less must imply the same element order as the comparator used for building the map.
606 template <typename K, typename Less, typename Func>
607 bool find_with( K const& key, Less pred, Func f )
610 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
611 [&f]( node_type * pNode ) -> bool {
612 assert( pNode != nullptr );
613 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
615 f( pNode->m_key, *pVal );
623 /// Find the key \p key
625 The function searches the item with key equal to \p key
626 and returns \p true if it is found, and \p false otherwise.
628 The function applies RCU lock internally.
630 template <typename K>
631 bool find( K const& key )
633 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
636 /// Finds the key \p val using \p pred predicate for searching
638 The function is an analog of \p find(K const&)
639 but \p pred is used for key comparing.
640 \p Less functor has the interface like \p std::less.
641 \p Less must imply the same element order as the comparator used for building the map.
643 template <typename K, typename Less>
644 bool find_with( K const& key, Less pred )
647 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
650 /// Clears the tree (thread safe, not atomic)
652 The function unlink all items from the tree.
653 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
657 assert( set.empty() );
659 the assertion could be raised.
661 For each node the \ref disposer will be called after unlinking.
663 RCU \p synchronize method can be called. RCU should not be locked.
667 while ( extract_min() );
670 /// Clears the tree (not thread safe)
672 This function is not thread safe and may be called only when no other thread deals with the tree.
673 The function is used in the tree destructor.
677 clear(); // temp solution
681 /// Checks if the map is empty
684 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
687 /// Returns item count in the map
689 Only leaf nodes containing user data are counted.
691 The value returned depends on item counter type provided by \p Traits template parameter.
692 If it is \p atomicity::empty_item_counter this function always returns 0.
694 The function is not suitable for checking the tree emptiness, use \p empty()
695 member function for this purpose.
699 return m_ItemCounter;
702 /// Returns const reference to internal statistics
703 stat const& statistics() const
708 /// Returns reference to \p sync_monitor object
709 sync_monitor& monitor()
714 sync_monitor const& monitor() const
720 /// Checks internal consistency (not atomic, not thread-safe)
722 The debugging function to check internal consistency of the tree.
724 bool check_consistency() const
726 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
729 /// Checks internal consistency (not atomic, not thread-safe)
731 The debugging function to check internal consistency of the tree.
732 The functor \p Func is called if a violation of internal tree structure
736 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
740 - \p nLevel - the level where the violation is found
741 - \p hLeft - the height of left subtree
742 - \p hRight - the height of right subtree
744 The functor is called for each violation found.
746 template <typename Func>
747 bool check_consistency( Func f ) const
749 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_relaxed );
752 do_check_consistency( pChild, 1, f, nErrors );
760 template <typename Func>
761 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
764 size_t hLeft = do_check_consistency( child( pNode, left_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
765 size_t hRight = do_check_consistency( child( pNode, right_child, memory_model::memory_order_relaxed ), nLevel + 1, f, nErrors );
767 if ( hLeft >= hRight ) {
768 if ( hLeft - hRight > 1 ) {
769 f( nLevel, hLeft, hRight );
775 if ( hRight - hLeft > 1 ) {
776 f( nLevel, hLeft, hRight );
785 template <typename Q, typename Compare, typename Func>
786 bool do_find( Q& key, Compare cmp, Func f ) const
791 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
793 assert( result != find_result::retry );
794 return result == find_result::found;
797 template <typename K, typename Compare, typename Func>
798 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
800 check_deadlock_policy::check();
802 rcu_disposer removed_list;
805 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
809 template <typename K, typename Compare, typename Func>
810 bool do_remove( K const& key, Compare cmp, Func func )
812 // Func must return true if the value was disposed
813 // or false if the value was extracted
815 check_deadlock_policy::check();
817 rcu_disposer removed_list;
820 return try_remove_root( key, cmp, func, removed_list );
824 template <typename Func>
825 mapped_type do_extract_min( Func f )
827 mapped_type pExtracted = nullptr;
830 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
835 template <typename Func>
836 mapped_type do_extract_max( Func f )
838 mapped_type pExtracted = nullptr;
841 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
846 template <typename Func>
847 void do_extract_minmax( int nDir, Func func )
849 check_deadlock_policy::check();
851 rcu_disposer removed_list;
855 int result = update_flags::failed;
857 // get right child of root
858 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
860 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
861 if ( nChildVersion & node_type::shrinking ) {
862 m_stat.onRemoveRootWaitShrinking();
863 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
864 result = update_flags::retry;
866 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
867 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
870 } while ( result == update_flags::retry );
874 template <typename Q>
875 mapped_type do_extract( Q const& key )
877 mapped_type pExtracted = nullptr;
881 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
886 template <typename Q, typename Less>
887 mapped_type do_extract_with( Q const& key, Less pred )
890 mapped_type pExtracted = nullptr;
893 cds::opt::details::make_comparator_from_less<Less>(),
894 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
903 template <typename Q, typename Compare, typename Func>
904 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
906 assert( gc::is_locked() );
910 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
912 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
913 m_stat.onFindRetry();
914 return find_result::retry;
917 m_stat.onFindFailed();
918 return find_result::not_found;
921 int nCmp = cmp( key, pChild->m_key );
923 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
925 node_scoped_lock l( m_Monitor, *pChild );
926 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
928 m_stat.onFindSuccess();
929 return find_result::found;
934 m_stat.onFindFailed();
935 return find_result::not_found;
938 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
939 if ( nChildVersion & node_type::shrinking ) {
940 m_stat.onFindWaitShrinking();
941 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
943 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
944 m_stat.onFindRetry();
945 return find_result::retry;
948 else if ( nChildVersion != node_type::unlinked ) {
950 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
951 m_stat.onFindRetry();
952 return find_result::retry;
955 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
956 if ( found != find_result::retry )
962 template <typename K, typename Compare, typename Func>
963 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
965 assert( gc::is_locked() );
969 // get right child of root
970 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
972 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
973 if ( nChildVersion & node_type::shrinking ) {
974 m_stat.onUpdateRootWaitShrinking();
975 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
976 result = update_flags::retry;
978 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
979 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
982 result = update_flags::retry;
986 if ( nFlags & update_flags::allow_insert ) {
987 // insert into tree as right child of the root
989 node_scoped_lock l( m_Monitor, *m_pRoot );
990 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
991 result = update_flags::retry;
995 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
996 mapped_type pVal = funcUpdate( pNew );
997 assert( pVal != nullptr );
998 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1000 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
1001 m_pRoot->height( 2, memory_model::memory_order_relaxed );
1005 m_stat.onInsertSuccess();
1006 return update_flags::result_inserted;
1009 return update_flags::failed;
1011 } while ( result == update_flags::retry );
1015 template <typename K, typename Compare, typename Func>
1016 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1018 assert( gc::is_locked() );
1022 // get right child of root
1023 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1025 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
1026 if ( nChildVersion & node_type::shrinking ) {
1027 m_stat.onRemoveRootWaitShrinking();
1028 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1029 result = update_flags::retry;
1031 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1032 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1035 result = update_flags::retry;
1039 } while ( result == update_flags::retry );
1041 return result == update_flags::result_removed;
1044 template <typename K, typename Compare, typename Func>
1045 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1047 assert( gc::is_locked() );
1048 assert( nVersion != node_type::unlinked );
1049 CDS_UNUSED( pParent );
1051 int nCmp = cmp( key, pNode->m_key );
1053 if ( nFlags & update_flags::allow_update ) {
1054 return try_update_node( funcUpdate, pNode, disp );
1056 return update_flags::failed;
1061 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1062 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1063 m_stat.onUpdateRetry();
1064 return update_flags::retry;
1067 if ( pChild == nullptr ) {
1069 if ( nFlags & update_flags::allow_insert )
1070 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1072 result = update_flags::failed;
1076 result = update_flags::retry;
1077 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1078 if ( nChildVersion & node_type::shrinking ) {
1079 m_stat.onUpdateWaitShrinking();
1080 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1083 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1084 // this second read is important, because it is protected by nChildVersion
1086 // validate the read that our caller took to get to node
1087 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1088 m_stat.onUpdateRetry();
1089 return update_flags::retry;
1092 // At this point we know that the traversal our parent took to get to node is still valid.
1093 // The recursive implementation will validate the traversal from node to
1094 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1095 // This means that we are no longer vulnerable to node shrinks, and we don't need
1096 // to validate node version any more.
1097 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1100 } while ( result == update_flags::retry );
1104 template <typename K, typename Compare, typename Func>
// Recursive step of removal by key (caller loops on update_flags::retry).
// Compares key with pNode's key; on a match removes pNode via try_remove_node(),
// otherwise descends to the child on the side selected by the comparison result,
// re-validating pNode's version to detect concurrent shrink rotations.
// Waits (with back_off) while the child is marked shrinking before recursing.
1105 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1107 assert( gc::is_locked() );
1108 assert( nVersion != node_type::unlinked );
1110 int nCmp = cmp( key, pNode->m_key );
// key matches pNode->m_key: remove this node
1112 return try_remove_node( pParent, pNode, nVersion, func, disp );
// key differs: read the child on the comparison side, then re-check pNode's
// version (acquire) - a change means a rotation may have moved that child.
1116 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
1117 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1118 m_stat.onRemoveRetry();
1119 return update_flags::retry;
1122 if ( pChild == nullptr ) {
// no subtree on that side - the key is not in the map
1123 return update_flags::failed;
1127 result = update_flags::retry;
1128 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1129 if ( nChildVersion & node_type::shrinking ) {
// child is being rotated down; spin-wait until its shrink completes, then retry
1130 m_stat.onRemoveWaitShrinking();
1131 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1134 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1135 // this second read is important, because it is protected by nChildVersion
1137 // validate the read that our caller took to get to node
1138 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1139 m_stat.onRemoveRetry();
1140 return update_flags::retry;
1143 // At this point we know that the traversal our parent took to get to node is still valid.
1144 // The recursive implementation will validate the traversal from node to
1145 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1146 // This means that we are no longer vulnerable to node shrinks, and we don't need
1147 // to validate node version any more.
1148 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1151 } while ( result == update_flags::retry );
1155 template <typename Func>
// Removes the extreme node of the subtree rooted at pNode in direction nDir
// (leftmost for left_child, rightmost for right_child) - the workhorse of
// extract_min()/extract_max(). Walks down in direction nDir until there is
// no further child, then removes the current node via try_remove_node().
// Uses the same version-validation / shrink-wait protocol as try_remove().
1156 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1158 assert( gc::is_locked() );
1159 assert( nVersion != node_type::unlinked );
1163 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
1164 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1165 m_stat.onRemoveRetry();
1166 return update_flags::retry;
1169 if ( pChild == nullptr ) {
// no child in the search direction - pNode is the min/max node
1171 return try_remove_node( pParent, pNode, nVersion, func, disp );
1174 result = update_flags::retry;
1175 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1176 if ( nChildVersion & node_type::shrinking ) {
// child is being rotated down; wait for the shrink to complete, then retry
1177 m_stat.onRemoveWaitShrinking();
1178 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1181 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1182 // this second read is important, because it is protected by nChildVersion
1184 // validate the read that our caller took to get to node
1185 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1186 m_stat.onRemoveRetry();
1187 return update_flags::retry;
1190 // At this point we know that the traversal our parent took to get to node is still valid.
1191 // The recursive implementation will validate the traversal from node to
1192 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1193 // This means that we are no longer vulnerable to node shrinks, and we don't need
1194 // to validate node version any more.
1195 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1198 } while ( result == update_flags::retry );
1202 template <typename K, typename Func>
// Inserts a new node with the given key into the nDir child slot of pNode
// (the caller observed that slot as empty at version nVersion).
// When c_bRelaxedInsert is set, node and value are allocated optimistically
// before taking the node lock and rolled back if validation fails under it.
// Returns update_flags::retry on version/child-slot mismatch, else result_inserted.
1203 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// funcUpdate produces the mapped value pointer for the freshly allocated node
1207 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1208 mapped_type pVal = funcUpdate( pNew );
1209 assert( pVal != nullptr );
1210 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Optimistic (relaxed) path: pre-check version and slot, then allocate the
// node before locking to shorten the critical section.
1213 if ( c_bRelaxedInsert ) {
1214 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1215 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1217 m_stat.onInsertRetry();
1218 return update_flags::retry;
1221 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1224 node_type * pDamaged;
1226 assert( pNode != nullptr );
1227 node_scoped_lock l( m_Monitor, *pNode );
// re-validate under the lock: version unchanged and the child slot still empty
1229 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1230 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
// validation failed: roll back the optimistic allocation (detach the value
// first so disposing the node does not free it), then retry
1232 if ( c_bRelaxedInsert ) {
1233 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1234 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1237 m_stat.onRelaxedInsertFailed();
1240 m_stat.onInsertRetry();
1241 return update_flags::retry;
// pessimistic path: allocate the node only after validation succeeded
1244 if ( !c_bRelaxedInsert )
1245 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// link the new node in and remember the topmost height-damaged ancestor
1247 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1248 pDamaged = fix_height_locked( pNode );
1252 m_stat.onInsertSuccess();
1254 fix_height_and_rebalance( pDamaged, disp );
1256 return update_flags::result_inserted;
1259 template <typename Func>
// Replaces the value of an existing node under the node lock.
// funcUpdate( pNode ) returns the new value pointer (must not be null);
// the old value, if present, is passed to the RCU disposer.
// Returns update_flags::retry if pNode was concurrently unlinked,
// otherwise update_flags::result_updated.
1260 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1263 assert( pNode != nullptr );
1265 node_scoped_lock l( m_Monitor, *pNode );
1267 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
// the node was removed while we were reaching it - caller must restart
1268 m_stat.onUpdateUnlinked();
1269 return update_flags::retry;
1272 pOld = pNode->value( memory_model::memory_order_relaxed );
1273 mapped_type pVal = funcUpdate( pNode );
1277 assert( pVal != nullptr );
1278 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// defer disposal of the replaced value until the RCU grace period ends
1283 disp.dispose_value(pOld);
1284 m_stat.onDisposeValue();
1287 m_stat.onUpdateSuccess();
1288 return update_flags::result_updated;
1291 template <typename Func>
// Removes pNode: if it has at most one child it is physically unlinked;
// with two children only its value is cleared, turning it into a routing
// node (logical removal). func( key, pOld, disp ) consumes the old value and
// returns whether it disposed it (remove) or kept it (extract).
// Locks pParent before pNode and re-validates links/version under the locks.
// NOTE(review): this function mixes atomics::memory_order_relaxed with the
// memory_model:: spelling used elsewhere in the class - presumably equivalent
// aliases, but worth unifying; confirm against the memory_model typedef.
1292 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1294 assert( pParent != nullptr );
1295 assert( pNode != nullptr );
1297 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
// already a routing node - nothing to remove
1298 return update_flags::failed;
// At most one child: the node itself can be spliced out of the tree.
1300 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1301 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1303 node_type * pDamaged;
1306 node_scoped_lock lp( m_Monitor, *pParent );
// revalidate under the parent lock: pParent still linked and still pNode's parent
1307 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1308 return update_flags::retry;
1311 node_scoped_lock ln( m_Monitor, *pNode );
1312 pOld = pNode->value( memory_model::memory_order_relaxed );
1313 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1315 && try_unlink_locked( pParent, pNode, disp )))
1317 return update_flags::retry;
1320 pDamaged = fix_height_locked( pParent );
1324 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1325 m_stat.onDisposeValue();
1327 m_stat.onExtractValue();
1329 fix_height_and_rebalance( pDamaged, disp );
1330 return update_flags::result_removed;
// Two children: cannot unlink; null out the value under the node lock so
// the node becomes a routing node.
1333 int result = update_flags::retry;
1336 node_scoped_lock ln( m_Monitor, *pNode );
1337 pOld = pNode->value( atomics::memory_order_relaxed );
1338 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1339 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1340 result = update_flags::result_removed;
1344 if ( result == update_flags::result_removed ) {
1346 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1347 m_stat.onDisposeValue();
1349 m_stat.onExtractValue();
// Splices pNode out of the tree; pNode must have at most one child.
// Precondition: caller holds the locks on both pParent and pNode.
// Returns false if the parent/child relationship changed or pNode now has
// two children; on success marks pNode unlinked and schedules it for
// RCU-deferred disposal.
1356 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1358 // pParent and pNode must be locked
1359 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1361 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1362 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1363 if ( pNode != pParentLeft && pNode != pParentRight ) {
1364 // node is no longer a child of parent
1368 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1369 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1371 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1372 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1373 if ( pLeft != nullptr && pRight != nullptr ) {
1374 // splicing is no longer possible
// replace pNode in its parent by its single (or null) child
1377 node_type * pSplice = pLeft ? pLeft : pRight;
1379 if ( pParentLeft == pNode )
1380 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1382 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
1385 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1387 // Mark the node as unlinked
1388 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1390 // The value will be disposed by calling function
1391 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// node memory is reclaimed only after the RCU grace period
1393 disp.dispose( pNode );
1394 m_stat.onDisposeNode();
1401 private: // rotations
// Classifies pNode's local state: unlink_required (valueless routing node
// with a missing child), rebalance_required (|balance factor| > 1),
// nothing_required (stored height already correct), or - otherwise - the
// corrected height value that should be stored.
1403 int estimate_node_condition( node_type * pNode )
1405 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1406 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1408 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1409 return unlink_required;
1411 int h = pNode->height( memory_model::memory_order_relaxed );
1412 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1413 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1415 int hNew = 1 + std::max( hL, hR );
1416 int nBalance = hL - hR;
1418 if ( nBalance < -1 || nBalance > 1 )
1419 return rebalance_required;
// h != hNew means the cached height is stale - return the corrected value
1421 return h != hNew ? hNew : nothing_required;
// Locks pNode and refreshes its cached height; returns the next node to
// repair (see fix_height_locked()).
1424 node_type * fix_height( node_type * pNode )
1426 assert( pNode != nullptr );
1427 node_scoped_lock l( m_Monitor, *pNode );
1428 return fix_height_locked( pNode );
// Refreshes pNode's cached height based on estimate_node_condition().
// Precondition: pNode is locked. When structural repair (rebalance/unlink)
// is needed, or the height was stale, pNode's parent is returned as the
// next damaged node to process.
1431 node_type * fix_height_locked( node_type * pNode )
1433 // pNode must be locked!!!
1434 int h = estimate_node_condition( pNode );
1436 case rebalance_required:
1437 case unlink_required:
// structural repair is needed at this node itself
1439 case nothing_required:
// default branch: h holds the corrected height value to store
1442 pNode->height( h, memory_model::memory_order_relaxed );
1443 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from pNode toward the root, fixing cached heights and performing
// rotations until no damage remains. For a rebalancing step it locks the
// parent first, revalidates the parent link under that lock, then locks the
// node itself (parent-before-child lock order).
1447 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1449 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1450 int nCond = estimate_node_condition( pNode );
1451 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// locally consistent (or node already gone) - done
1454 if ( nCond != unlink_required && nCond != rebalance_required )
// only the cached height is stale: fix it and continue with the parent
1455 pNode = fix_height( pNode );
1457 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1458 assert( pParent != nullptr );
1460 node_scoped_lock lp( m_Monitor, *pParent );
1461 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1462 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1464 node_scoped_lock ln( m_Monitor, *pNode );
1465 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs pNode while holding the locks on pParent and pNode: unlinks an
// empty routing node, dispatches to the left/right rotation helpers by
// balance factor, or simply refreshes the cached height.
1472 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1474 // pParent and pNode should be locked.
1475 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1476 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1478 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1479 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// valueless node with a free child slot: splice it out instead of rotating
1481 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1482 if ( try_unlink_locked( pParent, pNode, disp ))
1483 return fix_height_locked( pParent );
1485 // retry needed for pNode
1490 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1491 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1493 int h = pNode->height( memory_model::memory_order_relaxed );
1494 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1495 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1496 int hNew = 1 + std::max( hL, hR );
1497 int balance = hL - hR;
// left-heavy -> rotate right; right-heavy -> rotate left
1500 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1501 else if ( balance < -1 )
1502 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1503 else if ( hNew != h ) {
// balanced but the cached height is stale: store it and bubble upward
1504 pNode->height( hNew, memory_model::memory_order_relaxed );
1506 // pParent is already locked
1507 return fix_height_locked( pParent );
// pNode is left-heavy: perform a right rotation. If pLeft's right subtree
// is the taller one, the inner child (pLRight) is locked and a double
// rotation is chosen instead; all links/heights are revalidated under the
// newly taken locks. Returns the node to retry or repair next.
1513 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1515 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1516 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1517 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1519 // pParent and pNode is locked yet
1520 // pNode->pLeft is too large, we will rotate-right.
1521 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1524 assert( pLeft != nullptr );
1525 node_scoped_lock l( m_Monitor, *pLeft );
// revalidate the left link now that pLeft is locked
1526 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1527 return pNode; // retry for pNode
1529 int hL = pLeft->height( memory_model::memory_order_relaxed );
1531 return pNode; // retry
1533 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1534 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1535 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1536 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// single rotation suffices when the outer (LL) subtree is the taller one
1540 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1543 assert( pLRight != nullptr );
1545 node_scoped_lock lr( m_Monitor, *pLRight );
1546 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1547 return pNode; // retry
// re-read the height now that pLRight is locked
1549 hLR = pLRight->height( memory_model::memory_order_relaxed );
1551 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1553 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1554 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1555 int balance = hLL - hLRL;
1556 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1557 // nParent.child.left won't be damaged after a double rotation
1558 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1562 // focus on pLeft, if necessary pNode will be balanced later
1563 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked(): pNode is right-heavy, so rotate
// left, possibly via a double rotation through pRight's left child.
1568 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1570 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1571 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1572 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1574 // pParent and pNode is locked yet
1576 assert( pRight != nullptr );
1577 node_scoped_lock l( m_Monitor, *pRight );
// revalidate the right link now that pRight is locked
1578 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1579 return pNode; // retry for pNode
1581 int hR = pRight->height( memory_model::memory_order_relaxed );
1582 if ( hL - hR >= -1 )
1583 return pNode; // retry
1585 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1586 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1587 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1588 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// single rotation suffices when the outer (RR) subtree is the taller one
1590 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1593 assert( pRLeft != nullptr );
1594 node_scoped_lock lrl( m_Monitor, *pRLeft );
1595 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1596 return pNode; // retry
// re-read the height now that pRLeft is locked
1598 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1600 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1602 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1603 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1604 int balance = hRR - hRLR;
1605 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1606 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// otherwise repair pRight first; pNode will be balanced later if necessary
1608 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as shrinking (sets the shrinking bit in its version word) so
// concurrent readers detect the in-flight rotation; paired with end_change().
1612 static void begin_change( node_type * pNode, version_type version )
1614 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a shrink started by begin_change() on the same node.
1616 static void end_change( node_type * pNode, version_type version )
1618 // Clear shrinking and unlinked flags and increment version
// (version | version_flags) + 1: setting all flag bits makes the +1 carry
// into the counter part, leaving the flags cleared and the version bumped
1619 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft is lifted to become the parent of pNode.
// pParent, pNode and pLeft are locked by the caller; pNode is marked
// shrinking for the duration of the relink (begin_change/end_change).
// Returns the next damaged node for the caller to repair, or nullptr.
1622 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1624 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1625 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1627 begin_change( pNode, nodeVersion );
// pLeft's right subtree becomes pNode's left subtree
1629 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1630 if ( pLRight != nullptr )
1631 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1633 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1634 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// attach pLeft to pParent in the slot pNode used to occupy
1636 if ( pParentLeft == pNode )
1637 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1639 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1640 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1642 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1644 // fix up heights links
1645 int hNode = 1 + std::max( hLR, hR );
1646 pNode->height( hNode, memory_model::memory_order_relaxed );
1647 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1649 end_change( pNode, nodeVersion );
1650 m_stat.onRotateRight();
1652 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1653 // parent.child). pNode is the deepest. Perform as many fixes as we can
1654 // with the locks we've got.
1656 // We've already fixed the height for pNode, but it might still be outside
1657 // our allowable balance range. In that case a simple fix_height_locked()
1659 int nodeBalance = hLR - hR;
1660 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1661 // we need another rotation at pNode
1665 // we've fixed balance and height damage for pNode, now handle
1666 // extra-routing node damage
1667 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1668 // we need to remove pNode and then repair
1672 // we've already fixed the height at pLeft, do we need a rotation here?
1673 int leftBalance = hLL - hNode;
1674 if ( leftBalance < -1 || leftBalance > 1 )
1677 // pLeft might also have routing node damage (if pLeft.left was null)
1678 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1681 // try to fix the parent height while we've still got the lock
1682 return fix_height_locked( pParent );
// Mirror of rotate_right_locked(): single left rotation, pRight is lifted
// to become the parent of pNode. Returns the next damaged node, or nullptr.
1685 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1687 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1688 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1690 begin_change( pNode, nodeVersion );
1692 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1693 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1694 if ( pRLeft != nullptr )
1695 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1697 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1698 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// attach pRight to pParent in the slot pNode used to occupy
1700 if ( pParentLeft == pNode )
1701 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1703 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1704 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1706 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1709 int hNode = 1 + std::max( hL, hRL );
1710 pNode->height( hNode, memory_model::memory_order_relaxed );
1711 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1713 end_change( pNode, nodeVersion );
1714 m_stat.onRotateLeft();
// post-rotation damage checks - see the commentary in rotate_right_locked()
1716 int nodeBalance = hRL - hL;
1717 if ( nodeBalance < -1 || nodeBalance > 1 )
1720 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1723 int rightBalance = hRR - hNode;
1724 if ( rightBalance < -1 || rightBalance > 1 )
1727 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1730 return fix_height_locked( pParent );
// Double rotation for the left-right case: lifts pLRight (pLeft's right
// child) to be the new subtree root, with pLeft and pNode as its children.
// pParent, pNode, pLeft and pLRight are locked; pNode and pLeft are marked
// shrinking during the relink. Returns the next damaged node, or nullptr.
1733 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1735 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1736 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1738 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1739 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1740 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1741 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1743 begin_change( pNode, nodeVersion );
1744 begin_change( pLeft, leftVersion );
1746 // fix up pNode links, careful about the order!
1747 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1748 if ( pLRR != nullptr )
1749 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1751 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1752 if ( pLRL != nullptr )
1753 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the subtree root, taking pLeft and pNode as children
1755 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1756 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1757 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1758 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1761 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1763 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1764 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1766 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1769 int hNode = 1 + std::max( hLRR, hR );
1770 pNode->height( hNode, memory_model::memory_order_relaxed );
1771 int hLeft = 1 + std::max( hLL, hLRL );
1772 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1773 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1775 end_change( pNode, nodeVersion );
1776 end_change( pLeft, leftVersion );
1777 m_stat.onRotateRightOverLeft();
1779 // caller should have performed only a single rotation if pLeft was going
1780 // to end up damaged
1781 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1782 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1784 // We have damaged pParent, pLR (now parent.child), and pNode (now
1785 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1786 // can with the locks we've got.
1788 // We've already fixed the height for pNode, but it might still be outside
1789 // our allowable balance range. In that case a simple fix_height_locked()
1791 int nodeBalance = hLRR - hR;
1792 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1793 // we need another rotation at pNode
1797 // pNode might also be damaged by being an unnecessary routing node
1798 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1799 // repair involves splicing out pNode and maybe more rotations
1803 // we've already fixed the height at pLRight, do we need a rotation here?
1804 int balanceLR = hLeft - hNode;
1805 if ( balanceLR < -1 || balanceLR > 1 )
1808 // try to fix the parent height while we've still got the lock
1809 return fix_height_locked( pParent );
// Mirror double rotation for the right-left case: lifts pRLeft (pRight's
// left child) to be the subtree root, with pNode and pRight as its children.
// See rotate_right_over_left_locked() for the detailed commentary.
1812 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1814 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1815 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1817 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1818 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1819 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1820 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1822 begin_change( pNode, nodeVersion );
1823 begin_change( pRight, rightVersion );
1825 // fix up pNode links, careful about the order!
1826 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1827 if ( pRLL != nullptr )
1828 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1830 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1831 if ( pRLR != nullptr )
1832 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the subtree root, taking pNode and pRight as children
1834 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1835 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1836 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1837 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1840 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1842 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1843 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1845 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// recompute cached heights bottom-up
1848 int hNode = 1 + std::max( hL, hRLL );
1849 pNode->height( hNode, memory_model::memory_order_relaxed );
1850 int hRight = 1 + std::max( hRLR, hRR );
1851 pRight->height( hRight, memory_model::memory_order_relaxed );
1852 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1854 end_change( pNode, nodeVersion );
1855 end_change( pRight, rightVersion );
1856 m_stat.onRotateLeftOverRight();
// caller should have chosen a single rotation if pRight could end up damaged
1858 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// post-rotation damage checks (balance and routing-node), deepest node first
1860 int nodeBalance = hRLL - hL;
1861 if ( nodeBalance < -1 || nodeBalance > 1 )
1863 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1866 int balRL = hRight - hNode;
1867 if ( balRL < -1 || balRL > 1 )
1870 return fix_height_locked( pParent );
1875 }} // namespace cds::container
1877 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H