3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
150 static void free_value( mapped_type pVal )
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
157 return pNode->child( nDir ).load( order );
160 static node_type * parent( node_type * pNode, atomics::memory_order order )
162 return pNode->parent( order );
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
196 struct internal_disposer
198 void operator()( node_type * p ) const
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if inserting successful, \p false otherwise.
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key with lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
282 \p second is \p true if new node has been added or \p false if the node with \p key
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
299 template <typename K>
300 std::pair<bool, bool> ensure( K const& key, mapped_type pVal )
302 return update( key, pVal, true );
307 /// Delete \p key from the map
309 RCU \p synchronize() method can be called. RCU should not be locked.
311 Returns \p true if \p key is found and deleted, \p false otherwise
313 template <typename K>
314 bool erase( K const& key )
319 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
323 /// Deletes the item from the map using \p pred predicate for searching
325 The function is an analog of \p erase(K const&)
326 but \p pred is used for key comparing.
327 \p Less functor has the interface like \p std::less.
328 \p Less must imply the same element order as the comparator used for building the map.
330 template <typename K, typename Less>
331 bool erase_with( K const& key, Less pred )
336 cds::opt::details::make_comparator_from_less<Less>(),
337 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
341 /// Delete \p key from the map
343 The function searches an item with key \p key, calls \p f functor
344 and deletes the item. If \p key is not found, the functor is not called.
346 The functor \p Func interface:
349 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
353 RCU \p synchronize method can be called. RCU should not be locked.
355 Returns \p true if \p key is found and deleted, \p false otherwise
357 template <typename K, typename Func>
358 bool erase( K const& key, Func f )
363 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
366 disp.dispose_value(pVal);
372 /// Deletes the item from the map using \p pred predicate for searching
374 The function is an analog of \p erase(K const&, Func)
375 but \p pred is used for key comparing.
376 \p Less functor has the interface like \p std::less.
377 \p Less must imply the same element order as the comparator used for building the map.
379 template <typename K, typename Less, typename Func>
380 bool erase_with( K const& key, Less pred, Func f )
385 cds::opt::details::make_comparator_from_less<Less>(),
386 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
389 disp.dispose_value(pVal);
395 /// Extracts a value with minimal key from the map
397 Returns \p exempt_ptr to the leftmost item.
398 If the tree is empty, returns empty \p exempt_ptr.
400 Note that the function returns only the value for minimal key.
401 To retrieve its key use \p extract_min( Func ) member function.
403 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
404 It means that the function gets leftmost leaf of the tree and tries to unlink it.
405 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
406 So, the function returns the item with minimum key at the moment of tree traversing.
408 RCU \p synchronize method can be called. RCU should NOT be locked.
409 The function does not free the item.
410 The deallocator will be implicitly invoked when the returned object is destroyed or when
411 its \p release() member function is called.
413 exempt_ptr extract_min()
415 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
418 /// Extracts the minimal key and corresponding value
420 Returns \p exempt_ptr to the leftmost item.
421 If the tree is empty, returns empty \p exempt_ptr.
423 \p Func functor is used to store minimal key.
424 \p Func has the following signature:
427 void operator()( key_type const& key );
430 If the tree is empty, \p f is not called.
431 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
434 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimum key.
435 It means that the function gets leftmost leaf of the tree and tries to unlink it.
436 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
437 So, the function returns the item with minimum key at the moment of tree traversing.
439 RCU \p synchronize method can be called. RCU should NOT be locked.
440 The function does not free the item.
441 The deallocator will be implicitly invoked when the returned object is destroyed or when
442 its \p release() member function is called.
444 template <typename Func>
445 exempt_ptr extract_min( Func f )
447 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
450 /// Extracts the minimal key and corresponding value
452 This function is a shortcut for the following call:
455 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
457 \p key_type should be copy-assignable. The copy of minimal key
458 is returned in \p min_key argument.
460 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
461 extract_min_key( key_type& min_key )
463 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
466 /// Extracts a value with maximal key from the tree
468 Returns \p exempt_ptr pointer to the rightmost item.
469 If the set is empty, returns empty \p exempt_ptr.
471 Note that the function returns only the value for maximal key.
472 To retrieve its key use \p extract_max( Func ) member function.
474 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
475 It means that the function gets rightmost leaf of the tree and tries to unlink it.
476 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
477 So, the function returns the item with maximum key at the moment of tree traversing.
479 RCU \p synchronize method can be called. RCU should NOT be locked.
480 The function does not free the item.
481 The deallocator will be implicitly invoked when the returned object is destroyed or when
482 its \p release() is called.
484 exempt_ptr extract_max()
486 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
489 /// Extracts the maximal key and corresponding value
491 Returns \p exempt_ptr pointer to the rightmost item.
492 If the set is empty, returns empty \p exempt_ptr.
494 \p Func functor is used to store maximal key.
495 \p Func has the following signature:
498 void operator()( key_type const& key );
501 If the tree is empty, \p f is not called.
502 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
505 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
506 It means that the function gets rightmost leaf of the tree and tries to unlink it.
507 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
508 So, the function returns the item with maximum key at the moment of tree traversing.
510 RCU \p synchronize method can be called. RCU should NOT be locked.
511 The function does not free the item.
512 The deallocator will be implicitly invoked when the returned object is destroyed or when
513 its \p release() is called.
515 template <typename Func>
516 exempt_ptr extract_max( Func f )
518 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
521 /// Extracts the maximal key and corresponding value
523 This function is a shortcut for the following call:
526 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
528 \p key_type should be copy-assignable. The copy of maximal key
529 is returned in \p max_key argument.
531 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
532 extract_max_key( key_type& max_key )
534 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
537 /// Extracts an item from the map
539 The function searches an item with key equal to \p key in the tree,
540 unlinks it, and returns \p exempt_ptr pointer to a value found.
541 If \p key is not found the function returns an empty \p exempt_ptr.
543 RCU \p synchronize method can be called. RCU should NOT be locked.
544 The function does not destroy the value found.
545 The disposer will be implicitly invoked when the returned object is destroyed or when
546 its \p release() member function is called.
548 template <typename Q>
549 exempt_ptr extract( Q const& key )
551 return exempt_ptr(do_extract( key ));
555 /// Extracts an item from the map using \p pred for searching
557 The function is an analog of \p extract(Q const&)
558 but \p pred is used for key compare.
559 \p Less has the interface like \p std::less.
560 \p pred must imply the same element order as the comparator used for building the tree.
562 template <typename Q, typename Less>
563 exempt_ptr extract_with( Q const& key, Less pred )
565 return exempt_ptr(do_extract_with( key, pred ));
568 /// Find the key \p key
570 The function searches the item with key equal to \p key and calls the functor \p f for item found.
571 The interface of \p Func functor is:
574 void operator()( key_type const& key, mapped_type& item );
577 where \p item is the item found.
578 The functor is called under node-level lock.
580 The function applies RCU lock internally.
582 The function returns \p true if \p key is found, \p false otherwise.
584 template <typename K, typename Func>
585 bool find( K const& key, Func f )
587 return do_find( key, key_comparator(),
588 [&f]( node_type * pNode ) -> bool {
589 assert( pNode != nullptr );
590 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
592 f( pNode->m_key, *pVal );
600 /// Finds the key \p val using \p pred predicate for searching
602 The function is an analog of \p find(K const&, Func)
603 but \p pred is used for key comparing.
604 \p Less functor has the interface like \p std::less.
605 \p Less must imply the same element order as the comparator used for building the map.
607 template <typename K, typename Less, typename Func>
608 bool find_with( K const& key, Less pred, Func f )
611 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
612 [&f]( node_type * pNode ) -> bool {
613 assert( pNode != nullptr );
614 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
616 f( pNode->m_key, *pVal );
624 /// Find the key \p key
626 The function searches the item with key equal to \p key
627 and returns \p true if it is found, and \p false otherwise.
629 The function applies RCU lock internally.
631 template <typename K>
632 bool find( K const& key )
634 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
637 /// Finds the key \p val using \p pred predicate for searching
639 The function is an analog of \p find(K const&)
640 but \p pred is used for key comparing.
641 \p Less functor has the interface like \p std::less.
642 \p Less must imply the same element order as the comparator used for building the map.
644 template <typename K, typename Less>
645 bool find_with( K const& key, Less pred )
648 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
651 /// Clears the tree (thread safe, not atomic)
653 The function unlinks all items from the tree.
654 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
658 assert( set.empty() );
660 the assertion could be raised.
662 For each node the \ref disposer will be called after unlinking.
664 RCU \p synchronize method can be called. RCU should not be locked.
668 while ( extract_min() );
671 /// Clears the tree (not thread safe)
673 This function is not thread safe and may be called only when no other thread deals with the tree.
674 The function is used in the tree destructor.
678 clear(); // temp solution
682 /// Checks if the map is empty
685 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
688 /// Returns item count in the map
690 Only leaf nodes containing user data are counted.
692 The value returned depends on item counter type provided by \p Traits template parameter.
693 If it is \p atomicity::empty_item_counter this function always returns 0.
695 The function is not suitable for checking the tree emptiness, use \p empty()
696 member function for this purpose.
700 return m_ItemCounter;
703 /// Returns const reference to internal statistics
704 stat const& statistics() const
709 /// Returns reference to \p sync_monitor object
710 sync_monitor& monitor()
715 sync_monitor const& monitor() const
721 /// Checks internal consistency (not atomic, not thread-safe)
723 The debugging function to check internal consistency of the tree.
725 bool check_consistency() const
727 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
730 /// Checks internal consistency (not atomic, not thread-safe)
732 The debugging function to check internal consistency of the tree.
733 The functor \p Func is called if a violation of internal tree structure
737 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
741 - \p nLevel - the level where the violation is found
742 - \p hLeft - the height of left subtree
743 - \p hRight - the height of right subtree
745 The functor is called for each violation found.
747 template <typename Func>
748 bool check_consistency( Func f ) const
750 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
753 do_check_consistency( pChild, 1, f, nErrors );
761 template <typename Func>
762 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
766 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
767 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
768 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
770 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
773 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
774 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
776 if ( hLeft >= hRight ) {
777 if ( hLeft - hRight > 1 ) {
778 f( nLevel, hLeft, hRight );
784 if ( hRight - hLeft > 1 ) {
785 f( nLevel, hLeft, hRight );
794 template <typename Q, typename Compare, typename Func>
795 bool do_find( Q& key, Compare cmp, Func f ) const
800 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
802 assert( result != find_result::retry );
803 return result == find_result::found;
806 template <typename K, typename Compare, typename Func>
807 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
809 check_deadlock_policy::check();
811 rcu_disposer removed_list;
814 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
818 template <typename K, typename Compare, typename Func>
819 bool do_remove( K const& key, Compare cmp, Func func )
821 // Func must return true if the value was disposed
822 // or false if the value was extracted
824 check_deadlock_policy::check();
826 rcu_disposer removed_list;
829 return try_remove_root( key, cmp, func, removed_list );
833 template <typename Func>
834 mapped_type do_extract_min( Func f )
836 mapped_type pExtracted = nullptr;
839 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
844 template <typename Func>
845 mapped_type do_extract_max( Func f )
847 mapped_type pExtracted = nullptr;
850 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
855 template <typename Func>
856 void do_extract_minmax( int nDir, Func func )
858 check_deadlock_policy::check();
860 rcu_disposer removed_list;
867 // get right child of root
868 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
870 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
871 if ( nChildVersion & node_type::shrinking ) {
872 m_stat.onRemoveRootWaitShrinking();
873 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
874 result = update_flags::retry;
876 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
877 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
880 result = update_flags::retry;
885 if ( result == update_flags::retry )
886 m_stat.onRemoveRetry();
888 m_stat.onExtract( result == update_flags::result_removed );
895 template <typename Q>
896 mapped_type do_extract( Q const& key )
898 mapped_type pExtracted = nullptr;
902 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
904 m_stat.onExtract( pExtracted != nullptr );
908 template <typename Q, typename Less>
909 mapped_type do_extract_with( Q const& key, Less pred )
912 mapped_type pExtracted = nullptr;
915 cds::opt::details::make_comparator_from_less<Less>(),
916 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
918 m_stat.onExtract( pExtracted != nullptr );
925 static int height( node_type * pNode, atomics::memory_order order )
928 return pNode->m_nHeight.load( order );
930 static void set_height( node_type * pNode, int h, atomics::memory_order order )
933 pNode->m_nHeight.store( h, order );
935 static int height_null( node_type * pNode, atomics::memory_order order )
937 return pNode ? height( pNode, order ) : 0;
940 static CDS_CONSTEXPR int const c_stackSize = 64;
942 template <typename Q, typename Compare, typename Func>
943 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
945 assert( gc::is_locked() );
951 version_type nVersion;
955 stack_record stack[c_stackSize];
957 stack[0].pNode = pNode;
958 stack[0].nVersion = nVersion;
959 stack[0].nDir = nDir;
962 pNode = stack[pos].pNode;
963 nVersion = stack[pos].nVersion;
964 nDir = stack[pos].nDir;
967 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
969 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
971 m_stat.onFindRetry();
974 m_stat.onFindFailed();
975 return find_result::not_found;
978 int nCmp = cmp( key, pChild->m_key );
980 if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
982 node_scoped_lock l( m_Monitor, *pChild );
983 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
984 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
986 m_stat.onFindSuccess();
987 return find_result::found;
992 m_stat.onFindRetry();
996 m_stat.onFindFailed();
997 return find_result::not_found;
1000 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1001 if ( nChildVersion & node_type::shrinking ) {
1002 m_stat.onFindWaitShrinking();
1003 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1005 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1007 m_stat.onFindRetry();
1011 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1013 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1015 m_stat.onFindRetry();
1020 assert(pos < c_stackSize);
1021 stack[pos].pNode = pChild;
1022 stack[pos].nVersion = nChildVersion;
1023 stack[pos].nDir = nCmp;
1024 break; // child iteration
1027 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1029 m_stat.onFindRetry();
1033 m_stat.onFindRetry();
1036 return find_result::retry;
1039 template <typename K, typename Compare, typename Func>
1040 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1042 assert( gc::is_locked() );
1047 // get right child of root
1048 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1050 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1051 if ( nChildVersion & node_type::shrinking ) {
1052 m_stat.onUpdateRootWaitShrinking();
1053 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1054 result = update_flags::retry;
1056 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1057 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1059 result = update_flags::retry;
1062 // the tree is empty
1063 if ( nFlags & update_flags::allow_insert ) {
1064 // insert into tree as right child of the root
1066 node_scoped_lock l( m_Monitor, *m_pRoot );
1067 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1068 result = update_flags::retry;
1072 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1073 mapped_type pVal = funcUpdate( pNew );
1074 assert( pVal != nullptr );
1075 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1077 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1078 set_height( m_pRoot, 2, memory_model::memory_order_release );
1082 m_stat.onInsertSuccess();
1083 return update_flags::result_inserted;
1086 return update_flags::failed;
1089 if ( result != update_flags::retry )
1094 template <typename K, typename Compare, typename Func>
1095 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1097 assert( gc::is_locked() );
1102 // get right child of root
1103 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1105 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1106 if ( nChildVersion & node_type::shrinking ) {
1107 m_stat.onRemoveRootWaitShrinking();
1108 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1109 result = update_flags::retry;
1111 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1112 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1115 result = update_flags::retry;
1120 if ( result == update_flags::retry )
1121 m_stat.onRemoveRetry();
1123 m_stat.onRemove( result == update_flags::result_removed );
1124 return result == update_flags::result_removed;
// try_update: iterative (explicit-stack) insert-or-update for 'key', starting
// at pNode whose version snapshot is nVersion. Returns an update_flags value:
// result_inserted / result_updated / failed / retry.
// The recursion of the Bronson algorithm is unrolled into a fixed-size stack
// of (pNode, nVersion) records; 'disp' collects retired values/nodes for
// deferred RCU disposal. Lines elided from this excerpt include the
// stack_record struct declaration and the key-match branch.
1129 template <typename K, typename Compare, typename Func>
1130 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// Precondition: caller holds the RCU read-side lock; starting node is live.
1132 assert( gc::is_locked() );
1133 assert( nVersion != node_type::unlinked );
// Per-level snapshot: the node visited at this depth and the version observed
// when we descended to it (field of a local stack_record; declaration elided).
1138 version_type nVersion;
1141 stack_record stack[c_stackSize];
// Seed level 0 with the caller-supplied starting point.
1143 stack[0].pNode = pNode;
1144 stack[0].nVersion = nVersion;
1146 while ( pos >= 0 ) {
// Reload the current level's snapshot on each (re)entry of the loop body.
1147 pNode = stack[pos].pNode;
1148 nVersion = stack[pos].nVersion;
1150 int nCmp = cmp( key, pNode->m_key );
// Key matches this node (the nCmp == 0 test is elided): update in place.
1152 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1153 if ( result != update_flags::retry )
1156 m_stat.onUpdateRetry();
// Descend towards the child selected by the comparison result.
1161 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
// Version changed since the snapshot: a shrink (rotation) may have
// invalidated the path through this node - retry at this level.
1162 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1164 m_stat.onUpdateRetry();
1168 if ( pChild == nullptr ) {
// Key is absent along this path: insert a new leaf if the caller allows it.
1170 if ( nFlags & update_flags::allow_insert ) {
1171 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1172 if ( result != update_flags::retry )
1175 m_stat.onUpdateRetry();
// Insertion not permitted by nFlags and the key is not present.
1179 return update_flags::failed;
// A child flagged 'shrinking' is mid-rotation: wait for it to stabilize,
// then re-examine this level.
1183 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1184 if ( nChildVersion & node_type::shrinking ) {
1185 m_stat.onUpdateWaitShrinking();
1186 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1189 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1190 // this second read is important, because it is protected by nChildVersion
1192 // validate the read that our caller took to get to node
1193 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1195 m_stat.onUpdateRetry();
1198 // At this point we know that the traversal our parent took to get to node is still valid.
1199 // The recursive implementation will validate the traversal from node to
1200 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1201 // This means that we are no longer vulnerable to node shrinks, and we don't need
1202 // to validate node version any more.
// Push the child as the next stack level and continue the descent.
// NOTE(review): 'pos' is presumably incremented on an elided line before
// these stores - confirm against the full source.
1204 assert( pos < c_stackSize );
1205 stack[pos].pNode = pChild;
1206 stack[pos].nVersion = nChildVersion;
1207 assert( nChildVersion != node_type::unlinked );
1208 break; // child iteration
1210 m_stat.onUpdateRetry();
// Stack drained without reaching a decision: ask the caller to restart.
1214 return update_flags::retry;
// try_remove: iterative (explicit-stack) removal of 'key' starting from
// (pParent, pNode) with version snapshot nVersion. 'func' is invoked on the
// removed value (see try_remove_node); 'disp' batches RCU disposal.
// Returns update_flags: result_removed / failed / retry.
// Same stack-unrolled descent scheme as try_update, but each level also
// remembers the parent, which try_remove_node must lock.
1217 template <typename K, typename Compare, typename Func>
1218 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// Precondition: inside an RCU read-side critical section; node still linked.
1220 assert( gc::is_locked() );
1221 assert( nVersion != node_type::unlinked );
// Per-level snapshot fields of a local stack_record (declaration elided):
// parent, node, and the node version observed during descent.
1225 node_type * pParent;
1227 version_type nVersion;
1230 stack_record stack[c_stackSize];
// Seed level 0 with the caller-supplied triple.
1232 stack[0].pParent = pParent;
1233 stack[0].pNode = pNode;
1234 stack[0].nVersion = nVersion;
1236 while ( pos >= 0 ) {
// Reload the current level's snapshot on each (re)entry.
1237 pParent = stack[pos].pParent;
1238 pNode = stack[pos].pNode;
1239 nVersion = stack[pos].nVersion;
1241 int nCmp = cmp( key, pNode->m_key );
// Key matches this node (nCmp == 0 test elided): try to unlink/extract it.
1243 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1244 if ( result != update_flags::retry )
1247 m_stat.onRemoveRetry();
// Descend towards the child on the side indicated by the comparison.
1252 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
// Node version changed -> our path may have been rotated away; retry level.
1253 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1255 m_stat.onRemoveRetry();
// No child on that side: the key is not in the tree.
1259 if ( pChild == nullptr )
1260 return update_flags::failed;
// Child mid-rotation ('shrinking'): wait, then re-examine this level.
1263 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1264 if ( nChildVersion & node_type::shrinking ) {
1265 m_stat.onRemoveWaitShrinking();
1266 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1269 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1270 // this second read is important, because it is protected by nChildVersion
1272 // validate the read that our caller took to get to node
1273 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1275 m_stat.onRemoveRetry();
1279 // At this point we know that the traversal our parent took to get to node is still valid.
1280 // The recursive implementation will validate the traversal from node to
1281 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1282 // This means that we are no longer vulnerable to node shrinks, and we don't need
1283 // to validate node version any more.
// Push the next level: pNode becomes the parent of pChild.
// NOTE(review): 'pos' increment is on an elided line - confirm in full source.
1285 assert( pos < c_stackSize );
1286 stack[pos].pParent = pNode;
1287 stack[pos].pNode = pChild;
1288 stack[pos].nVersion = nChildVersion;
1289 break; // child iteration
1291 m_stat.onRemoveRetry();
// Stack drained without a decision: caller must restart from the root.
1294 return update_flags::retry;
// try_extract_minmax: walk to the extreme node in direction nDir
// (left_child for minimum, right_child for maximum - presumed from the
// naming, confirm against callers) and remove it, passing its value to
// 'func'. Same explicit-stack descent and version-validation protocol as
// try_remove. Returns update_flags: result_removed / retry (no 'failed'
// path is visible in this excerpt).
1297 template <typename Func>
1298 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// Precondition: RCU read-side lock held; starting node still linked.
1300 assert( gc::is_locked() );
1301 assert( nVersion != node_type::unlinked );
// Per-level stack_record fields (struct declaration elided).
1305 node_type * pParent;
1307 version_type nVersion;
1310 stack_record stack[c_stackSize];
// Seed level 0 with the caller-supplied triple.
1312 stack[0].pParent = pParent;
1313 stack[0].pNode = pNode;
1314 stack[0].nVersion = nVersion;
1316 while ( pos >= 0 ) {
1317 pParent = stack[pos].pParent;
1318 pNode = stack[pos].pNode;
1319 nVersion = stack[pos].nVersion;
// Always move in the fixed direction nDir (no key comparison here).
1322 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
// Version changed since the snapshot: path invalidated, retry this level.
1323 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1325 m_stat.onRemoveRetry();
// No further child in that direction: pNode is the extreme node - remove it.
1329 if ( pChild == nullptr ) {
// The extreme node must carry a value (routing nodes are skipped elsewhere).
1331 assert(pNode->is_valued( memory_model::memory_order_acquire ));
1332 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1333 if ( result != update_flags::retry )
1336 m_stat.onRemoveRetry();
// Child mid-rotation: wait for the shrink to finish, then re-check.
1340 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1341 if ( nChildVersion & node_type::shrinking ) {
1342 m_stat.onRemoveWaitShrinking();
1343 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1346 else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
1347 // this second read is important, because it is protected by nChildVersion
1349 // validate the read that our caller took to get to node
1350 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1352 m_stat.onRemoveRetry();
1356 // At this point we know that the traversal our parent took to get to node is still valid.
1357 // The recursive implementation will validate the traversal from node to
1358 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1359 // This means that we are no longer vulnerable to node shrinks, and we don't need
1360 // to validate node version any more.
// Push the next level and keep walking toward the extreme.
// NOTE(review): 'pos' increment is on an elided line - confirm in full source.
1362 assert( pos < c_stackSize );
1363 stack[pos].pParent = pNode;
1364 stack[pos].pNode = pChild;
1365 stack[pos].nVersion = nChildVersion;
1366 break; // child iteration
1368 m_stat.onRemoveRetry();
// Stack drained: restart from the root.
1371 return update_flags::retry;
// try_insert_node: create a new leaf holding 'key' as the nDir-side child of
// pNode, whose version snapshot is nVersion. funcUpdate produces the mapped
// value for the new node and must return non-null. Returns update_flags:
// result_inserted / retry. If c_bRelaxedInsert is set, the node is allocated
// and filled BEFORE taking pNode's lock (optimistic), and rolled back if the
// validation under the lock fails.
1374 template <typename K, typename Func>
1375 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Fill the freshly allocated node with the user-produced value.
1379 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1380 mapped_type pVal = funcUpdate( pNew );
1381 assert( pVal != nullptr );
1382 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
// Optimistic path: pre-validate version and emptiness of the child slot
// before paying for allocation, to avoid wasted work under contention.
1385 if ( c_bRelaxedInsert ) {
1386 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1387 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1389 m_stat.onInsertRetry();
1390 return update_flags::retry;
// Allocate and populate the node outside the lock (relaxed insert).
1393 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1396 node_type * pDamaged;
// Lock the parent and re-validate: the version must be unchanged and the
// child slot still empty, otherwise retry (rolling back a relaxed insert).
1398 assert( pNode != nullptr );
1399 node_scoped_lock l( m_Monitor, *pNode );
1401 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1402 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1404 if ( c_bRelaxedInsert ) {
// Roll back: detach the value from the speculative node before discarding it
// (disposal of pVal/pNew happens on elided lines).
1405 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1406 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1409 m_stat.onRelaxedInsertFailed();
1412 m_stat.onInsertRetry();
1413 return update_flags::retry;
// Pessimistic path: allocate only after validation succeeded under the lock.
1416 if ( !c_bRelaxedInsert )
1417 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the new child and recompute heights while still locked.
1419 pNode->child( pNew, nDir, memory_model::memory_order_release );
1420 pDamaged = fix_height_locked( pNode );
1424 m_stat.onInsertSuccess();
// Repair any height/balance damage propagating up from the insertion point.
1427 fix_height_and_rebalance( pDamaged, disp );
1428 m_stat.onInsertRebalanceRequired();
1431 return update_flags::result_inserted;
// try_update_node: the key equals pNode's key - set/replace the node's value
// under the node lock. Returns update_flags: result_inserted (node was a
// valueless routing node), result_updated (value replaced), failed (value
// present but allow_update not set), or retry (version/unlink race).
1434 template <typename Func>
1435 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
// Lock the node, then re-validate the version snapshot taken by the caller.
1439 assert( pNode != nullptr );
1441 node_scoped_lock l( m_Monitor, *pNode );
1443 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1444 return update_flags::retry;
// The node may have been unlinked between snapshot and lock acquisition.
1446 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1447 m_stat.onUpdateUnlinked();
1448 return update_flags::retry;
// An existing value can only be replaced when the caller allows updates.
1451 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1452 m_stat.onInsertFailed();
1453 return update_flags::failed;
// pOld == nullptr means the node was a routing node: this is an insert.
1456 pOld = pNode->value( memory_model::memory_order_relaxed );
1457 bInserted = pOld == nullptr;
1458 mapped_type pVal = funcUpdate( pNode );
// The user functor must produce a value; publish it with release ordering.
1462 assert( pVal != nullptr );
1463 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
// The replaced value is retired via the RCU disposer, not freed inline.
1468 disp.dispose_value(pOld);
1469 m_stat.onDisposeValue();
1473 m_stat.onInsertSuccess();
1474 return update_flags::result_inserted;
1477 m_stat.onUpdateSuccess();
1478 return update_flags::result_updated;
// try_remove_node: remove the value of pNode (key already matched by the
// caller). Two cases:
//  - at most one child: physically unlink pNode (locks parent then node),
//  - two children: pNode becomes a valueless routing node (value cleared,
//    node stays linked).
// 'func(key, pOld, disp)' consumes the old value; its return distinguishes
// dispose (true) from extract (false) for statistics.
// Returns update_flags: result_removed / failed / retry.
1481 template <typename Func>
1482 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1484 assert( pParent != nullptr );
1485 assert( pNode != nullptr );
// Already a routing node - nothing to remove.
1487 if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
1488 return update_flags::failed;
1490 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1491 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1493 // pNode can be replaced with its child
1495 node_type * pDamaged;
// Lock order: parent first, then node; validate parent linkage under lock.
1498 node_scoped_lock lp( m_Monitor, *pParent );
1499 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1500 return update_flags::retry;
1503 node_scoped_lock ln( m_Monitor, *pNode );
// Version must match the caller's snapshot or the path is stale.
1504 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1505 return update_flags::retry;
// Value may have been removed concurrently while we acquired the locks.
1507 pOld = pNode->value( memory_model::memory_order_relaxed );
1509 return update_flags::failed;
// Splice pNode out; failure means concurrent structural change -> retry.
1511 if ( !try_unlink_locked( pParent, pNode, disp ))
1512 return update_flags::retry;
1514 pDamaged = fix_height_locked( pParent );
// Hand the old value to the user functor outside the locks.
1518 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1519 m_stat.onDisposeValue();
1521 m_stat.onExtractValue();
// Repair heights/balance along the path damaged by the unlink.
1524 fix_height_and_rebalance( pDamaged, disp );
1525 m_stat.onRemoveRebalanceRequired();
1529 // pNode is an internal with two children
// Two-children case: only clear the value; node stays as a routing node.
1533 node_scoped_lock ln( m_Monitor, *pNode );
1534 pOld = pNode->value( memory_model::memory_order_relaxed );
1535 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1536 return update_flags::retry;
1538 return update_flags::failed;
1540 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1541 m_stat.onMakeRoutingNode();
1545 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1546 m_stat.onDisposeValue();
1548 m_stat.onExtractValue();
1550 return update_flags::result_removed;
// try_unlink_locked: splice pNode out of the tree by re-pointing pParent's
// child link to pNode's single child (or null). Both pParent and pNode must
// already be locked by the caller. Returns false when the structure changed
// (node no longer parent's child, or node acquired two children) so the
// caller must retry; true on successful unlink.
1553 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1555 // pParent and pNode must be locked
1556 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1558 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1559 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1560 if ( pNode != pParentLeft && pNode != pParentRight ) {
1561 // node is no longer a child of parent
1565 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1566 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1568 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1569 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Unlinking requires at most one child; with two, splicing is impossible.
1570 if ( pLeft != nullptr && pRight != nullptr ) {
1571 // splicing is no longer possible
// pSplice is the surviving subtree (may be null when pNode is a leaf).
1574 node_type * pSplice = pLeft ? pLeft : pRight;
1576 if ( pParentLeft == pNode )
1577 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1579 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
// Fix the surviving child's parent pointer (guard for null is elided here).
1582 pSplice->parent( pParent, memory_model::memory_order_release );
1584 // Mark the node as unlinked
// Setting version to 'unlinked' invalidates every outstanding snapshot of
// this node, forcing concurrent readers onto their retry paths.
1585 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1587 // The value will be disposed by calling function
1588 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
// The node itself is retired through RCU, never freed inline.
1590 disp.dispose( pNode );
1591 m_stat.onDisposeNode();
1598 private: // rotations
// estimate_node_condition: lock-free diagnosis of a node. Returns one of:
//  - unlink_required: valueless routing node with <= 1 child, can be spliced,
//  - rebalance_required: |height(left) - height(right)| > 1,
//  - nothing_required: height cached on the node is already correct,
//  - otherwise the corrected height value (h != hNew) for the caller to set.
// Being unlocked, the answer is only a hint and may be stale.
1600 int estimate_node_condition( node_type * pNode )
1602 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1603 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// A routing node (no value) missing a child can simply be unlinked.
1605 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1606 return unlink_required;
1608 int h = height( pNode, memory_model::memory_order_acquire );
1609 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1610 int hR = height_null( pRight, memory_model::memory_order_acquire );
1612 int hNew = 1 + std::max( hL, hR );
1613 int nBalance = hL - hR;
// AVL invariant check: balance factor outside [-1, 1] needs a rotation.
1615 if ( nBalance < -1 || nBalance > 1 )
1616 return rebalance_required;
1618 return h != hNew ? hNew : nothing_required;
// fix_height: locking wrapper around fix_height_locked - acquires the node's
// monitor lock and recomputes its cached height. Returns the next node that
// may need repair (see fix_height_locked), or nullptr.
1621 node_type * fix_height( node_type * pNode )
1623 assert( pNode != nullptr );
1624 node_scoped_lock l( m_Monitor, *pNode );
1625 return fix_height_locked( pNode );
// fix_height_locked: with pNode's lock held, consult estimate_node_condition
// and act on the result (switch header and some cases are elided from this
// excerpt). When the condition is a corrected height, store it and return
// the parent as the next node whose height may now be stale; rebalance/unlink
// conditions return pNode itself for the caller to handle.
1628 node_type * fix_height_locked( node_type * pNode )
1630 // pNode must be locked!!!
1631 int h = estimate_node_condition( pNode );
1633 case rebalance_required:
1634 case unlink_required:
1636 case nothing_required:
// Default case: h is the corrected height - publish it and propagate upward.
1639 set_height( pNode, h, memory_model::memory_order_release );
1640 return parent( pNode, memory_model::memory_order_relaxed );
// fix_height_and_rebalance: walk up from pNode toward the root, fixing stale
// heights and performing rotations until no more damage remains. The loop
// stops at the root sentinel (node with null parent) or when a node reports
// nothing_required / has been unlinked concurrently.
1644 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1646 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
// Unlocked pre-check: cheap exit when no repair is (apparently) needed.
1647 int nCond = estimate_node_condition( pNode );
1648 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
// Pure height damage: fix it and continue with the returned parent.
1651 if ( nCond != unlink_required && nCond != rebalance_required )
1652 pNode = fix_height( pNode );
// Structural damage: lock parent then node (same order as try_remove_node)
// and rebalance; rebalance_locked returns the next damaged node.
1654 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1655 assert( pParent != nullptr );
1657 node_scoped_lock lp( m_Monitor, *pParent );
// Re-validate the parent link under the lock before rotating.
1658 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1659 node_scoped_lock ln( m_Monitor, *pNode );
1660 pNode = rebalance_locked( pParent, pNode, disp );
// rebalance_locked: with pParent and pNode locked, repair pNode - unlink it
// if it is an empty routing node, rotate right/left if unbalanced, or just
// refresh its height. Returns the next damaged node to process, or nullptr
// when no further rebalancing is necessary.
1667 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1669 // pParent and pNode should be locked.
1670 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1671 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1673 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1674 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Empty routing node: splice it out; if splicing fails, pNode is returned
// (on an elided line) so the caller retries.
1676 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1677 if ( try_unlink_locked( pParent, pNode, disp ))
1678 return fix_height_locked( pParent );
1680 // retry needed for pNode
1685 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1686 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// Recompute balance from the children's heights.
1688 int h = height( pNode, memory_model::memory_order_acquire );
1689 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1690 int hR = height_null( pRight, memory_model::memory_order_acquire );
1691 int hNew = 1 + std::max( hL, hR );
1692 int balance = hL - hR;
// balance > 1 (left too tall) -> rotate right; balance < -1 -> rotate left.
1695 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1696 else if ( balance < -1 )
1697 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1698 else if ( hNew != h ) {
// Balanced but cached height stale: publish the corrected height and let the
// parent be re-examined.
1699 set_height( pNode, hNew, memory_model::memory_order_release );
1701 // pParent is already locked
1702 return fix_height_locked( pParent );
// rebalance_to_right_locked: pNode's left subtree is too tall. Decide between
// a single right rotation and a left-right double rotation based on the
// heights of pLeft's children, taking pLeft's (and possibly pLRight's) lock.
// Returns the next node to repair, or pNode itself to signal a retry when a
// lock-protected re-read invalidates the plan.
1708 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1710 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1711 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1712 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1714 // pParent and pNode is locked yet
1715 // pNode->pLeft is too large, we will rotate-right.
1716 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
// Lock the left child and re-validate the link under that lock.
1719 assert( pLeft != nullptr );
1720 node_scoped_lock l( m_Monitor, *pLeft );
1721 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1722 return pNode; // retry for pNode
// Re-read heights under the lock; the imbalance may have healed already
// (the comparison line following this read is elided).
1724 int hL = height( pLeft, memory_model::memory_order_acquire );
1726 return pNode; // retry
1728 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1729 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1730 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1731 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
// Simple case (condition elided): a single right rotation suffices.
1735 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Otherwise lock pLeft's right child and re-validate before the double rotation.
1738 assert( pLRight != nullptr );
1740 node_scoped_lock lr( m_Monitor, *pLRight );
1741 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1742 return pNode; // retry
1744 hLR = height( pLRight, memory_model::memory_order_acquire );
1746 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1748 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1749 int balance = hLL - hLRL;
// Double rotation is safe only if it will not leave pLeft damaged.
1750 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1751 // nParent.child.left won't be damaged after a double rotation
1752 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1756 // focus on pLeft, if necessary pNode will be balanced later
1757 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// rebalance_to_left_locked: mirror image of rebalance_to_right_locked -
// pNode's right subtree is too tall. Chooses between a single left rotation
// and a right-left double rotation; returns the next node to repair, or
// pNode to signal retry.
1762 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1764 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1765 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1766 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1768 // pParent and pNode is locked yet
// Lock the right child and re-validate the link under that lock.
1770 assert( pRight != nullptr );
1771 node_scoped_lock l( m_Monitor, *pRight );
1772 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1773 return pNode; // retry for pNode
// Re-check the imbalance under the lock; it may have healed concurrently.
1775 int hR = height( pRight, memory_model::memory_order_acquire );
1776 if ( hL - hR >= -1 )
1777 return pNode; // retry
1779 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1780 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1781 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1782 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
// Simple case (condition elided): single left rotation.
1784 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Otherwise lock pRight's left child and re-validate for the double rotation.
1787 assert( pRLeft != nullptr );
1788 node_scoped_lock lrl( m_Monitor, *pRLeft );
1789 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1790 return pNode; // retry
1792 hRL = height( pRLeft, memory_model::memory_order_acquire );
1794 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1796 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1797 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1798 int balance = hRR - hRLR;
// Double rotation only when it cannot leave pRight damaged afterwards.
1799 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1800 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Fix pRight first; pNode will be rebalanced on a later pass if needed.
1802 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// begin_change: mark pNode as 'shrinking' before a rotation. Readers that
// observe the shrinking bit wait (wait_until_shrink_completed) or retry, so
// the rotation can relink children without readers following stale paths.
1806 static void begin_change( node_type * pNode, version_type version )
// Caller must hold the node's lock and pass the current (non-shrinking) version.
1808 assert(pNode->version(memory_model::memory_order_acquire) == version );
1809 assert( (version & node_type::shrinking) == 0 );
1810 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// end_change: finish a rotation started by begin_change - clear the flag
// bits and bump the version counter so every snapshot taken before the
// rotation is invalidated and optimistic readers retry.
1812 static void end_change( node_type * pNode, version_type version )
1814 // Clear shrinking and unlinked flags and increment version
1815 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// rotate_right_locked: single right rotation - pLeft replaces pNode under
// pParent, pNode becomes pLeft's right child, and pLeft's former right
// subtree (pLRight) becomes pNode's left subtree. Caller holds locks on
// pParent, pNode and pLeft. Height arguments are the pre-rotation heights.
// Returns the next damaged node to repair (or the result of fixing pParent).
1818 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1820 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1821 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish the shrinking bit so concurrent readers of pNode stall/retry.
1823 begin_change( pNode, nodeVersion );
// Step 1: pNode adopts pLRight as its left child.
1825 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1826 if ( pLRight != nullptr )
1827 pLRight->parent( pNode, memory_model::memory_order_release );
// Fences order the relink steps for concurrent lock-free readers.
1829 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 2: pLeft adopts pNode as its right child.
1831 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1832 pNode->parent( pLeft, memory_model::memory_order_release );
1834 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 3: pParent's child slot switches from pNode to pLeft.
1836 if ( pParentLeft == pNode )
1837 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1839 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1840 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1842 pLeft->parent( pParent, memory_model::memory_order_release );
1844 atomics::atomic_thread_fence( memory_model::memory_order_release );
1846 // fix up heights links
1847 int hNode = 1 + std::max( hLR, hR );
1848 set_height( pNode, hNode, memory_model::memory_order_release );
1849 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
// Clear shrinking + bump version: all outstanding snapshots become stale.
1851 end_change( pNode, nodeVersion );
1852 m_stat.onRotateRight();
1854 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1855 // parent.child). pNode is the deepest. Perform as many fixes as we can
1856 // with the locks we've got.
1858 // We've already fixed the height for pNode, but it might still be outside
1859 // our allowable balance range. In that case a simple fix_height_locked()
1861 int nodeBalance = hLR - hR;
1862 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1863 // we need another rotation at pNode
1864 m_stat.onRotateAfterRightRotation();
// (the 'return pNode' for these damage cases lies on elided lines)
1868 // we've fixed balance and height damage for pNode, now handle
1869 // extra-routing node damage
1870 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1871 // we need to remove pNode and then repair
1872 m_stat.onRemoveAfterRightRotation();
1876 // we've already fixed the height at pLeft, do we need a rotation here?
1877 int leftBalance = hLL - hNode;
1878 if ( leftBalance < -1 || leftBalance > 1 ) {
1879 m_stat.onRotateAfterRightRotation();
1883 // pLeft might also have routing node damage (if pLeft.left was null)
1884 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
1885 m_stat.onDamageAfterRightRotation();
1889 // try to fix the parent height while we've still got the lock
1890 return fix_height_locked( pParent );
// rotate_left_locked: mirror of rotate_right_locked - single left rotation.
// pRight replaces pNode under pParent, pNode becomes pRight's left child,
// and pRight's former left subtree (pRLeft) becomes pNode's right subtree.
// Caller holds locks on pParent, pNode and pRight; returns next damaged node.
1893 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1895 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1896 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
// Publish the shrinking bit so concurrent readers of pNode stall/retry.
1898 begin_change( pNode, nodeVersion );
1900 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1901 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1902 if ( pRLeft != nullptr )
1903 pRLeft->parent( pNode, memory_model::memory_order_release );
// Fences keep the relink steps ordered for lock-free readers.
1905 atomics::atomic_thread_fence( memory_model::memory_order_release );
1907 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1908 pNode->parent( pRight, memory_model::memory_order_release );
1910 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Switch pParent's child slot from pNode to pRight.
1912 if ( pParentLeft == pNode )
1913 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1915 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1916 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1918 pRight->parent( pParent, memory_model::memory_order_release );
1920 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Recompute cached heights bottom-up.
1923 int hNode = 1 + std::max( hL, hRL );
1924 set_height( pNode, hNode, memory_model::memory_order_release );
1925 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
// Invalidate all outstanding version snapshots of pNode.
1927 end_change( pNode, nodeVersion );
1928 m_stat.onRotateLeft();
// Post-rotation damage triage (returns for each case are on elided lines):
// pNode still unbalanced, pNode an empty routing node, pRight unbalanced,
// pRight an empty routing node - otherwise just fix the parent height.
1930 int nodeBalance = hRL - hL;
1931 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1932 m_stat.onRotateAfterLeftRotation();
1936 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
1937 m_stat.onRemoveAfterLeftRotation();
1941 int rightBalance = hRR - hNode;
1942 if ( rightBalance < -1 || rightBalance > 1 ) {
1943 m_stat.onRotateAfterLeftRotation();
1947 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
1948 m_stat.onDamageAfterLeftRotation();
1952 return fix_height_locked( pParent );
// rotate_right_over_left_locked: left-right double rotation. pLRight (the
// right child of pNode's left child) is promoted to replace pNode under
// pParent; pLeft keeps pLRight's left subtree, pNode receives pLRight's
// right subtree. Caller holds locks on pParent, pNode, pLeft and pLRight.
// Both pNode and pLeft change shape, so both get begin/end_change brackets.
1955 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1957 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1958 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1960 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1961 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1962 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1963 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
// Mark both reshaped nodes as shrinking before touching any links.
1965 begin_change( pNode, nodeVersion );
1966 begin_change( pLeft, leftVersion );
1968 // fix up pNode links, careful about the order!
// pLRight's right subtree moves under pNode.
1969 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1970 if ( pLRR != nullptr )
1971 pLRR->parent( pNode, memory_model::memory_order_release );
// pLRight's left subtree moves under pLeft.
1973 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1974 if ( pLRL != nullptr )
1975 pLRL->parent( pLeft, memory_model::memory_order_release );
// pLRight becomes the new subtree root: pLeft on its left, pNode on its right.
1977 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
1978 pLeft->parent( pLRight, memory_model::memory_order_release );
1980 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
1981 pNode->parent( pLRight, memory_model::memory_order_release );
// Switch pParent's child slot (the pPL == pNode test is elided).
1984 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
1986 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1987 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
1989 pLRight->parent( pParent, memory_model::memory_order_release );
// Recompute the three affected heights bottom-up.
1992 int hNode = 1 + std::max( hLRR, hR );
1993 set_height( pNode, hNode, memory_model::memory_order_release );
1994 int hLeft = 1 + std::max( hLL, hLRL );
1995 set_height( pLeft, hLeft, memory_model::memory_order_release );
1996 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
// Unfreeze both nodes and invalidate outstanding snapshots.
1998 end_change( pNode, nodeVersion );
1999 end_change( pLeft, leftVersion );
2000 m_stat.onRotateRightOverLeft();
2002 // caller should have performed only a single rotation if pLeft was going
2003 // to end up damaged
2004 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2005 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2007 // We have damaged pParent, pLR (now parent.child), and pNode (now
2008 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2009 // can with the locks we've got.
2011 // We've already fixed the height for pNode, but it might still be outside
2012 // our allowable balance range. In that case a simple fix_height_locked()
2014 int nodeBalance = hLRR - hR;
2015 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2016 // we need another rotation at pNode
2017 m_stat.onRotateAfterRLRotation();
// (returns for the damage cases below are on elided lines)
2021 // pNode might also be damaged by being an unnecessary routing node
2022 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2023 // repair involves splicing out pNode and maybe more rotations
2024 m_stat.onRemoveAfterRLRotation();
2028 // we've already fixed the height at pLRight, do we need a rotation here?
2029 int balanceLR = hLeft - hNode;
2030 if ( balanceLR < -1 || balanceLR > 1 ) {
2031 m_stat.onRotateAfterRLRotation();
2035 // try to fix the parent height while we've still got the lock
2036 return fix_height_locked( pParent );
// rotate_left_over_right_locked: mirror of rotate_right_over_left_locked -
// right-left double rotation. pRLeft (left child of pNode's right child) is
// promoted to replace pNode under pParent; pNode takes pRLeft's left subtree,
// pRight takes pRLeft's right subtree. Caller holds locks on pParent, pNode,
// pRight and pRLeft; both pNode and pRight are version-bracketed.
2039 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2041 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2042 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2044 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2045 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2046 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2047 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
// Mark both reshaped nodes shrinking before any relinking.
2049 begin_change( pNode, nodeVersion );
2050 begin_change( pRight, rightVersion );
2052 // fix up pNode links, careful about the order!
// pRLeft's left subtree moves under pNode.
2053 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2054 if ( pRLL != nullptr )
2055 pRLL->parent( pNode, memory_model::memory_order_release );
// pRLeft's right subtree moves under pRight.
2057 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2058 if ( pRLR != nullptr )
2059 pRLR->parent( pRight, memory_model::memory_order_release );
// pRLeft becomes the new subtree root: pNode left, pRight right.
2061 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2062 pRight->parent( pRLeft, memory_model::memory_order_release );
2064 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2065 pNode->parent( pRLeft, memory_model::memory_order_release );
// Switch pParent's child slot (the pPL == pNode test is elided).
2068 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2070 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2071 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2073 pRLeft->parent( pParent, memory_model::memory_order_release );
// Recompute the three affected heights bottom-up.
2076 int hNode = 1 + std::max( hL, hRLL );
2077 set_height( pNode, hNode, memory_model::memory_order_release );
2078 int hRight = 1 + std::max( hRLR, hRR );
2079 set_height( pRight, hRight, memory_model::memory_order_release );
2080 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
// Unfreeze both nodes, invalidating all outstanding snapshots.
2082 end_change( pNode, nodeVersion );
2083 end_change( pRight, rightVersion );
2084 m_stat.onRotateLeftOverRight();
// The caller guarantees pRight cannot end up damaged by this rotation.
2086 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage triage (returns are on elided lines): pNode still
// unbalanced, pNode an empty routing node, pRLeft unbalanced - otherwise
// just refresh the parent height while we still hold its lock.
2088 int nodeBalance = hRLL - hL;
2089 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2090 m_stat.onRotateAfterLRRotation();
2094 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2095 m_stat.onRemoveAfterLRRotation();
2099 int balRL = hRight - hNode;
2100 if ( balRL < -1 || balRL > 1 ) {
2101 m_stat.onRotateAfterLRRotation();
2105 return fix_height_locked( pParent );
2110 }} // namespace cds::container
2112 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H