3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
17 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
19 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
20 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
21 of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
22 the disposer functor provided by the \p Traits template parameter.
24 <b>Template arguments</b>:
25 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p T - value type to be stored in tree's nodes. Note that the specialization stores a pointer to a user-allocated value, not a copy of the value
29 - \p Traits - tree traits, default is \p bronson_avltree::traits
30 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
31 instead of \p Traits template argument.
33 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
34 see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding header files.
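For illustration, below is a minimal sketch of how such a map could be declared. The value type \p my_value,
the disposer \p my_disposer and the traits struct are hypothetical; the example assumes the general-buffered RCU flavor:
\code
#include <functional>
#include <cds/urcu/general_buffered.h>              // the RCU flavor must be included first
#include <cds/container/bronson_avltree_map_rcu.h>

struct my_value { /* user data */ };

// Disposer called (via RCU) for each value pointer removed from the map
struct my_disposer {
    void operator()( my_value * p ) const { delete p; }
};

// Traits: inherit the defaults and override what is needed
struct my_traits: public cds::container::bronson_avltree::traits {
    typedef std::less<int> less;        // key ordering
    typedef my_disposer    disposer;    // value disposer
};

typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
typedef cds::container::BronsonAVLTreeMap< rcu_type, int, my_value *, my_traits > value_ptr_map;
\endcode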
40 # ifdef CDS_DOXYGEN_INVOKED
41 typename Traits = bronson_avltree::traits
46 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
49 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
50 typedef Key key_type; ///< type of a key stored in the map
51 typedef T * mapped_type; ///< type of value stored in the map
52 typedef Traits traits; ///< Traits template parameter
54 # ifdef CDS_DOXYGEN_INVOKED
55 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
57 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
59 typedef typename traits::item_counter item_counter; ///< Item counting policy
60 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
61 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
62 typedef typename traits::stat stat; ///< internal statistics
63 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
64 typedef typename traits::back_off back_off; ///< Back-off strategy
65 typedef typename traits::disposer disposer; ///< Value disposer
66 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
68 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
69 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
71 /// Group of \p extract_xxx functions does not require external locking
72 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
74 # ifdef CDS_DOXYGEN_INVOKED
75 /// Returned pointer to \p mapped_type of extracted node
76 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
78 typedef cds::urcu::exempt_ptr< gc,
79 typename std::remove_pointer<mapped_type>::type,
80 typename std::remove_pointer<mapped_type>::type,
86 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
90 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
91 typedef typename node_type::version_type version_type;
93 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
94 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
96 enum class find_result
113 result_inserted = allow_insert,
114 result_updated = allow_update,
121 nothing_required = -3,
122 rebalance_required = -2,
131 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
136 template <typename K>
137 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
139 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
142 static void free_node( node_type * pNode )
144 // Free node without disposer
145 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
146 assert( pNode->m_SyncMonitorInjection.check_free());
147 cxx_allocator().Delete( pNode );
150 static void free_value( mapped_type pVal )
155 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
157 return pNode->child( nDir, order );
160 static node_type * parent( node_type * pNode, atomics::memory_order order )
162 return pNode->parent( order );
168 node_type * m_pRetiredList; ///< head of retired node list
169 mapped_type m_pRetiredValue; ///< value retired
173 : m_pRetiredList( nullptr )
174 , m_pRetiredValue( nullptr )
182 void dispose( node_type * pNode )
184 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
185 pNode->m_pNextRemoved = m_pRetiredList;
186 m_pRetiredList = pNode;
189 void dispose_value( mapped_type pVal )
191 assert( m_pRetiredValue == nullptr );
192 m_pRetiredValue = pVal;
196 struct internal_disposer
198 void operator()( node_type * p ) const
206 assert( !gc::is_locked() );
208 // TODO: use RCU::batch_retire
211 for ( node_type * p = m_pRetiredList; p; ) {
212 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
213 // Value already disposed
214 gc::template retire_ptr<internal_disposer>( p );
219 if ( m_pRetiredValue )
220 gc::template retire_ptr<disposer>( m_pRetiredValue );
228 typename node_type::base_class m_Root;
230 item_counter m_ItemCounter;
231 mutable sync_monitor m_Monitor;
236 /// Creates empty map
238 : m_pRoot( static_cast<node_type *>( &m_Root ))
249 The \p key_type should be constructible from a value of type \p K.
251 RCU \p synchronize() can be called. RCU should not be locked.
253 Returns \p true if the insertion is successful, \p false otherwise.
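A minimal usage sketch (assuming the hypothetical \p value_ptr_map type shown in the class description; illustrative only):
\code
value_ptr_map m;
my_value * p = new my_value;
if ( !m.insert( 42, p )) {
    // the key already exists; the map did not take ownership of p
    delete p;
}
\endcode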
255 template <typename K>
256 bool insert( K const& key, mapped_type pVal )
258 return do_update(key, key_comparator(),
259 [pVal]( node_type * pNode ) -> mapped_type
261 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
265 update_flags::allow_insert
266 ) == update_flags::result_inserted;
269 /// Updates the value for \p key
271 The operation performs inserting or updating the value for \p key in a lock-free manner.
272 If \p bInsert is \p false, only updating of existing node is possible.
274 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
275 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
276 constructible from type \p K.
277 Otherwise, the value for \p key will be changed to \p pVal.
279 RCU \p synchronize() method can be called. RCU should not be locked.
281 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
282 \p second is \p true if a new node has been added or \p false if the node with \p key already exists.
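An illustrative sketch of checking the result, assuming the hypothetical \p value_ptr_map type and a user-allocated
\p pNewVal pointer:
\code
std::pair<bool, bool> res = m.update( 42, pNewVal, true );
if ( res.first ) {
    if ( res.second ) {
        // a new node has been inserted for key 42
    }
    else {
        // the node already existed; the previous value pointer has been passed to the disposer
    }
}
\endcode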
285 template <typename K>
286 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
288 int result = do_update( key, key_comparator(),
289 [pVal]( node_type * ) -> mapped_type
293 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
295 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
300 /// Delete \p key from the map
302 RCU \p synchronize() method can be called. RCU should not be locked.
304 Returns \p true if \p key is found and deleted, \p false otherwise
306 template <typename K>
307 bool erase( K const& key )
312 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
316 /// Deletes the item from the map using \p pred predicate for searching
318 The function is an analog of \p erase(K const&)
319 but \p pred is used for key comparing.
320 \p Less functor has the interface like \p std::less.
321 \p Less must imply the same element order as the comparator used for building the map.
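A sketch of a suitable \p Less predicate for the hypothetical \p int key used in the examples above:
\code
struct int_less {
    bool operator()( int lhs, int rhs ) const { return lhs < rhs; }
};

bool bErased = m.erase_with( 42, int_less());
\endcode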
323 template <typename K, typename Less>
324 bool erase_with( K const& key, Less pred )
329 cds::opt::details::make_comparator_from_less<Less>(),
330 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
334 /// Delete \p key from the map
336 The function searches an item with key \p key, calls \p f functor
337 and deletes the item. If \p key is not found, the functor is not called.
339 The functor \p Func interface:
342 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
346 RCU \p synchronize method can be called. RCU should not be locked.
348 Returns \p true if \p key is found and deleted, \p false otherwise
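An illustrative call, assuming the hypothetical types from the examples above:
\code
bool bDeleted = m.erase( 42,
    []( int const& key, my_value& val ) {
        // val is still valid here; when the call returns, the value pointer
        // is handed to the disposer and freed when it is safe to do so
    });
\endcode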
350 template <typename K, typename Func>
351 bool erase( K const& key, Func f )
356 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
359 disp.dispose_value(pVal);
365 /// Deletes the item from the map using \p pred predicate for searching
367 The function is an analog of \p erase(K const&, Func)
368 but \p pred is used for key comparing.
369 \p Less functor has the interface like \p std::less.
370 \p Less must imply the same element order as the comparator used for building the map.
372 template <typename K, typename Less, typename Func>
373 bool erase_with( K const& key, Less pred, Func f )
378 cds::opt::details::make_comparator_from_less<Less>(),
379 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
382 disp.dispose_value(pVal);
388 /// Extracts a value with minimal key from the map
390 Returns \p exempt_ptr to the leftmost item.
391 If the tree is empty, returns empty \p exempt_ptr.
393 Note that the function returns only the value for the minimal key.
394 To retrieve its key, use the \p extract_min( Func ) member function.
396 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
397 It means that the function gets the leftmost leaf of the tree and tries to unlink it.
398 During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
399 So, the function returns the item with the minimal key at the moment of tree traversal.
401 RCU \p synchronize method can be called. RCU should NOT be locked.
402 The function does not free the item.
403 The deallocator will be implicitly invoked when the returned object is destroyed or when
404 its \p release() member function is called.
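A usage sketch (illustrative; \p m is the hypothetical map from the examples above):
\code
// RCU must not be locked here
value_ptr_map::exempt_ptr xp = m.extract_min();
if ( xp ) {
    // the extracted value is still alive and may be used via *xp
    xp.release();   // schedules the disposer for the extracted value
}
\endcode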
406 exempt_ptr extract_min()
408 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
411 /// Extracts the minimal key and its corresponding value
413 Returns \p exempt_ptr to the leftmost item.
414 If the tree is empty, returns empty \p exempt_ptr.
416 \p Func functor is used to store minimal key.
417 \p Func has the following signature:
420 void operator()( key_type const& key );
423 If the tree is empty, \p f is not called.
424 Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned.
427 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
428 It means that the function gets the leftmost leaf of the tree and tries to unlink it.
429 During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
430 So, the function returns the item with the minimal key at the moment of tree traversal.
432 RCU \p synchronize method can be called. RCU should NOT be locked.
433 The function does not free the item.
434 The deallocator will be implicitly invoked when the returned object is destroyed or when
435 its \p release() member function is called.
437 template <typename Func>
438 exempt_ptr extract_min( Func f )
440 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
443 /// Extracts the minimal key and its corresponding value
445 This function is a shortcut for the following call:
448 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
450 \p key_type should be copy-assignable. The copy of the minimal key
451 is returned in the \p min_key argument.
453 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
454 extract_min_key( key_type& min_key )
456 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
459 /// Extracts a value with maximal key from the tree
461 Returns an \p exempt_ptr to the rightmost item.
462 If the map is empty, returns an empty \p exempt_ptr.
464 Note that the function returns only the value for the maximal key.
465 To retrieve its key, use the \p extract_max( Func ) member function.
467 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
468 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
469 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
470 So, the function returns the item with the maximal key at the moment of tree traversal.
472 RCU \p synchronize method can be called. RCU should NOT be locked.
473 The function does not free the item.
474 The deallocator will be implicitly invoked when the returned object is destroyed or when
475 its \p release() is called.
477 exempt_ptr extract_max()
479 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
482 /// Extracts the maximal key and corresponding value
484 Returns an \p exempt_ptr to the rightmost item.
485 If the map is empty, returns an empty \p exempt_ptr.
487 \p Func functor is used to store maximal key.
488 \p Func has the following signature:
491 void operator()( key_type const& key );
494 If the tree is empty, \p f is not called.
495 Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned.
498 @note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
499 It means that the function gets the rightmost leaf of the tree and tries to unlink it.
500 During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
501 So, the function returns the item with the maximal key at the moment of tree traversal.
503 RCU \p synchronize method can be called. RCU should NOT be locked.
504 The function does not free the item.
505 The deallocator will be implicitly invoked when the returned object is destroyed or when
506 its \p release() is called.
508 template <typename Func>
509 exempt_ptr extract_max( Func f )
511 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
514 /// Extracts the maximal key and corresponding value
516 This function is a shortcut for the following call:
519 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
521 \p key_type should be copy-assignable. The copy of the maximal key
522 is returned in the \p max_key argument.
524 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
525 extract_max_key( key_type& max_key )
527 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
530 /// Extracts an item from the map
532 The function searches an item with key equal to \p key in the tree,
533 unlinks it, and returns an \p exempt_ptr to the value found.
534 If \p key is not found the function returns an empty \p exempt_ptr.
536 RCU \p synchronize method can be called. RCU should NOT be locked.
537 The function does not destroy the value found.
538 The disposer will be implicitly invoked when the returned object is destroyed or when
539 its \p release() member function is called.
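An illustrative sketch:
\code
// RCU must not be locked here
value_ptr_map::exempt_ptr xp = m.extract( 42 );
if ( xp ) {
    // the node has been unlinked; the value is disposed when xp is destroyed or released
}
\endcode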
541 template <typename Q>
542 exempt_ptr extract( Q const& key )
544 return exempt_ptr(do_extract( key ));
548 /// Extracts an item from the map using \p pred for searching
550 The function is an analog of \p extract(Q const&)
551 but \p pred is used for key compare.
552 \p Less has the interface like \p std::less.
553 \p pred must imply the same element order as the comparator used for building the tree.
555 template <typename Q, typename Less>
556 exempt_ptr extract_with( Q const& key, Less pred )
558 return exempt_ptr(do_extract_with( key, pred ));
561 /// Find the key \p key
563 The function searches the item with key equal to \p key and calls the functor \p f for item found.
564 The interface of \p Func functor is:
567 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& item );
570 where \p item is the item found.
571 The functor is called under node-level lock.
573 The function applies RCU lock internally.
575 The function returns \p true if \p key is found, \p false otherwise.
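An illustrative call (hypothetical map type as above):
\code
bool bFound = m.find( 42,
    []( int const& key, my_value& item ) {
        // called under node-level lock: keep the functor short and non-blocking
    });
\endcode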
577 template <typename K, typename Func>
578 bool find( K const& key, Func f )
580 return do_find( key, key_comparator(),
581 [&f]( node_type * pNode ) -> bool {
582 assert( pNode != nullptr );
583 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
585 f( pNode->m_key, *pVal );
593 /// Finds the key \p key using \p pred predicate for searching
595 The function is an analog of \p find(K const&, Func)
596 but \p pred is used for key comparing.
597 \p Less functor has the interface like \p std::less.
598 \p Less must imply the same element order as the comparator used for building the map.
600 template <typename K, typename Less, typename Func>
601 bool find_with( K const& key, Less pred, Func f )
604 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
605 [&f]( node_type * pNode ) -> bool {
606 assert( pNode != nullptr );
607 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
609 f( pNode->m_key, *pVal );
617 /// Checks whether the map contains \p key
619 The function searches the item with key equal to \p key
620 and returns \p true if it is found, and \p false otherwise.
622 The function applies RCU lock internally.
624 template <typename K>
625 bool contains( K const& key )
627 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
630 /// Checks whether the map contains \p key using \p pred predicate for searching
632 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
633 \p Less functor has the interface like \p std::less.
634 \p Less must imply the same element order as the comparator used for building the map.
636 template <typename K, typename Less>
637 bool contains( K const& key, Less pred )
640 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
643 /// Clears the tree (thread safe, not atomic)
645 The function unlinks all items from the tree.
646 The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions,
650 a sequence like <tt>set.clear(); assert( set.empty() );</tt>
652 could raise the assertion.
654 For each node the \ref disposer will be called after unlinking.
656 RCU \p synchronize method can be called. RCU should not be locked.
660 while ( extract_min() );
663 /// Clears the tree (not thread safe)
665 This function is not thread safe and may be called only when no other thread deals with the tree.
666 The function is used in the tree destructor.
670 clear(); // temp solution
674 /// Checks if the map is empty
677 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
680 /// Returns item count in the map
682 Only leaf nodes containing user data are counted.
684 The value returned depends on item counter type provided by \p Traits template parameter.
685 If it is \p atomicity::empty_item_counter this function always returns 0.
687 The function is not suitable for checking the tree emptiness, use \p empty()
688 member function for this purpose.
692 return m_ItemCounter;
695 /// Returns const reference to internal statistics
696 stat const& statistics() const
701 /// Returns reference to \p sync_monitor object
702 sync_monitor& monitor()
707 sync_monitor const& monitor() const
713 /// Checks internal consistency (not atomic, not thread-safe)
715 The debugging function to check internal consistency of the tree.
717 bool check_consistency() const
719 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
722 /// Checks internal consistency (not atomic, not thread-safe)
724 The debugging function to check internal consistency of the tree.
725 The functor \p Func is called if a violation of internal tree structure is found. Its signature is:
729 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
733 - \p nLevel - the level where the violation is found
734 - \p hLeft - the height of left subtree
735 - \p hRight - the height of right subtree
737 The functor is called for each violation found.
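An illustrative debugging call (writing to \p std::cerr for illustration only):
\code
bool bConsistent = m.check_consistency(
    []( size_t nLevel, size_t hLeft, size_t hRight ) {
        std::cerr << "Balance violation at level " << nLevel
                  << ": hLeft=" << hLeft << ", hRight=" << hRight << std::endl;
    });
\endcode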
739 template <typename Func>
740 bool check_consistency( Func f ) const
742 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
745 do_check_consistency( pChild, 1, f, nErrors );
753 template <typename Func>
754 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
758 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
759 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
760 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
762 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
765 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
766 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
768 if ( hLeft >= hRight ) {
769 if ( hLeft - hRight > 1 ) {
770 f( nLevel, hLeft, hRight );
776 if ( hRight - hLeft > 1 ) {
777 f( nLevel, hLeft, hRight );
786 template <typename Q, typename Compare, typename Func>
787 bool do_find( Q& key, Compare cmp, Func f ) const
792 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
794 assert( result != find_result::retry );
795 return result == find_result::found;
798 template <typename K, typename Compare, typename Func>
799 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
801 check_deadlock_policy::check();
803 rcu_disposer removed_list;
806 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
810 template <typename K, typename Compare, typename Func>
811 bool do_remove( K const& key, Compare cmp, Func func )
813 // Func must return true if the value was disposed
814 // or false if the value was extracted
816 check_deadlock_policy::check();
818 rcu_disposer removed_list;
821 return try_remove_root( key, cmp, func, removed_list );
825 template <typename Func>
826 mapped_type do_extract_min( Func f )
828 mapped_type pExtracted = nullptr;
831 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
836 template <typename Func>
837 mapped_type do_extract_max( Func f )
839 mapped_type pExtracted = nullptr;
842 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
847 template <typename Func>
848 void do_extract_minmax( int nDir, Func func )
850 check_deadlock_policy::check();
852 rcu_disposer removed_list;
859 // get right child of root
860 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
862 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
863 if ( nChildVersion & node_type::shrinking ) {
864 m_stat.onRemoveRootWaitShrinking();
865 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
866 result = update_flags::retry;
868 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
869 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
872 result = update_flags::retry;
877 if ( result == update_flags::retry )
878 m_stat.onRemoveRetry();
880 m_stat.onExtract( result == update_flags::result_removed );
887 template <typename Q>
888 mapped_type do_extract( Q const& key )
890 mapped_type pExtracted = nullptr;
894 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
896 m_stat.onExtract( pExtracted != nullptr );
900 template <typename Q, typename Less>
901 mapped_type do_extract_with( Q const& key, Less pred )
904 mapped_type pExtracted = nullptr;
907 cds::opt::details::make_comparator_from_less<Less>(),
908 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
910 m_stat.onExtract( pExtracted != nullptr );
917 static int height( node_type * pNode, atomics::memory_order order )
920 return pNode->m_nHeight.load( order );
922 static void set_height( node_type * pNode, int h, atomics::memory_order order )
925 pNode->m_nHeight.store( h, order );
927 static int height_null( node_type * pNode, atomics::memory_order order )
929 return pNode ? height( pNode, order ) : 0;
932 static CDS_CONSTEXPR int const c_stackSize = 64;
934 template <typename Q, typename Compare, typename Func>
935 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
937 assert( gc::is_locked() );
943 version_type nVersion;
947 stack_record stack[c_stackSize];
949 stack[0].pNode = pNode;
950 stack[0].nVersion = nVersion;
951 stack[0].nDir = nDir;
954 pNode = stack[pos].pNode;
955 nVersion = stack[pos].nVersion;
956 nDir = stack[pos].nDir;
959 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
961 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
963 m_stat.onFindRetry();
966 m_stat.onFindFailed();
967 return find_result::not_found;
970 int nCmp = cmp( key, pChild->m_key );
972 if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
974 node_scoped_lock l( m_Monitor, *pChild );
975 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
976 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
978 m_stat.onFindSuccess();
979 return find_result::found;
984 m_stat.onFindRetry();
988 m_stat.onFindFailed();
989 return find_result::not_found;
992 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
993 if ( nChildVersion & node_type::shrinking ) {
994 m_stat.onFindWaitShrinking();
995 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
997 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
999 m_stat.onFindRetry();
1003 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1005 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1007 m_stat.onFindRetry();
1012 assert(pos < c_stackSize);
1013 stack[pos].pNode = pChild;
1014 stack[pos].nVersion = nChildVersion;
1015 stack[pos].nDir = nCmp;
1016 break; // child iteration
1019 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1021 m_stat.onFindRetry();
1025 m_stat.onFindRetry();
1028 return find_result::retry;
1031 template <typename K, typename Compare, typename Func>
1032 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1034 assert( gc::is_locked() );
1039 // get right child of root
1040 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1042 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1043 if ( nChildVersion & node_type::shrinking ) {
1044 m_stat.onUpdateRootWaitShrinking();
1045 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1046 result = update_flags::retry;
1048 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1049 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1051 result = update_flags::retry;
1054 // the tree is empty
1055 if ( nFlags & update_flags::allow_insert ) {
1056 // insert into tree as right child of the root
1058 node_scoped_lock l( m_Monitor, *m_pRoot );
1059 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1060 result = update_flags::retry;
1064 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1065 mapped_type pVal = funcUpdate( pNew );
1066 assert( pVal != nullptr );
1067 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1069 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1070 set_height( m_pRoot, 2, memory_model::memory_order_release );
1074 m_stat.onInsertSuccess();
1075 return update_flags::result_inserted;
1078 return update_flags::failed;
1081 if ( result != update_flags::retry )
1086 template <typename K, typename Compare, typename Func>
1087 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1089 assert( gc::is_locked() );
1094 // get right child of root
1095 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1097 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1098 if ( nChildVersion & node_type::shrinking ) {
1099 m_stat.onRemoveRootWaitShrinking();
1100 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1101 result = update_flags::retry;
1103 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1104 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1107 result = update_flags::retry;
1112 if ( result == update_flags::retry )
1113 m_stat.onRemoveRetry();
1115 m_stat.onRemove( result == update_flags::result_removed );
1116 return result == update_flags::result_removed;
1121 template <typename K, typename Compare, typename Func>
1122 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1124 assert( gc::is_locked() );
1125 assert( nVersion != node_type::unlinked );
1130 version_type nVersion;
1133 stack_record stack[c_stackSize];
1135 stack[0].pNode = pNode;
1136 stack[0].nVersion = nVersion;
1138 while ( pos >= 0 ) {
1139 pNode = stack[pos].pNode;
1140 nVersion = stack[pos].nVersion;
1142 int nCmp = cmp( key, pNode->m_key );
1144 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1145 if ( result != update_flags::retry )
1148 m_stat.onUpdateRetry();
1153 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1154 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1156 m_stat.onUpdateRetry();
1160 if ( pChild == nullptr ) {
1162 if ( nFlags & update_flags::allow_insert ) {
1163 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1164 if ( result != update_flags::retry )
1167 m_stat.onUpdateRetry();
1171 return update_flags::failed;
1175 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1176 if ( nChildVersion & node_type::shrinking ) {
1177 m_stat.onUpdateWaitShrinking();
1178 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1181 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1182 // this second read is important, because it is protected by nChildVersion
1184 // validate the read that our caller took to get to node
1185 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1187 m_stat.onUpdateRetry();
1190 // At this point we know that the traversal our parent took to get to node is still valid.
1191 // The recursive implementation will validate the traversal from node to
1192 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1193 // This means that we are no longer vulnerable to node shrinks, and we don't need
1194 // to validate node version any more.
1196 assert( pos < c_stackSize );
1197 stack[pos].pNode = pChild;
1198 stack[pos].nVersion = nChildVersion;
1199 assert( nChildVersion != node_type::unlinked );
1200 break; // child iteration
1202 m_stat.onUpdateRetry();
1206 return update_flags::retry;
1209 template <typename K, typename Compare, typename Func>
1210 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1212 assert( gc::is_locked() );
1213 assert( nVersion != node_type::unlinked );
1217 node_type * pParent;
1219 version_type nVersion;
1222 stack_record stack[c_stackSize];
1224 stack[0].pParent = pParent;
1225 stack[0].pNode = pNode;
1226 stack[0].nVersion = nVersion;
1228 while ( pos >= 0 ) {
1229 pParent = stack[pos].pParent;
1230 pNode = stack[pos].pNode;
1231 nVersion = stack[pos].nVersion;
1233 int nCmp = cmp( key, pNode->m_key );
1235 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1236 if ( result != update_flags::retry )
1239 m_stat.onRemoveRetry();
1244 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1245 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1247 m_stat.onRemoveRetry();
1251 if ( pChild == nullptr )
1252 return update_flags::failed;
1255 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1256 if ( nChildVersion & node_type::shrinking ) {
1257 m_stat.onRemoveWaitShrinking();
1258 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1261 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1262 // this second read is important, because it is protected by nChildVersion
1264 // validate the read that our caller took to get to node
1265 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1267 m_stat.onRemoveRetry();
1271 // At this point we know that the traversal our parent took to get to node is still valid.
1272 // The recursive implementation will validate the traversal from node to
1273 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1274 // This means that we are no longer vulnerable to node shrinks, and we don't need
1275 // to validate node version any more.
1277 assert( pos < c_stackSize );
1278 stack[pos].pParent = pNode;
1279 stack[pos].pNode = pChild;
1280 stack[pos].nVersion = nChildVersion;
1281 break; // child iteration
1283 m_stat.onRemoveRetry();
1286 return update_flags::retry;
1289 template <typename Func>
1290 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1292 assert( gc::is_locked() );
1293 assert( nVersion != node_type::unlinked );
1297 node_type * pParent;
1299 version_type nVersion;
1302 stack_record stack[c_stackSize];
1304 stack[0].pParent = pParent;
1305 stack[0].pNode = pNode;
1306 stack[0].nVersion = nVersion;
1308 while ( pos >= 0 ) {
1309 pParent = stack[pos].pParent;
1310 pNode = stack[pos].pNode;
1311 nVersion = stack[pos].nVersion;
1314 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
1315 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1317 m_stat.onRemoveRetry();
1321 if ( pChild == nullptr ) {
1323 assert(pNode->is_valued( memory_model::memory_order_acquire ));
1324 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1325 if ( result != update_flags::retry )
1328 m_stat.onRemoveRetry();
1332 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1333 if ( nChildVersion & node_type::shrinking ) {
1334 m_stat.onRemoveWaitShrinking();
1335 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1338 else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
1339 // this second read is important, because it is protected by nChildVersion
1341 // validate the read that our caller took to get to node
1342 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1344 m_stat.onRemoveRetry();
1348 // At this point we know that the traversal our parent took to get to node is still valid.
1349 // The recursive implementation will validate the traversal from node to
1350 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1351 // This means that we are no longer vulnerable to node shrinks, and we don't need
1352 // to validate node version any more.
1354 assert( pos < c_stackSize );
1355 stack[pos].pParent = pNode;
1356 stack[pos].pNode = pChild;
1357 stack[pos].nVersion = nChildVersion;
1358 break; // child iteration
1360 m_stat.onRemoveRetry();
1363 return update_flags::retry;
1366 template <typename K, typename Func>
1367 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1371 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1372 mapped_type pVal = funcUpdate( pNew );
1373 assert( pVal != nullptr );
1374 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1377 if ( c_bRelaxedInsert ) {
1378 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1379 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1381 m_stat.onInsertRetry();
1382 return update_flags::retry;
1385 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1388 node_type * pDamaged;
1390 assert( pNode != nullptr );
1391 node_scoped_lock l( m_Monitor, *pNode );
1393 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1394 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1396 if ( c_bRelaxedInsert ) {
1397 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1398 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1401 m_stat.onRelaxedInsertFailed();
1404 m_stat.onInsertRetry();
1405 return update_flags::retry;
1408 if ( !c_bRelaxedInsert )
1409 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1411 pNode->child( pNew, nDir, memory_model::memory_order_release );
1412 pDamaged = fix_height_locked( pNode );
1416 m_stat.onInsertSuccess();
1419 fix_height_and_rebalance( pDamaged, disp );
1420 m_stat.onInsertRebalanceRequired();
1423 return update_flags::result_inserted;
1426 template <typename Func>
1427 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1431 assert( pNode != nullptr );
1433 node_scoped_lock l( m_Monitor, *pNode );
1435 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1436 return update_flags::retry;
1438 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1439 m_stat.onUpdateUnlinked();
1440 return update_flags::retry;
1443 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1444 m_stat.onInsertFailed();
1445 return update_flags::failed;
1448 pOld = pNode->value( memory_model::memory_order_relaxed );
1449 bInserted = pOld == nullptr;
1450 mapped_type pVal = funcUpdate( pNode );
1454 assert( pVal != nullptr );
1455 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
1460 disp.dispose_value(pOld);
1461 m_stat.onDisposeValue();
1466 m_stat.onInsertSuccess();
1467 return update_flags::result_inserted;
1470 m_stat.onUpdateSuccess();
1471 return update_flags::result_updated;
1474 template <typename Func>
1475 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1477 assert( pParent != nullptr );
1478 assert( pNode != nullptr );
1480 if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
1481 return update_flags::failed;
1483 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1484 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1486 // pNode can be replaced with its child
1488 node_type * pDamaged;
1491 node_scoped_lock lp( m_Monitor, *pParent );
1492 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1493 return update_flags::retry;
1496 node_scoped_lock ln( m_Monitor, *pNode );
1497 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1498 return update_flags::retry;
1500 pOld = pNode->value( memory_model::memory_order_relaxed );
1502 return update_flags::failed;
1504 if ( !try_unlink_locked( pParent, pNode, disp ))
1505 return update_flags::retry;
1507 pDamaged = fix_height_locked( pParent );
1511 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1512 m_stat.onDisposeValue();
1514 m_stat.onExtractValue();
1517 fix_height_and_rebalance( pDamaged, disp );
1518 m_stat.onRemoveRebalanceRequired();
1522 // pNode is an internal node with two children
1526 node_scoped_lock ln( m_Monitor, *pNode );
1527 pOld = pNode->value( memory_model::memory_order_relaxed );
1528 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1529 return update_flags::retry;
1531 return update_flags::failed;
1533 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1534 m_stat.onMakeRoutingNode();
1538 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1539 m_stat.onDisposeValue();
1541 m_stat.onExtractValue();
1543 return update_flags::result_removed;
1546 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1548 // pParent and pNode must be locked
1549 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1551 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1552 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1553 if ( pNode != pParentLeft && pNode != pParentRight ) {
1554 // node is no longer a child of parent
1558 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1559 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1561 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1562 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1563 if ( pLeft != nullptr && pRight != nullptr ) {
1564 // splicing is no longer possible
1567 node_type * pSplice = pLeft ? pLeft : pRight;
1569 if ( pParentLeft == pNode )
1570 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1572 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1575 pSplice->parent( pParent, memory_model::memory_order_release );
1577 // Mark the node as unlinked
1578 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1580 // The value will be disposed by the calling function
1581 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1583 disp.dispose( pNode );
1584 m_stat.onDisposeNode();
1591 private: // rotations
1593 int estimate_node_condition( node_type * pNode )
1595 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1596 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
1598 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1599 return unlink_required;
1601 int h = height( pNode, memory_model::memory_order_acquire );
1602 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1603 int hR = height_null( pRight, memory_model::memory_order_acquire );
1605 int hNew = 1 + std::max( hL, hR );
1606 int nBalance = hL - hR;
1608 if ( nBalance < -1 || nBalance > 1 )
1609 return rebalance_required;
1611 return h != hNew ? hNew : nothing_required;
1614 node_type * fix_height( node_type * pNode )
1616 assert( pNode != nullptr );
1617 node_scoped_lock l( m_Monitor, *pNode );
1618 return fix_height_locked( pNode );
1621 node_type * fix_height_locked( node_type * pNode )
1623 // pNode must be locked!!!
1624 int h = estimate_node_condition( pNode );
1626 case rebalance_required:
1627 case unlink_required:
1629 case nothing_required:
1632 set_height( pNode, h, memory_model::memory_order_release );
1633 return parent( pNode, memory_model::memory_order_relaxed );
1637 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1639 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1640 int nCond = estimate_node_condition( pNode );
1641 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
1644 if ( nCond != unlink_required && nCond != rebalance_required )
1645 pNode = fix_height( pNode );
1647 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1648 assert( pParent != nullptr );
1650 node_scoped_lock lp( m_Monitor, *pParent );
1651 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1652 node_scoped_lock ln( m_Monitor, *pNode );
1653 pNode = rebalance_locked( pParent, pNode, disp );
1660 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1662 // pParent and pNode should be locked.
1663 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1664 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1666 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1667 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1669 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1670 if ( try_unlink_locked( pParent, pNode, disp ))
1671 return fix_height_locked( pParent );
1673 // retry needed for pNode
1678 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1679 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1681 int h = height( pNode, memory_model::memory_order_acquire );
1682 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1683 int hR = height_null( pRight, memory_model::memory_order_acquire );
1684 int hNew = 1 + std::max( hL, hR );
1685 int balance = hL - hR;
1688 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1689 else if ( balance < -1 )
1690 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1691 else if ( hNew != h ) {
1692 set_height( pNode, hNew, memory_model::memory_order_release );
1694 // pParent is already locked
1695 return fix_height_locked( pParent );
1701 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1703 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1704 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1705 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1707 // pParent and pNode are already locked
1708 // pNode->pLeft is too large, we will rotate-right.
1709 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1712 assert( pLeft != nullptr );
1713 node_scoped_lock l( m_Monitor, *pLeft );
1714 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1715 return pNode; // retry for pNode
1717 int hL = height( pLeft, memory_model::memory_order_acquire );
1719 return pNode; // retry
1721 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1722 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1723 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1724 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
1728 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1731 assert( pLRight != nullptr );
1733 node_scoped_lock lr( m_Monitor, *pLRight );
1734 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1735 return pNode; // retry
1737 hLR = height( pLRight, memory_model::memory_order_acquire );
1739 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1741 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1742 int balance = hLL - hLRL;
1743 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1744 // pParent.child.left won't be damaged after a double rotation
1745 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1749 // focus on pLeft, if necessary pNode will be balanced later
1750 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1755 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1757 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1758 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1759 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1761 // pParent and pNode are already locked
1763 assert( pRight != nullptr );
1764 node_scoped_lock l( m_Monitor, *pRight );
1765 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1766 return pNode; // retry for pNode
1768 int hR = height( pRight, memory_model::memory_order_acquire );
1769 if ( hL - hR >= -1 )
1770 return pNode; // retry
1772 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1773 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1774 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1775 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
1777 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1780 assert( pRLeft != nullptr );
1781 node_scoped_lock lrl( m_Monitor, *pRLeft );
1782 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1783 return pNode; // retry
1785 hRL = height( pRLeft, memory_model::memory_order_acquire );
1787 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1789 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1790 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1791 int balance = hRR - hRLR;
1792 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1793 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1795 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1799 static void begin_change( node_type * pNode, version_type version )
1801 assert(pNode->version(memory_model::memory_order_acquire) == version );
1802 assert( (version & node_type::shrinking) == 0 );
1803 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1805 static void end_change( node_type * pNode, version_type version )
1807 // Clear shrinking and unlinked flags and increment version
1808 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1811 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1813 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1814 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1816 begin_change( pNode, nodeVersion );
1818 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1819 if ( pLRight != nullptr )
1820 pLRight->parent( pNode, memory_model::memory_order_release );
1822 atomics::atomic_thread_fence( memory_model::memory_order_release );
1824 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1825 pNode->parent( pLeft, memory_model::memory_order_release );
1827 atomics::atomic_thread_fence( memory_model::memory_order_release );
1829 if ( pParentLeft == pNode )
1830 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1832 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1833 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1835 pLeft->parent( pParent, memory_model::memory_order_release );
1837 atomics::atomic_thread_fence( memory_model::memory_order_release );
1839 // fix up heights
1840 int hNode = 1 + std::max( hLR, hR );
1841 set_height( pNode, hNode, memory_model::memory_order_release );
1842 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
1844 end_change( pNode, nodeVersion );
1845 m_stat.onRotateRight();
1847 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1848 // parent.child). pNode is the deepest. Perform as many fixes as we can
1849 // with the locks we've got.
1851 // We've already fixed the height for pNode, but it might still be outside
1852 // our allowable balance range. In that case a simple fix_height_locked() will not be enough.
1854 int nodeBalance = hLR - hR;
1855 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1856 // we need another rotation at pNode
1857 m_stat.onRotateAfterRightRotation();
1861 // we've fixed balance and height damage for pNode, now handle
1862 // extra-routing node damage
1863 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1864 // we need to remove pNode and then repair
1865 m_stat.onRemoveAfterRightRotation();
1869 // we've already fixed the height at pLeft, do we need a rotation here?
1870 int leftBalance = hLL - hNode;
1871 if ( leftBalance < -1 || leftBalance > 1 ) {
1872 m_stat.onRotateAfterRightRotation();
1876 // pLeft might also have routing node damage (if pLeft.left was null)
1877 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
1878 m_stat.onDamageAfterRightRotation();
1882 // try to fix the parent height while we've still got the lock
1883 return fix_height_locked( pParent );
1886 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1888 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1889 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1891 begin_change( pNode, nodeVersion );
1893 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1894 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1895 if ( pRLeft != nullptr )
1896 pRLeft->parent( pNode, memory_model::memory_order_release );
1898 atomics::atomic_thread_fence( memory_model::memory_order_release );
1900 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1901 pNode->parent( pRight, memory_model::memory_order_release );
1903 atomics::atomic_thread_fence( memory_model::memory_order_release );
1905 if ( pParentLeft == pNode )
1906 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1908 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1909 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1911 pRight->parent( pParent, memory_model::memory_order_release );
1913 atomics::atomic_thread_fence( memory_model::memory_order_release );
1916 int hNode = 1 + std::max( hL, hRL );
1917 set_height( pNode, hNode, memory_model::memory_order_release );
1918 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
1920 end_change( pNode, nodeVersion );
1921 m_stat.onRotateLeft();
1923 int nodeBalance = hRL - hL;
1924 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1925 m_stat.onRotateAfterLeftRotation();
1929 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
1930 m_stat.onRemoveAfterLeftRotation();
1934 int rightBalance = hRR - hNode;
1935 if ( rightBalance < -1 || rightBalance > 1 ) {
1936 m_stat.onRotateAfterLeftRotation();
1940 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
1941 m_stat.onDamageAfterLeftRotation();
1945 return fix_height_locked( pParent );
1948 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1950 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1951 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1953 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1954 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1955 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1956 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
1958 begin_change( pNode, nodeVersion );
1959 begin_change( pLeft, leftVersion );
1961 // fix up pNode links, careful about the order!
1962 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1963 if ( pLRR != nullptr )
1964 pLRR->parent( pNode, memory_model::memory_order_release );
1966 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1967 if ( pLRL != nullptr )
1968 pLRL->parent( pLeft, memory_model::memory_order_release );
1970 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
1971 pLeft->parent( pLRight, memory_model::memory_order_release );
1973 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
1974 pNode->parent( pLRight, memory_model::memory_order_release );
1977 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
1979 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1980 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
1982 pLRight->parent( pParent, memory_model::memory_order_release );
1985 int hNode = 1 + std::max( hLRR, hR );
1986 set_height( pNode, hNode, memory_model::memory_order_release );
1987 int hLeft = 1 + std::max( hLL, hLRL );
1988 set_height( pLeft, hLeft, memory_model::memory_order_release );
1989 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
1991 end_change( pNode, nodeVersion );
1992 end_change( pLeft, leftVersion );
1993 m_stat.onRotateRightOverLeft();
1995 // caller should have performed only a single rotation if pLeft was going
1996 // to end up damaged
1997 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1998 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2000 // We have damaged pParent, pLR (now parent.child), and pNode (now
2001 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2002 // can with the locks we've got.
2004 // We've already fixed the height for pNode, but it might still be outside
2005 // our allowable balance range. In that case a simple fix_height_locked() will not be enough.
2007 int nodeBalance = hLRR - hR;
2008 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2009 // we need another rotation at pNode
2010 m_stat.onRotateAfterRLRotation();
2014 // pNode might also be damaged by being an unnecessary routing node
2015 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2016 // repair involves splicing out pNode and maybe more rotations
2017 m_stat.onRemoveAfterRLRotation();
2021 // we've already fixed the height at pLRight, do we need a rotation here?
2022 int balanceLR = hLeft - hNode;
2023 if ( balanceLR < -1 || balanceLR > 1 ) {
2024 m_stat.onRotateAfterRLRotation();
2028 // try to fix the parent height while we've still got the lock
2029 return fix_height_locked( pParent );
2032 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2034 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2035 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2037 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2038 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2039 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2040 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
2042 begin_change( pNode, nodeVersion );
2043 begin_change( pRight, rightVersion );
2045 // fix up pNode links, careful about the order!
2046 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2047 if ( pRLL != nullptr )
2048 pRLL->parent( pNode, memory_model::memory_order_release );
2050 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2051 if ( pRLR != nullptr )
2052 pRLR->parent( pRight, memory_model::memory_order_release );
2054 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2055 pRight->parent( pRLeft, memory_model::memory_order_release );
2057 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2058 pNode->parent( pRLeft, memory_model::memory_order_release );
2061 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2063 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2064 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2066 pRLeft->parent( pParent, memory_model::memory_order_release );
2069 int hNode = 1 + std::max( hL, hRLL );
2070 set_height( pNode, hNode, memory_model::memory_order_release );
2071 int hRight = 1 + std::max( hRLR, hRR );
2072 set_height( pRight, hRight, memory_model::memory_order_release );
2073 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
2075 end_change( pNode, nodeVersion );
2076 end_change( pRight, rightVersion );
2077 m_stat.onRotateLeftOverRight();
2079 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
2081 int nodeBalance = hRLL - hL;
2082 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2083 m_stat.onRotateAfterLRRotation();
2087 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2088 m_stat.onRemoveAfterLRRotation();
2092 int balRL = hRight - hNode;
2093 if ( balRL < -1 || balRL > 1 ) {
2094 m_stat.onRotateAfterLRRotation();
2098 return fix_height_locked( pParent );
2103 }} // namespace cds::container
2105 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H