3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <type_traits> // is_base_of
7 #include <cds/container/details/bronson_avltree_base.h>
8 #include <cds/urcu/details/check_deadlock.h>
9 #include <cds/urcu/exempt_ptr.h>
11 namespace cds { namespace container {
13 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
14 /** @ingroup cds_nonintrusive_map
15 @ingroup cds_nonintrusive_tree
16 @headerfile cds/container/bronson_avltree_map_rcu.h
18 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
19 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
20 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it calls
21 the disposer functor provided by \p Traits template parameter.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
// Specialization of BronsonAVLTreeMap that stores a raw pointer to a
// user-allocated value (T*) and reclaims memory through an RCU scheme.
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map (a raw pointer)
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
// Non-Doxygen build: comparator is synthesized from Traits::compare / Traits::less.
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
70 # ifdef CDS_DOXYGEN_INVOKED
71 /// Returned pointer to \p mapped_type of extracted node
72 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
// Non-Doxygen build: exempt_ptr is built over the pointee type of mapped_type.
74 typedef cds::urcu::exempt_ptr< gc,
75 typename std::remove_pointer<mapped_type>::type,
76 typename std::remove_pointer<mapped_type>::type,
82 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal (implementation) types follow.
86 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
87 typedef typename node_type::version_type version_type;
89 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
90 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
// find_result / update_flags enumerators; result_* values mirror the allow_* flags
// so callers can test results with a bit-mask.
92 enum class find_result
109 result_inserted = allow_insert,
110 result_updated = allow_update,
// Negative values describe rebalance conditions (full definitions not visible in this chunk).
117 nothing_required = -3,
118 rebalance_required = -2,
127 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// --- Node helpers: allocation, deallocation and atomic link accessors ---
132 template <typename K>
// Allocates a tree node via cxx_allocator, forwarding the key into the node.
133 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
135 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees only the node memory; the stored value is NOT passed to the disposer here.
138 static void free_node( node_type * pNode )
140 // Free node without disposer
141 cxx_allocator().Delete( pNode );
// Disposes a value pointer (body not visible in this chunk — presumably invokes traits::disposer; verify against full source).
144 static void free_value( mapped_type pVal )
// Atomically reads the child link selected by nDir (sign selects left/right) with the given memory order.
149 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
151 return pNode->child( nDir ).load( order );
// Atomically reads the parent link with the given memory order.
154 static node_type * parent( node_type * pNode, atomics::memory_order order )
156 return pNode->m_pParent.load( order );
// rcu_disposer fragment: collects nodes (and at most one value) retired during a
// single operation and hands them to the RCU GC outside the read-side critical section.
162 node_type * m_pRetiredList; ///< head of retired node list
163 mapped_type m_pRetiredValue; ///< value retired
167 : m_pRetiredList( nullptr )
168 , m_pRetiredValue( nullptr )
// Links an unlinked routing node into the retired list; the node must no longer hold a value.
176 void dispose( node_type * pNode )
178 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
179 pNode->m_pNextRemoved = m_pRetiredList;
180 m_pRetiredList = pNode;
// Records the value pointer to retire; asserts at most one value per operation.
183 void dispose_value( mapped_type pVal )
185 assert( m_pRetiredValue == nullptr );
186 m_pRetiredValue = pVal;
// Functor passed to the GC to free node memory after the grace period.
190 struct internal_disposer
192 void operator()( node_type * p ) const
// Retirement must happen outside the RCU read-side critical section.
200 assert( !gc::is_locked() );
202 // TODO: use RCU::batch_retire
// Walk the retired-node list and hand each node to the GC for deferred reclamation.
205 for ( node_type * p = m_pRetiredList; p; ) {
206 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
207 // Value already disposed
208 gc::template retire_ptr<internal_disposer>( p );
// Retire the single recorded value (if any) through the user-provided disposer.
213 if ( m_pRetiredValue )
214 gc::template retire_ptr<disposer>( m_pRetiredValue );
// Tree data members. m_Root is a sentinel header node; the real tree hangs off it
// (see empty(), which inspects m_Root.m_pRight).
222 typename node_type::base_class m_Root;
224 item_counter m_ItemCounter;
225 mutable sync_monitor m_Monitor; // node-level lock manager; mutable so const finds can lock nodes
230 /// Creates empty map
232 : m_pRoot( static_cast<node_type *>( &m_Root ))
243 The \p key_type should be constructible from a value of type \p K.
245 RCU \p synchronize() can be called. RCU should not be locked.
247 Returns \p true if inserting successful, \p false otherwise.
249 template <typename K>
// Inserts key -> pVal; succeeds only when the key is not yet present
// (only allow_insert is passed to do_update).
250 bool insert( K const& key, mapped_type pVal )
252 return do_update(key, key_comparator(),
253 [pVal]( node_type * pNode ) -> mapped_type
// The target node must not already carry a value when inserting.
255 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
258 update_flags::allow_insert
259 ) == update_flags::result_inserted;
262 /// Updates the value for \p key
264 The operation performs inserting or updating the value for \p key with lock-free manner.
265 If \p bInsert is \p false, only updating of existing node is possible.
267 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
268 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
269 constructible from type \p K.
270 Otherwise, the value is changed to \p pVal.
272 RCU \p synchronize() method can be called. RCU should not be locked.
274 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
275 \p second is \p true if new node has been added or \p false if the node with \p key
276 already is in the tree.
278 template <typename K, typename Func>
279 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
281 int result = do_update( key, key_comparator(),
282 [pVal]( node_type * ) -> mapped_type
// allow_update always; allow_insert only when the caller requested it.
286 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
288 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
291 /// Delete \p key from the map
293 RCU \p synchronize() method can be called. RCU should not be locked.
295 Return \p true if \p key is found and deleted, \p false otherwise
297 template <typename K>
298 bool erase( K const& key )
// The functor returns true, signalling do_remove that the value was disposed (deleted),
// not extracted (see the contract comment in do_remove).
303 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
307 /// Deletes the item from the map using \p pred predicate for searching
309 The function is an analog of \p erase(K const&)
310 but \p pred is used for key comparing.
311 \p Less functor has the interface like \p std::less.
312 \p Less must imply the same element order as the comparator used for building the map.
314 template <typename K, typename Less>
315 bool erase_with( K const& key, Less pred )
// A comparator is synthesized from Less instead of using traits-based key_comparator.
320 cds::opt::details::make_comparator_from_less<Less>(),
321 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
325 /// Delete \p key from the map
327 The function searches an item with key \p key, calls \p f functor
328 and deletes the item. If \p key is not found, the functor is not called.
330 The functor \p Func interface:
333 void operator()(mapped_type& item) { ... }
337 RCU \p synchronize method can be called. RCU should not be locked.
339 Return \p true if key is found and deleted, \p false otherwise
341 template <typename K, typename Func>
342 bool erase( K const& key, Func f )
// User functor runs before the value is retired for disposal.
347 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
350 disp.dispose_value(pVal);
356 /// Deletes the item from the map using \p pred predicate for searching
358 The function is an analog of \p erase(K const&, Func)
359 but \p pred is used for key comparing.
360 \p Less functor has the interface like \p std::less.
361 \p Less must imply the same element order as the comparator used for building the map.
363 template <typename K, typename Less, typename Func>
364 bool erase_with( K const& key, Less pred, Func f )
369 cds::opt::details::make_comparator_from_less<Less>(),
370 [&f]( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool {
373 disp.dispose_value(pVal);
379 /// Extracts a value with minimal key from the map
381 Returns \p exempt_ptr to the leftmost item.
382 If the tree is empty, returns empty \p exempt_ptr.
384 Note that the function returns only the value for minimal key.
385 To retrieve its key use \p extract_min( Func ) member function.
387 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
388 It means that the function gets leftmost leaf of the tree and tries to unlink it.
389 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
390 So, the function returns the item with minimum key at the moment of tree traversing.
392 RCU \p synchronize method can be called. RCU should NOT be locked.
393 The function does not free the item.
394 The deallocator will be implicitly invoked when the returned object is destroyed or when
395 its \p release() member function is called.
// Key is discarded: a no-op functor is passed to do_extract_min.
397 exempt_ptr extract_min()
399 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
402 /// Extracts the minimal key and corresponding value
404 Returns \p exempt_ptr to the leftmost item.
405 If the tree is empty, returns empty \p exempt_ptr.
407 \p Func functor is used to store minimal key.
408 \p Func has the following signature:
411 void operator()( key_type const& key );
414 If the tree is empty, \p f is not called.
415 Otherwise, it is called with the minimal key, and the pointer to corresponding value is returned
418 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
419 It means that the function gets leftmost leaf of the tree and tries to unlink it.
420 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
421 So, the function returns the item with minimum key at the moment of tree traversing.
423 RCU \p synchronize method can be called. RCU should NOT be locked.
424 The function does not free the item.
425 The deallocator will be implicitly invoked when the returned object is destroyed or when
426 its \p release() member function is called.
428 template <typename Func>
429 exempt_ptr extract_min( Func f )
431 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
434 /// Extracts the minimal key and corresponding value
436 This function is a shortcut for the following call:
439 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
441 \p key_type should be copy-assignable. The copy of minimal key
442 is returned in \p min_key argument.
// SFINAE: enabled only when key_type is copy-assignable, since the key is copied out.
444 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
445 extract_min_key( key_type& min_key )
447 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
450 /// Extracts a value with maximal key from the tree
452 Returns \p exempt_ptr pointer to the rightmost item.
453 If the set is empty, returns empty \p exempt_ptr.
455 Note that the function returns only the value for maximal key.
456 To retrieve its key use \p extract_max( Func ) member function.
458 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
459 It means that the function gets rightmost leaf of the tree and tries to unlink it.
460 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
461 So, the function returns the item with maximum key at the moment of tree traversing.
463 RCU \p synchronize method can be called. RCU should NOT be locked.
464 The function does not free the item.
465 The deallocator will be implicitly invoked when the returned object is destroyed or when
466 its \p release() is called.
// Key is discarded: a no-op functor is passed to do_extract_max.
468 exempt_ptr extract_max()
470 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
473 /// Extracts the maximal key and corresponding value
475 Returns \p exempt_ptr pointer to the rightmost item.
476 If the set is empty, returns empty \p exempt_ptr.
478 \p Func functor is used to store maximal key.
479 \p Func has the following signature:
482 void operator()( key_type const& key );
485 If the tree is empty, \p f is not called.
486 Otherwise, it is called with the maximal key, and the pointer to corresponding value is returned
489 @note Due to the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
490 It means that the function gets rightmost leaf of the tree and tries to unlink it.
491 During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key.
492 So, the function returns the item with maximum key at the moment of tree traversing.
494 RCU \p synchronize method can be called. RCU should NOT be locked.
495 The function does not free the item.
496 The deallocator will be implicitly invoked when the returned object is destroyed or when
497 its \p release() is called.
499 template <typename Func>
500 exempt_ptr extract_max( Func f )
502 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
505 /// Extracts the maximal key and corresponding value
507 This function is a shortcut for the following call:
510 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
512 \p key_type should be copy-assignable. The copy of maximal key
513 is returned in \p max_key argument.
// SFINAE: enabled only when key_type is copy-assignable, since the key is copied out.
515 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
516 extract_max_key( key_type& max_key )
518 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
521 /// Extracts an item from the map
523 The function searches an item with key equal to \p key in the tree,
524 unlinks it, and returns \p exempt_ptr pointer to a value found.
525 If \p key is not found the function returns an empty \p exempt_ptr.
527 RCU \p synchronize method can be called. RCU should NOT be locked.
528 The function does not destroy the value found.
529 The disposer will be implicitly invoked when the returned object is destroyed or when
530 its \p release() member function is called.
532 template <typename Q>
// Unlinks the item with the given key; ownership of the value moves to the exempt_ptr.
533 exempt_ptr extract( Q const& key )
535 return exempt_ptr(do_extract( key ));
539 /// Extracts an item from the map using \p pred for searching
541 The function is an analog of \p extract(Q const&)
542 but \p pred is used for key compare.
543 \p Less has the interface like \p std::less.
544 \p pred must imply the same element order as the comparator used for building the tree.
546 template <typename Q, typename Less>
547 exempt_ptr extract_with( Q const& key, Less pred )
549 return exempt_ptr(do_extract_with( key, pred ));
552 /// Find the key \p key
554 The function searches the item with key equal to \p key and calls the functor \p f for item found.
555 The interface of \p Func functor is:
558 void operator()( key_type const& key, mapped_type& item );
561 where \p item is the item found.
562 The functor is called under node-level lock.
564 The function applies RCU lock internally.
566 The function returns \p true if \p key is found, \p false otherwise.
568 template <typename K, typename Func>
569 bool find( K const& key, Func f )
571 return do_find( key, key_comparator(),
572 [&f]( node_type * pNode ) -> bool {
573 assert( pNode != nullptr );
// Read the value pointer and pass (key, *value) to the user functor.
574 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
576 f( pNode->m_key, *pVal );
584 /// Finds the key \p key using \p pred predicate for searching
586 The function is an analog of \p find(K const&, Func)
587 but \p pred is used for key comparing.
588 \p Less functor has the interface like \p std::less.
589 \p Less must imply the same element order as the comparator used for building the map.
591 template <typename K, typename Less, typename Func>
592 bool find_with( K const& key, Less pred, Func f )
// Same as find(K, Func) but with a comparator synthesized from Less.
595 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
596 [&f]( node_type * pNode ) -> bool {
597 assert( pNode != nullptr );
598 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
600 f( pNode->m_key, *pVal );
608 /// Find the key \p key
610 The function searches the item with key equal to \p key
611 and returns \p true if it is found, and \p false otherwise.
613 The function applies RCU lock internally.
615 template <typename K>
// Presence check only: the functor does nothing but signal success.
616 bool find( K const& key )
618 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
621 /// Finds the key \p key using \p pred predicate for searching
623 The function is an analog of \p find(K const&)
624 but \p pred is used for key comparing.
625 \p Less functor has the interface like \p std::less.
626 \p Less must imply the same element order as the comparator used for building the map.
628 template <typename K, typename Less>
629 bool find_with( K const& key, Less pred )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
635 /// Clears the tree (thread safe, not atomic)
637 The function unlinks all items from the tree.
638 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
642 assert( set.empty() );
644 the assertion could be raised.
646 For each node the \ref disposer will be called after unlinking.
648 RCU \p synchronize method can be called. RCU should not be locked.
// Repeatedly extracts the leftmost item until the tree is drained.
652 while ( extract_min() );
655 /// Clears the tree (not thread safe)
657 This function is not thread safe and may be called only when no other thread deals with the tree.
658 The function is used in the tree destructor.
// Currently delegates to the thread-safe clear(); see the inline remark.
662 clear(); // temp solution
666 /// Checks if the map is empty
// The tree hangs off the right child of the sentinel root; no right child == empty map.
669 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
672 /// Returns item count in the map
674 Only leaf nodes containing user data are counted.
676 The value returned depends on item counter type provided by \p Traits template parameter.
677 If it is \p atomicity::empty_item_counter this function always returns 0.
679 The function is not suitable for checking the tree emptiness, use \p empty()
680 member function for this purpose.
684 return m_ItemCounter;
687 /// Returns const reference to internal statistics
688 stat const& statistics() const
693 /// Checks internal consistency (not atomic, not thread-safe)
695 The debugging function to check internal consistency of the tree.
697 bool check_consistency() const
// --- Internal "do_*" drivers: wrap the recursive try_* workers with deadlock
// checking, an rcu_disposer scope, and the RCU read-side lock. ---
705 template <typename Q, typename Compare, typename Func>
// Runs try_find from the sentinel root (direction 1 = right child, version 0).
706 bool do_find( Q& key, Compare cmp, Func f ) const
711 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
// try_find never surfaces 'retry' to the caller; it is resolved internally.
713 assert( result != find_result::retry );
714 return result == find_result::found;
717 template <typename K, typename Compare, typename Func>
718 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
// Verify RCU is not locked by this thread before a blocking update.
720 check_deadlock_policy::check();
// removed_list retires unlinked nodes/value when it goes out of scope (outside the RCU lock).
722 rcu_disposer removed_list;
725 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
729 template <typename K, typename Compare, typename Func>
730 bool do_remove( K const& key, Compare cmp, Func func )
732 // Func must return true if the value was disposed
733 // or false if the value was extracted
735 check_deadlock_policy::check();
737 rcu_disposer removed_list;
740 return try_remove_root( key, cmp, func, removed_list );
744 template <typename Func>
// Extract-min: captures the value pointer; returning false means "extracted, do not dispose".
745 mapped_type do_extract_min( Func f )
747 mapped_type pExtracted = nullptr;
750 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
755 template <typename Func>
// Extract-max: mirror of do_extract_min for the rightmost item.
756 mapped_type do_extract_max( Func f )
758 mapped_type pExtracted = nullptr;
761 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
766 template <typename Func>
// Drives try_extract_minmax from the root; nDir selects the leftmost or rightmost leaf.
767 void do_extract_minmax( int nDir, Func func )
769 check_deadlock_policy::check();
771 rcu_disposer removed_list;
775 int result = update_flags::failed;
777 // get right child of root
778 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
780 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// If the child is shrinking (being rotated), wait for it to finish and retry.
781 if ( nChildVersion & node_type::shrinking ) {
782 m_stat.onRemoveRootWaitShrinking();
783 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
784 result = update_flags::retry;
// Re-read the link to make sure the child is still attached before descending.
786 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
787 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
790 } while ( result == update_flags::retry );
794 template <typename Q>
// Extract by key: captures the value; returning false tells do_remove the value was extracted, not disposed.
795 mapped_type do_extract( Q const& key )
797 mapped_type pExtracted = nullptr;
801 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
806 template <typename Q, typename Less>
// Same as do_extract but with a comparator synthesized from Less.
807 mapped_type do_extract_with( Q const& key, Less pred )
810 mapped_type pExtracted = nullptr;
813 cds::opt::details::make_comparator_from_less<Less>(),
814 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
823 template <typename Q, typename Compare, typename Func>
// Recursive optimistic (version-validated) search, Bronson-style: descend from pNode
// in direction nDir, revalidating pNode's version after every hand-over-hand step.
824 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
// Must run inside an RCU read-side critical section.
826 assert( gc::is_locked() );
830 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// Version changed under us: the traversal to pNode is stale — restart from the caller.
832 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
833 m_stat.onFindRetry();
834 return find_result::retry;
// No child in the search direction: the key is not present.
837 m_stat.onFindFailed();
838 return find_result::not_found;
841 int nCmp = cmp( key, pChild->m_key );
// Exact match: confirm the node still holds a value under its node-level lock.
843 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
845 node_scoped_lock l( m_Monitor, *pChild );
846 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
848 m_stat.onFindSuccess();
849 return find_result::found;
// Matching node exists but is a routing (valueless) node — treat as not found.
854 m_stat.onFindFailed();
855 return find_result::not_found;
858 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// The child is in the middle of a shrink (rotation): wait, revalidate, and retry if needed.
859 if ( nChildVersion & node_type::shrinking ) {
860 m_stat.onFindWaitShrinking();
861 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
863 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
864 m_stat.onFindRetry();
865 return find_result::retry;
868 else if ( nChildVersion != node_type::unlinked ) {
// Revalidate the parent link before recursing into the child.
870 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
871 m_stat.onFindRetry();
872 return find_result::retry;
875 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
876 if ( found != find_result::retry )
882 template <typename K, typename Compare, typename Func>
// Entry point of insert/update: operates on the right child of the sentinel root.
// Returns an update_flags result (result_inserted / result_updated / failed).
// Must run inside an RCU read-side critical section.
883 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
885 assert( gc::is_locked() );
889 // get right child of root
890 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
892 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// Child is mid-rotation: wait for the shrink to finish, then retry the whole step.
893 if ( nChildVersion & node_type::shrinking ) {
894 m_stat.onUpdateRootWaitShrinking();
895 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
896 result = update_flags::retry;
// Child link unchanged: descend into the recursive worker.
898 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
899 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
// Tree is empty: insert directly as the right child of the sentinel root.
904 if ( nFlags & update_flags::allow_insert ) {
905 // insert into tree as right child of the root
907 node_scoped_lock l( m_Monitor, *m_pRoot );
// Someone inserted concurrently between our check and taking the root lock — retry.
// BUGFIX: was `result = result == update_flags::retry;` which stored the *bool*
// result of a comparison instead of the retry flag, breaking the retry loop.
908 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
909 result = update_flags::retry;
913 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
914 mapped_type pVal = funcUpdate( pNew );
// The update functor must produce a non-null value pointer for an insert.
915 assert( pVal != nullptr );
916 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
// Publish the new node and fix the sentinel's height.
918 m_pRoot->child( pNew, right_child, memory_model::memory_order_relaxed );
919 m_pRoot->height( 2, memory_model::memory_order_relaxed );
923 m_stat.onInsertSuccess();
924 return update_flags::result_inserted;
// Empty tree and insertion not allowed: nothing to update.
927 return update_flags::failed;
929 } while ( result == update_flags::retry );
933 template <typename K, typename Compare, typename Func>
// Entry point of erase/extract: descends from the sentinel root's right child,
// retrying while rotations (shrinks) invalidate the traversal.
934 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
// Must run inside an RCU read-side critical section.
936 assert( gc::is_locked() );
940 // get right child of root
941 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
943 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
// Child is mid-rotation: wait for the shrink to finish and retry.
944 if ( nChildVersion & node_type::shrinking ) {
945 m_stat.onRemoveRootWaitShrinking();
946 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
947 result = update_flags::retry;
// Child link unchanged: descend into the recursive remover.
949 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
950 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
955 } while ( result == update_flags::retry );
957 return result == update_flags::result_removed;
960 template <typename K, typename Compare, typename Func>
// Recursive insert/update worker. nVersion is the version of pNode observed by the
// caller; any mismatch means the traversal is stale and the step must be retried.
961 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
963 assert( gc::is_locked() );
964 assert( nVersion != node_type::unlinked );
965 CDS_UNUSED( pParent );
967 int nCmp = cmp( key, pNode->m_key );
// Exact key match: update in place if allowed, otherwise report failure.
969 if ( nFlags & update_flags::allow_update ) {
970 return try_update_node( funcUpdate, pNode, disp );
972 return update_flags::failed;
977 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
// pNode's version changed: our route here is stale — tell the caller to retry.
978 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
979 m_stat.onUpdateRetry();
980 return update_flags::retry;
// Reached a leaf position: insert here if permitted.
983 if ( pChild == nullptr ) {
985 if ( nFlags & update_flags::allow_insert )
986 result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
988 result = update_flags::failed;
992 result = update_flags::retry;
993 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is mid-rotation: wait for the shrink to complete before descending.
994 if ( nChildVersion & node_type::shrinking ) {
995 m_stat.onUpdateWaitShrinking();
996 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
999 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1000 // this second read is important, because it is protected by nChildVersion
1002 // validate the read that our caller took to get to node
1003 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1004 m_stat.onUpdateRetry();
1005 return update_flags::retry;
1008 // At this point we know that the traversal our parent took to get to node is still valid.
1009 // The recursive implementation will validate the traversal from node to
1010 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1011 // This means that we are no longer vulnerable to node shrinks, and we don't need
1012 // to validate node version any more.
1013 result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
1016 } while ( result == update_flags::retry );
1020 template <typename K, typename Compare, typename Func>
// Recursive remove worker, mirroring try_update: descend with version validation,
// delegate the actual unlink to try_remove_node at the matching node.
1021 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1023 assert( gc::is_locked() );
1024 assert( nVersion != node_type::unlinked );
1026 int nCmp = cmp( key, pNode->m_key );
// Exact match: remove (or extract) this node.
1028 return try_remove_node( pParent, pNode, nVersion, func, disp );
1032 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_relaxed );
// pNode's version changed: the route here is stale — tell the caller to retry.
1033 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1034 m_stat.onRemoveRetry();
1035 return update_flags::retry;
// No child in the search direction: the key is not in the tree.
1038 if ( pChild == nullptr ) {
1039 return update_flags::failed;
1043 result = update_flags::retry;
1044 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is mid-rotation: wait for the shrink to complete before descending.
1045 if ( nChildVersion & node_type::shrinking ) {
1046 m_stat.onRemoveWaitShrinking();
1047 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1050 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_relaxed )) {
1051 // this second read is important, because it is protected by nChildVersion
1053 // validate the read that our caller took to get to node
1054 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1055 m_stat.onRemoveRetry();
1056 return update_flags::retry;
1059 // At this point we know that the traversal our parent took to get to node is still valid.
1060 // The recursive implementation will validate the traversal from node to
1061 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1062 // This means that we are no longer vulnerable to node shrinks, and we don't need
1063 // to validate node version any more.
1064 result = try_remove( key, cmp, func, pNode, pChild, nChildVersion, disp );
1067 } while ( result == update_flags::retry );
1071 template <typename Func>
// Recursive extract-min/max worker: keeps descending in direction nDir (left for min,
// right for max) until there is no further child, then removes that node.
1072 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1074 assert( gc::is_locked() );
1075 assert( nVersion != node_type::unlinked );
1079 node_type * pChild = child( pNode, nDir, memory_model::memory_order_relaxed );
// pNode's version changed: the route here is stale — tell the caller to retry.
1080 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1081 m_stat.onRemoveRetry();
1082 return update_flags::retry;
// No further child in nDir: pNode is the extremal node — remove it.
1085 if ( pChild == nullptr ) {
1087 return try_remove_node( pParent, pNode, nVersion, func, disp );
1090 result = update_flags::retry;
1091 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child is mid-rotation: wait for the shrink to complete before descending.
1092 if ( nChildVersion & node_type::shrinking ) {
1093 m_stat.onRemoveWaitShrinking();
1094 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
1097 else if ( pChild == child( pNode, nDir, memory_model::memory_order_relaxed )) {
1098 // this second read is important, because it is protected by nChildVersion
1100 // validate the read that our caller took to get to node
1101 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
1102 m_stat.onRemoveRetry();
1103 return update_flags::retry;
1106 // At this point we know that the traversal our parent took to get to node is still valid.
1107 // The recursive implementation will validate the traversal from node to
1108 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1109 // This means that we are no longer vulnerable to node shrinks, and we don't need
1110 // to validate node version any more.
1111 result = try_extract_minmax( nDir, func, pNode, pChild, nChildVersion, disp );
1114 } while ( result == update_flags::retry );
1118 template <typename K, typename Func>
// Inserts a brand-new leaf as the nDir child of pNode. funcUpdate( pNew ) must
// produce the (non-null) mapped value stored into the new node.
// Returns update_flags::result_inserted on success, or update_flags::retry
// when pNode's version changed or the target child slot became non-null,
// in which case the caller restarts its descent.
1119 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: fill a freshly allocated node with the user-provided value.
1123 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1124 mapped_type pVal = funcUpdate( pNew );
1125 assert( pVal != nullptr );
1126 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// Relaxed-insert mode: allocate and initialize the node BEFORE taking the
// node lock, trading a possibly wasted allocation for a shorter critical
// section.
1129 if ( c_bRelaxedInsert ) {
1130 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1131 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1133 m_stat.onInsertRetry();
1134 return update_flags::retry;
1137 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1140 node_type * pDamaged;
1142 assert( pNode != nullptr );
1143 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version still matches and the slot is still free.
1145 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
1146 || child( pNode, nDir, memory_model::memory_order_relaxed ) != nullptr )
1148 if ( c_bRelaxedInsert ) {
// The speculatively allocated node is wasted on this path; presumably it is
// freed on the lines elided from this extraction — TODO confirm.
1150 m_stat.onRelaxedInsertFailed();
1153 m_stat.onInsertRetry();
1154 return update_flags::retry;
// Strict mode allocates only after validation succeeded, inside the lock.
1157 if ( !c_bRelaxedInsert )
1158 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Link the new leaf and recompute heights; pDamaged is the highest node whose
// height/balance may now be stale.
1160 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
1161 pDamaged = fix_height_locked( pNode );
1165 m_stat.onInsertSuccess();
// Rebalancing is performed after the pNode lock scope above is left.
1167 fix_height_and_rebalance( pDamaged, disp );
1169 return update_flags::result_inserted;
1172 template <typename Func>
// Replaces the value of an existing node. Locks pNode, bails out with retry
// if it was concurrently unlinked, stores the new value produced by
// funcUpdate( pNode ), and hands the old value (if any) to the RCU disposer.
// Returns update_flags::result_updated on success.
1173 int try_update_node( Func funcUpdate, node_type * pNode, rcu_disposer& disp )
1176 assert( pNode != nullptr );
1178 node_scoped_lock l( m_Monitor, *pNode );
// A node unlinked by a concurrent remove must not be updated in place.
1180 if ( pNode->is_unlinked( memory_model::memory_order_relaxed )) {
1181 m_stat.onUpdateUnlinked();
1182 return update_flags::retry;
1185 pOld = pNode->value( memory_model::memory_order_relaxed );
1186 mapped_type pVal = funcUpdate( pNode );
1190 assert( pVal != nullptr );
1191 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
// The old value is reclaimed via an RCU grace period, not freed immediately.
1196 disp.dispose_value(pOld);
1197 m_stat.onDisposeValue();
1200 m_stat.onUpdateSuccess();
1201 return update_flags::result_updated;
1204 template <typename Func>
// Removes the value stored in pNode. Two cases:
// - pNode has at most one child: physically unlink the node under the
//   pParent and pNode locks and repair heights;
// - pNode has two children: only null out its value, leaving the node in
//   the tree as a routing node.
// func( key, pOld, disp ) consumes the extracted value; it returns true when
// it disposed the value itself (see the stat counters below).
// Returns result_removed, failed (node has no value), or retry.
// NOTE(review): this function mixes atomics::memory_order_relaxed and
// memory_model::memory_order_relaxed; they appear to be aliases, but the
// inconsistency is worth unifying.
1205 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1207 assert( pParent != nullptr );
1208 assert( pNode != nullptr );
// A routing node (no value) cannot be removed again.
1210 if ( !pNode->is_valued( atomics::memory_order_relaxed ) )
1211 return update_flags::failed;
1213 if ( child( pNode, left_child, memory_model::memory_order_relaxed ) == nullptr
1214 || child( pNode, right_child, memory_model::memory_order_relaxed ) == nullptr )
1216 node_type * pDamaged;
// Lock order: parent first, then node.
1219 node_scoped_lock lp( m_Monitor, *pParent );
1220 if ( pParent->is_unlinked( atomics::memory_order_relaxed ) || parent( pNode, memory_model::memory_order_relaxed ) != pParent )
1221 return update_flags::retry;
1224 node_scoped_lock ln( m_Monitor, *pNode );
1225 pOld = pNode->value( memory_model::memory_order_relaxed );
// Unlink only if pNode's version is still the one the caller observed and
// the splice succeeds.
1226 if ( !( pNode->version( memory_model::memory_order_relaxed ) == nVersion
1228 && try_unlink_locked( pParent, pNode, disp )))
1230 return update_flags::retry;
1233 pDamaged = fix_height_locked( pParent );
1237 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1238 m_stat.onDisposeValue();
1240 m_stat.onExtractValue();
1242 fix_height_and_rebalance( pDamaged, disp );
1243 return update_flags::result_removed;
// Two-children case: convert pNode into a routing node by clearing its value.
1246 int result = update_flags::retry;
1249 node_scoped_lock ln( m_Monitor, *pNode );
1250 pOld = pNode->value( atomics::memory_order_relaxed );
1251 if ( pNode->version( atomics::memory_order_relaxed ) == nVersion && pOld ) {
1252 pNode->m_pValue.store( nullptr, atomics::memory_order_relaxed );
1253 result = update_flags::result_removed;
1257 if ( result == update_flags::result_removed ) {
1259 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1260 m_stat.onDisposeValue();
1262 m_stat.onExtractValue();
// Physically splices pNode out of the tree. Preconditions: both pParent and
// pNode are locked by the caller, and pNode has at most one child (if both
// children exist, splicing is impossible and the function bails out). On
// success the node is version-marked unlinked, its value slot is cleared,
// and the node itself is handed to the RCU disposer.
1269 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1271 // pParent and pNode must be locked
1272 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1274 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1275 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1276 if ( pNode != pParentLeft && pNode != pParentRight ) {
1277 // node is no longer a child of parent
1281 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ) );
1282 assert( pParent == parent( pNode, memory_model::memory_order_relaxed));
1284 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1285 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1286 if ( pLeft != nullptr && pRight != nullptr ) {
1287 // splicing is no longer possible
// Replace pNode in its parent's slot with its single (or absent) child.
1290 node_type * pSplice = pLeft ? pLeft : pRight;
1292 if ( pParentLeft == pNode )
1293 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
1295 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
// Release order publishes the new parent link to concurrent readers.
// NOTE(review): a null check on pSplice presumably guards this store on a
// line elided from this extraction — confirm against upstream.
1298 pSplice->m_pParent.store( pParent, memory_model::memory_order_release );
1300 // Mark the node as unlinked
1301 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1303 // The value will be disposed by calling function
1304 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
// Node memory is reclaimed after an RCU grace period, not immediately.
1306 disp.dispose( pNode );
1307 m_stat.onDisposeNode();
1314 private: // rotations
// Diagnoses pNode without locking it: returns unlink_required for a
// valueless node missing a child (a removable routing node),
// rebalance_required when the balance factor leaves [-1, 1], the corrected
// height when only the stored height is stale, or nothing_required.
// Relaxed reads may be transiently inconsistent; callers re-validate under
// the appropriate locks before acting.
1316 int estimate_node_condition( node_type * pNode )
1318 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1319 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1321 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1322 return unlink_required;
1324 int h = pNode->height( memory_model::memory_order_relaxed );
1325 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1326 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1328 int hNew = 1 + std::max( hL, hR );
1329 int nBalance = hL - hR;
1331 if ( nBalance < -1 || nBalance > 1 )
1332 return rebalance_required;
// Either the recorded height is stale (return the corrected height) or the
// node needs no repair at all.
1334 return h != hNew ? hNew : nothing_required;
// Locks pNode and recomputes its height via fix_height_locked(); returns the
// next node (the parent) that may need fixing.
1337 node_type * fix_height( node_type * pNode )
1339 assert( pNode != nullptr );
1340 node_scoped_lock l( m_Monitor, *pNode );
1341 return fix_height_locked( pNode );
// Recomputes pNode's height; the caller must hold the pNode lock. When only
// the stored height was stale, the corrected value is stored and the parent
// is returned as the next "damaged" node; the rebalance/unlink/nothing cases
// are handled by the switch on estimate_node_condition().
// NOTE(review): the "switch ( h )" header and the per-case return statements
// are missing from this extraction; only the case labels and the default
// path are visible.
1344 node_type * fix_height_locked( node_type * pNode )
1346 // pNode must be locked!!!
1347 int h = estimate_node_condition( pNode );
1349 case rebalance_required:
1350 case unlink_required:
1352 case nothing_required:
// Default: h is the corrected height — store it and propagate to the parent.
1355 pNode->height( h, memory_model::memory_order_relaxed );
1356 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from a damaged node toward the root, fixing heights and performing
// rotations until no repair remains. Terminates at a node with no parent
// (presumably the root sentinel — confirm) or when pNode is undamaged or has
// been unlinked concurrently.
1360 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1362 while ( pNode && parent( pNode, memory_model::memory_order_relaxed )) {
1363 int nCond = estimate_node_condition( pNode );
1364 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
// Height-only damage: fix it and continue with the returned parent.
1367 if ( nCond != unlink_required && nCond != rebalance_required )
1368 pNode = fix_height( pNode );
// Structural damage: lock parent then node, re-validate the link, rebalance.
1370 node_type * pParent = parent( pNode, memory_model::memory_order_relaxed );
1371 assert( pParent != nullptr );
1373 node_scoped_lock lp( m_Monitor, *pParent );
1374 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
1375 && parent( pNode, memory_model::memory_order_relaxed ) == pParent )
1377 node_scoped_lock ln( m_Monitor, *pNode );
1378 pNode = rebalance_locked( pParent, pNode, disp );
// Repairs one locked node: unlinks an empty routing node, rotates right/left
// when out of balance, or simply refreshes the stored height.
1385 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1387 // pParent and pNode should be locked.
1388 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1389 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1390 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1391 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1393 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1394 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Valueless node with at most one child: splice it out; the parent's height
// may then be stale.
1396 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1397 if ( try_unlink_locked( pParent, pNode, disp ))
1398 return fix_height_locked( pParent );
1400 // retry needed for pNode
1405 int h = pNode->height( memory_model::memory_order_relaxed );
1406 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
1407 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
1408 int hNew = 1 + std::max( hL, hR );
1409 int balance = hL - hR;
// NOTE(review): the "if ( balance > 1 )" guard line is not visible in this
// extraction; it is implied by the symmetric "else if ( balance < -1 )".
1412 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1413 else if ( balance < -1 )
1414 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1415 else if ( hNew != h ) {
1416 pNode->height( hNew, memory_model::memory_order_relaxed );
1418 // pParent is already locked
1419 return fix_height_locked( pParent );
// pNode's left subtree is too tall. Locks pLeft and chooses between a single
// right rotation and a left-right double rotation, re-validating links and
// heights under the new lock. Returning pNode itself means "retry".
1425 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1427 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1428 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1429 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1431 // pParent and pNode is locked yet
1432 // pNode->pLeft is too large, we will rotate-right.
1433 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1436 assert( pLeft != nullptr );
1437 node_scoped_lock l( m_Monitor, *pLeft );
// Re-validate the left link under the pLeft lock.
1438 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1439 return pNode; // retry for pNode
1441 int hL = pLeft->height( memory_model::memory_order_relaxed );
// NOTE(review): the balance re-check guarding this retry is on a line elided
// from this extraction — presumably "if ( hL - hR <= 1 )"; confirm upstream.
1443 return pNode; // retry
1445 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1446 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1447 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1448 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
// Single right rotation when the inner (left-right) subtree is not the taller
// one (guard condition elided in this extraction).
1452 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1455 assert( pLRight != nullptr );
1457 node_scoped_lock lr( m_Monitor, *pLRight );
1458 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1459 return pNode; // retry
// The re-read height may show that a single rotation suffices after all.
1461 hLR = pLRight->height( memory_model::memory_order_relaxed );
1463 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1465 node_type * pLRLeft = child( pLRight, left_child, memory_model::memory_order_relaxed );
1466 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1467 int balance = hLL - hLRL;
1468 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1469 // nParent.child.left won't be damaged after a double rotation
1470 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1474 // focus on pLeft, if necessary pNode will be balanced later
1475 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror of rebalance_to_right_locked(): pNode's right subtree is too tall.
// Locks pRight and chooses between a single left rotation and a right-left
// double rotation. Returning pNode itself means "retry".
1480 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1482 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1483 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1484 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1486 // pParent and pNode is locked yet
1488 assert( pRight != nullptr );
1489 node_scoped_lock l( m_Monitor, *pRight );
// Re-validate the right link under the pRight lock.
1490 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1491 return pNode; // retry for pNode
1493 int hR = pRight->height( memory_model::memory_order_relaxed );
// No longer unbalanced once the fresh height is read: let the caller retry.
1494 if ( hL - hR >= -1 )
1495 return pNode; // retry
1497 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1498 int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1499 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1500 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
// Single left rotation when the inner (right-left) subtree is not the taller
// one (guard condition elided in this extraction).
1502 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1505 assert( pRLeft != nullptr );
1506 node_scoped_lock lrl( m_Monitor, *pRLeft );
1507 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1508 return pNode; // retry
// The re-read height may show that a single rotation suffices after all.
1510 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1512 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1514 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1515 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1516 int balance = hRR - hRLR;
1517 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1518 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
// Otherwise fix pRight first; pNode will be rebalanced on a later pass.
1520 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" for the duration of a rotation so that
// concurrent lock-free readers detect the in-flight structural change and
// wait or retry (see wait_until_shrink_completed and the version checks).
1524 static void begin_change( node_type * pNode, version_type version )
1526 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Closes the shrinking window opened by begin_change(). Setting all flag
// bits and adding 1 carries into the counter portion of the version, which
// simultaneously clears the flags and bumps the version so that readers who
// observed the pre-rotation version will fail their validation and retry.
1528 static void end_change( node_type * pNode, version_type version )
1530 // Clear shrinking and unlinked flags and increment version
1531 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft becomes the subtree root, pNode becomes its
// right child, and pLeft's old right subtree (pLRight) moves under pNode as
// its left child. pParent, pNode and pLeft are locked by the caller. The
// relinking is bracketed by begin_change()/end_change() on pNode so that
// concurrent readers observe the shrink. Returns the next damaged node, or
// the result of fixing pParent's height.
1534 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1536 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1537 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1539 begin_change( pNode, nodeVersion );
// Relink: pNode adopts pLRight as its left child ...
1541 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1542 if ( pLRight != nullptr )
1543 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
// ... pLeft adopts pNode as its right child ...
1545 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1546 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// ... and pParent's child slot is switched from pNode to pLeft.
1548 if ( pParentLeft == pNode )
1549 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1551 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1552 pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
1554 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1556 // fix up heights links
1557 int hNode = 1 + std::max( hLR, hR );
1558 pNode->height( hNode, memory_model::memory_order_relaxed );
1559 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1561 end_change( pNode, nodeVersion );
1562 m_stat.onRotateRight();
1564 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1565 // parent.child). pNode is the deepest. Perform as many fixes as we can
1566 // with the locks we've got.
1568 // We've already fixed the height for pNode, but it might still be outside
1569 // our allowable balance range. In that case a simple fix_height_locked()
1571 int nodeBalance = hLR - hR;
1572 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1573 // we need another rotation at pNode
// NOTE(review): the return statement for this branch is elided in this
// extraction — presumably "return pNode;"; confirm upstream.
1577 // we've fixed balance and height damage for pNode, now handle
1578 // extra-routing node damage
1579 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1580 // we need to remove pNode and then repair
1584 // we've already fixed the height at pLeft, do we need a rotation here?
1585 int leftBalance = hLL - hNode;
1586 if ( leftBalance < -1 || leftBalance > 1 )
1589 // pLeft might also have routing node damage (if pLeft.left was null)
1590 if ( hLL == 0 && !pLeft->is_valued( memory_model::memory_order_relaxed ))
1593 // try to fix the parent height while we've still got the lock
1594 return fix_height_locked( pParent );
// Mirror of rotate_right_locked(): single left rotation making pRight the
// subtree root with pNode as its left child; pRLeft moves under pNode as its
// right child. Bracketed by begin_change()/end_change() on pNode. Returns
// the next damaged node, or the result of fixing pParent's height.
1597 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1599 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1600 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1602 begin_change( pNode, nodeVersion );
1604 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1605 pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1606 if ( pRLeft != nullptr )
1607 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1609 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1610 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// Switch pParent's child slot from pNode to pRight.
1612 if ( pParentLeft == pNode )
1613 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1615 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1616 pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1618 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up: the demoted pNode first, then the new root.
1621 int hNode = 1 + std::max( hL, hRL );
1622 pNode->height( hNode, memory_model::memory_order_relaxed );
1623 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1625 end_change( pNode, nodeVersion );
1626 m_stat.onRotateLeft();
// Post-rotation damage checks; the return statements for the early-out cases
// below are elided in this extraction (cf. rotate_right_locked()).
1628 int nodeBalance = hRL - hL;
1629 if ( nodeBalance < -1 || nodeBalance > 1 )
1632 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1635 int rightBalance = hRR - hNode;
1636 if ( rightBalance < -1 || rightBalance > 1 )
1639 if ( hRR == 0 && !pRight->is_valued( memory_model::memory_order_relaxed ))
1642 return fix_height_locked( pParent );
// Left-right double rotation: pLRight (pLeft's right child) becomes the new
// subtree root, with pLeft as its left child and pNode as its right child.
// Both pNode and pLeft are version-marked as shrinking for the duration.
// Returns the next damaged node, or the result of fixing pParent's height.
1645 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1647 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1648 version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1650 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1651 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_relaxed );
1652 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_relaxed );
1653 int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;
1655 begin_change( pNode, nodeVersion );
1656 begin_change( pLeft, leftVersion );
1658 // fix up pNode links, careful about the order!
// pLRight's right subtree moves under pNode; its left subtree under pLeft.
1659 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1660 if ( pLRR != nullptr )
1661 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1663 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1664 if ( pLRL != nullptr )
1665 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
// pLRight becomes the root of this subtree.
1667 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1668 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1669 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1670 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
// Switch pParent's child slot to pLRight. NOTE(review): the guard
// "if ( pPL == pNode )" is elided in this extraction; the else branch below
// asserts the right slot.
1673 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1675 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1676 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1678 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up: the two demoted nodes first, then the new root.
1681 int hNode = 1 + std::max( hLRR, hR );
1682 pNode->height( hNode, memory_model::memory_order_relaxed );
1683 int hLeft = 1 + std::max( hLL, hLRL );
1684 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1685 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1687 end_change( pNode, nodeVersion );
1688 end_change( pLeft, leftVersion );
1689 m_stat.onRotateRightOverLeft();
1691 // caller should have performed only a single rotation if pLeft was going
1692 // to end up damaged
1693 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1694 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_relaxed )));
1696 // We have damaged pParent, pLR (now parent.child), and pNode (now
1697 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1698 // can with the locks we've got.
1700 // We've already fixed the height for pNode, but it might still be outside
1701 // our allowable balance range. In that case a simple fix_height_locked()
1703 int nodeBalance = hLRR - hR;
1704 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1705 // we need another rotation at pNode
// NOTE(review): the return statement for this branch is elided in this
// extraction — presumably "return pNode;"; confirm upstream.
1709 // pNode might also be damaged by being an unnecessary routing node
1710 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1711 // repair involves splicing out pNode and maybe more rotations
1715 // we've already fixed the height at pLRight, do we need a rotation here?
1716 int balanceLR = hLeft - hNode;
1717 if ( balanceLR < -1 || balanceLR > 1 )
1720 // try to fix the parent height while we've still got the lock
1721 return fix_height_locked( pParent );
// Mirror of rotate_right_over_left_locked(): right-left double rotation
// making pRLeft (pRight's left child) the new subtree root, with pNode as
// its left child and pRight as its right child. Both pNode and pRight are
// version-marked as shrinking for the duration. Returns the next damaged
// node, or the result of fixing pParent's height.
1724 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1726 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1727 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1729 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1730 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_relaxed );
1731 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1732 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1734 begin_change( pNode, nodeVersion );
1735 begin_change( pRight, rightVersion );
1737 // fix up pNode links, careful about the order!
// pRLeft's left subtree moves under pNode; its right subtree under pRight.
1738 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1739 if ( pRLL != nullptr )
1740 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1742 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1743 if ( pRLR != nullptr )
1744 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
// pRLeft becomes the root of this subtree.
1746 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1747 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1748 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1749 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
// Switch pParent's child slot to pRLeft. NOTE(review): the guard
// "if ( pPL == pNode )" is elided in this extraction; the else branch below
// asserts the right slot.
1752 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1754 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1755 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1757 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
// Recompute heights bottom-up: the two demoted nodes first, then the new root.
1760 int hNode = 1 + std::max( hL, hRLL );
1761 pNode->height( hNode, memory_model::memory_order_relaxed );
1762 int hRight = 1 + std::max( hRLR, hRR );
1763 pRight->height( hRight, memory_model::memory_order_relaxed );
1764 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1766 end_change( pNode, nodeVersion );
1767 end_change( pRight, rightVersion );
1768 m_stat.onRotateLeftOverRight();
// pRight must not have been left damaged by this double rotation (cf. the
// matching assertion in rotate_right_over_left_locked()).
1770 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
// Post-rotation damage checks; the return statements for the early-out cases
// below are elided in this extraction.
1772 int nodeBalance = hRLL - hL;
1773 if ( nodeBalance < -1 || nodeBalance > 1 )
1775 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued( memory_model::memory_order_relaxed ))
1778 int balRL = hRight - hNode;
1779 if ( balRL < -1 || balRL > 1 )
1782 return fix_height_locked( pParent );
1787 }} // namespace cds::container
1789 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H