2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
32 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
34 #include <type_traits> // is_base_of
35 #include <cds/container/details/bronson_avltree_base.h>
36 #include <cds/urcu/details/check_deadlock.h>
37 #include <cds/urcu/exempt_ptr.h>
39 namespace cds { namespace container {
41 /// Bronson et al AVL-tree (RCU specialization for pointers)
42 /** @ingroup cds_nonintrusive_map
43 @ingroup cds_nonintrusive_tree
44 @headerfile cds/container/bronson_avltree_map_rcu.h
45 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
47 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
48 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
the disposer functor provided by the \p Traits template parameter.
52 <b>Template arguments</b>:
53 - \p RCU - one of \ref cds_urcu_gc "RCU type"
- \p T - value type to be stored in the tree's nodes. Note that the specialization stores a pointer to the user-allocated value, not a copy.
57 - \p Traits - tree traits, default is \p bronson_avltree::traits
It is possible to declare an option-based tree with the \p bronson_avltree::make_traits metafunction
instead of the \p Traits template argument.
@note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include the appropriate RCU header file;
see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and their corresponding header files.
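A minimal declaration sketch (illustrative only; the RCU flavor, the key and value types, and the disposer shown here are assumptions, not requirements):
\code
#include <cds/urcu/general_buffered.h>   // the RCU header must be included first
#include <cds/container/bronson_avltree_map_rcu.h>

struct my_value { /* user data */ };

// The disposer frees a user-allocated value when the tree retires it
struct value_disposer {
    void operator()( my_value * p ) const { delete p; }
};

struct map_traits: public cds::container::bronson_avltree::traits
{
    typedef value_disposer disposer;
};

typedef cds::container::BronsonAVLTreeMap<
    cds::urcu::gc< cds::urcu::general_buffered<> >,
    int,            // key type
    my_value *,     // mapped type is a pointer to the user-allocated value
    map_traits
> map_type;
\endcode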
68 # ifdef CDS_DOXYGEN_INVOKED
69 typename Traits = bronson_avltree::traits
74 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
77 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
78 typedef Key key_type; ///< type of a key stored in the map
79 typedef T * mapped_type; ///< type of value stored in the map
80 typedef Traits traits; ///< Traits template parameter
82 # ifdef CDS_DOXYGEN_INVOKED
83 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
85 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
87 typedef typename traits::item_counter item_counter; ///< Item counting policy
88 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
89 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
90 typedef typename traits::stat stat; ///< internal statistics
91 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
92 typedef typename traits::back_off back_off; ///< Back-off strategy
93 typedef typename traits::disposer disposer; ///< Value disposer
94 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
96 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
97 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
99 /// Group of \p extract_xxx functions does not require external locking
100 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
102 # ifdef CDS_DOXYGEN_INVOKED
103 /// Returned pointer to \p mapped_type of extracted node
104 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
106 typedef cds::urcu::exempt_ptr< gc,
107 typename std::remove_pointer<mapped_type>::type,
108 typename std::remove_pointer<mapped_type>::type,
114 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
118 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
119 typedef typename node_type::version_type version_type;
121 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
122 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
124 enum class find_result
141 result_inserted = allow_insert,
142 result_updated = allow_update,
149 nothing_required = -3,
150 rebalance_required = -2,
159 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
164 template <typename K>
165 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
167 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
170 static void free_node( node_type * pNode )
172 // Free node without disposer
173 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
174 assert( pNode->m_SyncMonitorInjection.check_free());
175 cxx_allocator().Delete( pNode );
178 static void free_value( mapped_type pVal )
183 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
185 return pNode->child( nDir, order );
188 static node_type * parent( node_type * pNode, atomics::memory_order order )
190 return pNode->parent( order );
196 node_type * m_pRetiredList; ///< head of retired node list
197 mapped_type m_pRetiredValue; ///< value retired
201 : m_pRetiredList( nullptr )
202 , m_pRetiredValue( nullptr )
210 void dispose( node_type * pNode )
212 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
213 pNode->m_pNextRemoved = m_pRetiredList;
214 m_pRetiredList = pNode;
217 void dispose_value( mapped_type pVal )
219 assert( m_pRetiredValue == nullptr );
220 m_pRetiredValue = pVal;
224 struct internal_disposer
226 void operator()( node_type * p ) const
234 assert( !gc::is_locked());
236 // TODO: use RCU::batch_retire
239 for ( node_type * p = m_pRetiredList; p; ) {
240 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
241 // Value already disposed
242 gc::template retire_ptr<internal_disposer>( p );
247 if ( m_pRetiredValue )
248 gc::template retire_ptr<disposer>( m_pRetiredValue );
256 typename node_type::base_class m_Root;
258 item_counter m_ItemCounter;
259 mutable sync_monitor m_Monitor;
264 /// Creates empty map
266 : m_pRoot( static_cast<node_type *>( &m_Root ))
277 The \p key_type should be constructible from a value of type \p K.
279 RCU \p synchronize() can be called. RCU should not be locked.
Returns \p true if the insertion is successful, \p false otherwise.
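A minimal usage sketch (illustrative only; \p map_type and \p my_value are the assumed types from the declaration sketch above):
\code
map_type m;
my_value * pVal = new my_value;
if ( !m.insert( 10, pVal )) {
    // the pointer was not stored, so the caller still owns it
    delete pVal;
}
\endcode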
283 template <typename K>
284 bool insert( K const& key, mapped_type pVal )
286 return do_update(key, key_comparator(),
287 [pVal]( node_type * pNode ) -> mapped_type
289 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
293 update_flags::allow_insert
294 ) == update_flags::result_inserted;
297 /// Updates the value for \p key
The operation performs insertion or update of the value for \p key in a lock-free manner.
If \p bInsert is \p false, only update of an existing node is possible.
302 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
303 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
304 constructible from type \p K.
305 Otherwise, the value for \p key will be changed to \p pVal.
307 RCU \p synchronize() method can be called. RCU should not be locked.
Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
\p second is \p true if a new node has been added or \p false if the node with \p key
already exists.
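A minimal usage sketch (illustrative only; \p map_type and \p my_value are assumed as above):
\code
map_type m;
my_value * pVal = new my_value;
std::pair<bool, bool> res = m.update( 10, pVal, true );
// res.first  - the operation succeeded
// res.second - a new node was inserted; false means the value of an existing node was replaced
//              (the replaced value is passed to the disposer after the RCU grace period)
\endcode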
313 template <typename K>
314 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
316 int result = do_update( key, key_comparator(),
317 [pVal]( node_type * ) -> mapped_type
321 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
323 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
/// Deletes \p key from the map
RCU \p synchronize() method can be called. RCU should not be locked.
Returns \p true if \p key is found and deleted, \p false otherwise.
334 template <typename K>
335 bool erase( K const& key )
340 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
344 /// Deletes the item from the map using \p pred predicate for searching
The function is an analog of \p erase(K const&)
but \p pred is used for key comparison.
The \p Less functor has an interface similar to \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
351 template <typename K, typename Less>
352 bool erase_with( K const& key, Less pred )
357 cds::opt::details::make_comparator_from_less<Less>(),
358 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
/// Deletes \p key from the map
The function searches for an item with key \p key, calls the \p f functor,
and deletes the item. If \p key is not found, the functor is not called.
367 The functor \p Func interface:
370 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
374 RCU \p synchronize method can be called. RCU should not be locked.
Returns \p true if \p key is found and deleted, \p false otherwise.
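A minimal usage sketch (illustrative only; \p map_type and \p my_value are assumed as above):
\code
map_type m;
// ...
bool bErased = m.erase( 10,
    []( int const& key, my_value& val ) {
        // the value is still valid here; after the call it is passed to the disposer
    });
\endcode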
378 template <typename K, typename Func>
379 bool erase( K const& key, Func f )
384 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
387 disp.dispose_value(pVal);
393 /// Deletes the item from the map using \p pred predicate for searching
The function is an analog of \p erase(K const&, Func)
but \p pred is used for key comparison.
The \p Less functor has an interface similar to \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
400 template <typename K, typename Less, typename Func>
401 bool erase_with( K const& key, Less pred, Func f )
406 cds::opt::details::make_comparator_from_less<Less>(),
407 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
410 disp.dispose_value(pVal);
416 /// Extracts a value with minimal key from the map
418 Returns \p exempt_ptr to the leftmost item.
419 If the tree is empty, returns empty \p exempt_ptr.
421 Note that the function returns only the value for minimal key.
422 To retrieve its key use \p extract_min( Func ) member function.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
This means that the function gets the leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
So, the function returns the item that had the minimal key at the moment of tree traversal.
429 RCU \p synchronize method can be called. RCU should NOT be locked.
430 The function does not free the item.
431 The deallocator will be implicitly invoked when the returned object is destroyed or when
432 its \p release() member function is called.
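A minimal usage sketch (illustrative only; \p map_type is assumed as above). RCU must not be locked when \p extract_min() is called:
\code
map_type m;
// ...
map_type::exempt_ptr xp = m.extract_min();
if ( xp ) {
    // use *xp here; the disposer is invoked when xp is destroyed
    // or when xp.release() is called
}
\endcode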
434 exempt_ptr extract_min()
436 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
439 /// Extracts minimal key and corresponding value
441 Returns \p exempt_ptr to the leftmost item.
442 If the tree is empty, returns empty \p exempt_ptr.
444 \p Func functor is used to store minimal key.
445 \p Func has the following signature:
448 void operator()( key_type const& key );
If the tree is empty, \p f is not called.
Otherwise, it is called with the minimal key, and the pointer to the corresponding value is returned
as \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> minimal key.
This means that the function gets the leftmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
So, the function returns the item that had the minimal key at the moment of tree traversal.
460 RCU \p synchronize method can be called. RCU should NOT be locked.
461 The function does not free the item.
462 The deallocator will be implicitly invoked when the returned object is destroyed or when
463 its \p release() member function is called.
465 template <typename Func>
466 exempt_ptr extract_min( Func f )
468 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
471 /// Extracts minimal key and corresponding value
473 This function is a shortcut for the following call:
476 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
\p key_type should be copy-assignable. A copy of the minimal key
is returned in the \p min_key argument.
481 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
482 extract_min_key( key_type& min_key )
484 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
487 /// Extracts a value with maximal key from the tree
489 Returns \p exempt_ptr pointer to the rightmost item.
490 If the set is empty, returns empty \p exempt_ptr.
492 Note that the function returns only the value for maximal key.
493 To retrieve its key use \p extract_max( Func ) member function.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
This means that the function gets the rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
So, the function returns the item that had the maximal key at the moment of tree traversal.
500 RCU \p synchronize method can be called. RCU should NOT be locked.
501 The function does not free the item.
502 The deallocator will be implicitly invoked when the returned object is destroyed or when
503 its \p release() is called.
505 exempt_ptr extract_max()
507 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
510 /// Extracts the maximal key and corresponding value
512 Returns \p exempt_ptr pointer to the rightmost item.
513 If the set is empty, returns empty \p exempt_ptr.
515 \p Func functor is used to store maximal key.
516 \p Func has the following signature:
519 void operator()( key_type const& key );
If the tree is empty, \p f is not called.
Otherwise, it is called with the maximal key, and the pointer to the corresponding value is returned
as \p exempt_ptr.
@note Due to the concurrent nature of the map, the function extracts a <i>nearly</i> maximal key.
This means that the function gets the rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
So, the function returns the item that had the maximal key at the moment of tree traversal.
531 RCU \p synchronize method can be called. RCU should NOT be locked.
532 The function does not free the item.
533 The deallocator will be implicitly invoked when the returned object is destroyed or when
534 its \p release() is called.
536 template <typename Func>
537 exempt_ptr extract_max( Func f )
539 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
542 /// Extracts the maximal key and corresponding value
544 This function is a shortcut for the following call:
547 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
\p key_type should be copy-assignable. A copy of the maximal key
is returned in the \p max_key argument.
552 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
553 extract_max_key( key_type& max_key )
555 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
558 /// Extracts an item from the map
The function searches for an item with key equal to \p key in the tree,
unlinks it, and returns an \p exempt_ptr to the value found.
If \p key is not found, the function returns an empty \p exempt_ptr.
564 RCU \p synchronize method can be called. RCU should NOT be locked.
565 The function does not destroy the value found.
566 The disposer will be implicitly invoked when the returned object is destroyed or when
567 its \p release() member function is called.
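A minimal usage sketch (illustrative only; \p map_type is assumed as above). RCU must not be locked here:
\code
map_type m;
// ...
map_type::exempt_ptr xp = m.extract( 10 );
if ( xp ) {
    // the value for key 10 has been unlinked from the map;
    // it is passed to the disposer when xp is destroyed
}
\endcode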
569 template <typename Q>
570 exempt_ptr extract( Q const& key )
572 return exempt_ptr(do_extract( key ));
576 /// Extracts an item from the map using \p pred for searching
The function is an analog of \p extract(Q const&)
but \p pred is used for key comparison.
\p Less has an interface similar to \p std::less.
\p pred must imply the same element order as the comparator used for building the tree.
583 template <typename Q, typename Less>
584 exempt_ptr extract_with( Q const& key, Less pred )
586 return exempt_ptr(do_extract_with( key, pred ));
/// Finds the key \p key
The function searches for the item with key equal to \p key and calls the functor \p f for the item found.
The interface of the \p Func functor is:
void operator()( key_type const& key, std::remove_pointer< mapped_type >::type& item );
where \p item is the item found.
The functor is called under the node-level lock.
601 The function applies RCU lock internally.
603 The function returns \p true if \p key is found, \p false otherwise.
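A minimal usage sketch (illustrative only; \p map_type and \p my_value are assumed as above):
\code
map_type m;
// ...
bool bFound = m.find( 10,
    []( int const& key, my_value& val ) {
        // read or modify val; the functor runs under the node-level lock, so keep it short
    });
\endcode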
605 template <typename K, typename Func>
606 bool find( K const& key, Func f )
608 return do_find( key, key_comparator(),
609 [&f]( node_type * pNode ) -> bool {
610 assert( pNode != nullptr );
611 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
613 f( pNode->m_key, *pVal );
/// Finds the key \p key using \p pred predicate for searching
The function is an analog of \p find(K const&, Func)
but \p pred is used for key comparison.
The \p Less functor has an interface similar to \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
628 template <typename K, typename Less, typename Func>
629 bool find_with( K const& key, Less pred, Func f )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
633 [&f]( node_type * pNode ) -> bool {
634 assert( pNode != nullptr );
635 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
637 f( pNode->m_key, *pVal );
645 /// Checks whether the map contains \p key
647 The function searches the item with key equal to \p key
648 and returns \p true if it is found, and \p false otherwise.
650 The function applies RCU lock internally.
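For example (illustrative only; \p m is an assumed \p map_type instance):
\code
if ( m.contains( 10 )) {
    // the key was present at the moment of the check
}
\endcode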
652 template <typename K>
653 bool contains( K const& key )
655 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
658 /// Checks whether the map contains \p key using \p pred predicate for searching
The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparison.
The \p Less functor has an interface similar to \p std::less.
\p Less must imply the same element order as the comparator used for building the map.
664 template <typename K, typename Less>
665 bool contains( K const& key, Less pred )
668 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
671 /// Clears the tree (thread safe, not atomic)
The function unlinks all items from the tree.
The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions,
the assertion in a sequence like <tt>m.clear(); assert( m.empty());</tt>
could fail.
682 For each node the \ref disposer will be called after unlinking.
684 RCU \p synchronize method can be called. RCU should not be locked.
688 while ( extract_min());
691 /// Clears the tree (not thread safe)
693 This function is not thread safe and may be called only when no other thread deals with the tree.
694 The function is used in the tree destructor.
698 clear(); // temp solution
702 /// Checks if the map is empty
705 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
708 /// Returns item count in the map
710 Only leaf nodes containing user data are counted.
712 The value returned depends on item counter type provided by \p Traits template parameter.
713 If it is \p atomicity::empty_item_counter this function always returns 0.
The function is not suitable for checking the tree emptiness; use the \p empty()
member function for this purpose.
720 return m_ItemCounter;
723 /// Returns const reference to internal statistics
724 stat const& statistics() const
729 /// Returns reference to \p sync_monitor object
730 sync_monitor& monitor()
735 sync_monitor const& monitor() const
741 /// Checks internal consistency (not atomic, not thread-safe)
743 The debugging function to check internal consistency of the tree.
745 bool check_consistency() const
747 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
750 /// Checks internal consistency (not atomic, not thread-safe)
752 The debugging function to check internal consistency of the tree.
The functor \p Func is called if a violation of the internal tree structure is found:
757 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
761 - \p nLevel - the level where the violation is found
762 - \p hLeft - the height of left subtree
763 - \p hRight - the height of right subtree
765 The functor is called for each violation found.
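A minimal usage sketch (illustrative only; \p m is an assumed \p map_type instance):
\code
bool bOk = m.check_consistency(
    []( size_t nLevel, size_t hLeft, size_t hRight ) {
        // a balance violation: |hLeft - hRight| > 1 at level nLevel
    });
\endcode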
767 template <typename Func>
768 bool check_consistency( Func f ) const
770 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
773 do_check_consistency( pChild, 1, f, nErrors );
781 template <typename Func>
782 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
786 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
787 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
788 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
790 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
793 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
794 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
796 if ( hLeft >= hRight ) {
797 if ( hLeft - hRight > 1 ) {
798 f( nLevel, hLeft, hRight );
804 if ( hRight - hLeft > 1 ) {
805 f( nLevel, hLeft, hRight );
814 template <typename Q, typename Compare, typename Func>
815 bool do_find( Q& key, Compare cmp, Func f ) const
820 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
822 assert( result != find_result::retry );
823 return result == find_result::found;
826 template <typename K, typename Compare, typename Func>
827 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
829 check_deadlock_policy::check();
831 rcu_disposer removed_list;
834 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
838 template <typename K, typename Compare, typename Func>
839 bool do_remove( K const& key, Compare cmp, Func func )
841 // Func must return true if the value was disposed
842 // or false if the value was extracted
844 check_deadlock_policy::check();
846 rcu_disposer removed_list;
849 return try_remove_root( key, cmp, func, removed_list );
853 template <typename Func>
854 mapped_type do_extract_min( Func f )
856 mapped_type pExtracted = nullptr;
859 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
864 template <typename Func>
865 mapped_type do_extract_max( Func f )
867 mapped_type pExtracted = nullptr;
870 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
875 template <typename Func>
876 void do_extract_minmax( int nDir, Func func )
878 check_deadlock_policy::check();
880 rcu_disposer removed_list;
887 // get right child of root
888 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
890 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
891 if ( nChildVersion & node_type::shrinking ) {
892 m_stat.onRemoveRootWaitShrinking();
893 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
894 result = update_flags::retry;
896 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
897 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
900 result = update_flags::retry;
905 if ( result == update_flags::retry )
906 m_stat.onRemoveRetry();
908 m_stat.onExtract( result == update_flags::result_removed );
915 template <typename Q>
916 mapped_type do_extract( Q const& key )
918 mapped_type pExtracted = nullptr;
922 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
924 m_stat.onExtract( pExtracted != nullptr );
928 template <typename Q, typename Less>
929 mapped_type do_extract_with( Q const& key, Less pred )
932 mapped_type pExtracted = nullptr;
935 cds::opt::details::make_comparator_from_less<Less>(),
936 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
938 m_stat.onExtract( pExtracted != nullptr );
945 static int height( node_type * pNode, atomics::memory_order order )
948 return pNode->m_nHeight.load( order );
950 static void set_height( node_type * pNode, int h, atomics::memory_order order )
953 pNode->m_nHeight.store( h, order );
955 static int height_null( node_type * pNode, atomics::memory_order order )
957 return pNode ? height( pNode, order ) : 0;
960 static CDS_CONSTEXPR int const c_stackSize = 64;
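// The traversal helpers below (try_find / try_update / try_remove / try_extract_minmax)
// implement the hand-over-hand optimistic descent of the Bronson et al. algorithm iteratively:
// each step records the current node and its version on an explicit stack (bounded by
// c_stackSize instead of using recursion), waits while the node's 'shrinking' bit is set,
// and backtracks or retries whenever a version re-check fails.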
962 template <typename Q, typename Compare, typename Func>
963 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
965 assert( gc::is_locked());
971 version_type nVersion;
975 stack_record stack[c_stackSize];
977 stack[0].pNode = pNode;
978 stack[0].nVersion = nVersion;
979 stack[0].nDir = nDir;
982 pNode = stack[pos].pNode;
983 nVersion = stack[pos].nVersion;
984 nDir = stack[pos].nDir;
987 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
989 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
991 m_stat.onFindRetry();
994 m_stat.onFindFailed();
995 return find_result::not_found;
998 int nCmp = cmp( key, pChild->m_key );
1000 if ( pChild->is_valued( memory_model::memory_order_acquire )) {
1002 node_scoped_lock l( m_Monitor, *pChild );
1003 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
1004 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1006 m_stat.onFindSuccess();
1007 return find_result::found;
1012 m_stat.onFindRetry();
1016 m_stat.onFindFailed();
1017 return find_result::not_found;
1020 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1021 if ( nChildVersion & node_type::shrinking ) {
1022 m_stat.onFindWaitShrinking();
1023 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1025 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1027 m_stat.onFindRetry();
1031 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1033 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1035 m_stat.onFindRetry();
1040 assert(pos < c_stackSize);
1041 stack[pos].pNode = pChild;
1042 stack[pos].nVersion = nChildVersion;
1043 stack[pos].nDir = nCmp;
1044 break; // child iteration
1047 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1049 m_stat.onFindRetry();
1053 m_stat.onFindRetry();
1056 return find_result::retry;
1059 template <typename K, typename Compare, typename Func>
1060 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1062 assert( gc::is_locked());
1067 // get right child of root
1068 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1070 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1071 if ( nChildVersion & node_type::shrinking ) {
1072 m_stat.onUpdateRootWaitShrinking();
1073 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1074 result = update_flags::retry;
1076 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1077 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1079 result = update_flags::retry;
1082 // the tree is empty
1083 if ( nFlags & update_flags::allow_insert ) {
1084 // insert into tree as right child of the root
1086 node_scoped_lock l( m_Monitor, *m_pRoot );
1087 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1088 result = update_flags::retry;
1092 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1093 mapped_type pVal = funcUpdate( pNew );
1094 assert( pVal != nullptr );
1095 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1097 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1098 set_height( m_pRoot, 2, memory_model::memory_order_release );
1102 m_stat.onInsertSuccess();
1103 return update_flags::result_inserted;
1106 return update_flags::failed;
1109 if ( result != update_flags::retry )
1114 template <typename K, typename Compare, typename Func>
1115 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1117 assert( gc::is_locked());
1122 // get right child of root
1123 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1125 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1126 if ( nChildVersion & node_type::shrinking ) {
1127 m_stat.onRemoveRootWaitShrinking();
1128 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1129 result = update_flags::retry;
1131 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1132 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1135 result = update_flags::retry;
1140 if ( result == update_flags::retry )
1141 m_stat.onRemoveRetry();
1143 m_stat.onRemove( result == update_flags::result_removed );
1144 return result == update_flags::result_removed;
1149 template <typename K, typename Compare, typename Func>
1150 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1152 assert( gc::is_locked());
1153 assert( nVersion != node_type::unlinked );
1158 version_type nVersion;
1161 stack_record stack[c_stackSize];
1163 stack[0].pNode = pNode;
1164 stack[0].nVersion = nVersion;
1166 while ( pos >= 0 ) {
1167 pNode = stack[pos].pNode;
1168 nVersion = stack[pos].nVersion;
1170 int nCmp = cmp( key, pNode->m_key );
1172 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1173 if ( result != update_flags::retry )
1176 m_stat.onUpdateRetry();
1181 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1182 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1184 m_stat.onUpdateRetry();
1188 if ( pChild == nullptr ) {
1190 if ( nFlags & update_flags::allow_insert ) {
1191 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1192 if ( result != update_flags::retry )
1195 m_stat.onUpdateRetry();
1199 return update_flags::failed;
1203 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1204 if ( nChildVersion & node_type::shrinking ) {
1205 m_stat.onUpdateWaitShrinking();
1206 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1209 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1210 // this second read is important, because it is protected by nChildVersion
1212 // validate the read that our caller took to get to node
1213 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1215 m_stat.onUpdateRetry();
1218 // At this point we know that the traversal our parent took to get to node is still valid.
1219 // The recursive implementation will validate the traversal from node to
1220 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1221 // This means that we are no longer vulnerable to node shrinks, and we don't need
1222 // to validate node version any more.
1224 assert( pos < c_stackSize );
1225 stack[pos].pNode = pChild;
1226 stack[pos].nVersion = nChildVersion;
1227 assert( nChildVersion != node_type::unlinked );
1228 break; // child iteration
1230 m_stat.onUpdateRetry();
1234 return update_flags::retry;
1237 template <typename K, typename Compare, typename Func>
1238 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1240 assert( gc::is_locked());
1241 assert( nVersion != node_type::unlinked );
1245 node_type * pParent;
1247 version_type nVersion;
1250 stack_record stack[c_stackSize];
1252 stack[0].pParent = pParent;
1253 stack[0].pNode = pNode;
1254 stack[0].nVersion = nVersion;
1256 while ( pos >= 0 ) {
1257 pParent = stack[pos].pParent;
1258 pNode = stack[pos].pNode;
1259 nVersion = stack[pos].nVersion;
1261 int nCmp = cmp( key, pNode->m_key );
1263 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1264 if ( result != update_flags::retry )
1267 m_stat.onRemoveRetry();
1272 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1273 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1275 m_stat.onRemoveRetry();
1279 if ( pChild == nullptr )
1280 return update_flags::failed;
1283 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1284 if ( nChildVersion & node_type::shrinking ) {
1285 m_stat.onRemoveWaitShrinking();
1286 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1289 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1290 // this second read is important, because it is protected by nChildVersion
1292 // validate the read that our caller took to get to node
1293 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1295 m_stat.onRemoveRetry();
1299 // At this point we know that the traversal our parent took to get to node is still valid.
1300 // The recursive implementation will validate the traversal from node to
1301 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1302 // This means that we are no longer vulnerable to node shrinks, and we don't need
1303 // to validate node version any more.
1305 assert( pos < c_stackSize );
1306 stack[pos].pParent = pNode;
1307 stack[pos].pNode = pChild;
1308 stack[pos].nVersion = nChildVersion;
1309 break; // child iteration
1311 m_stat.onRemoveRetry();
1314 return update_flags::retry;
1317 template <typename Func>
1318 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1320 assert( gc::is_locked());
1321 assert( nVersion != node_type::unlinked );
1325 node_type * pParent;
1327 version_type nVersion;
1330 stack_record stack[c_stackSize];
1332 stack[0].pParent = pParent;
1333 stack[0].pNode = pNode;
1334 stack[0].nVersion = nVersion;
1336 while ( pos >= 0 ) {
1337 pParent = stack[pos].pParent;
1338 pNode = stack[pos].pNode;
1339 nVersion = stack[pos].nVersion;
1343 node_type * pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
1344 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1346 m_stat.onRemoveRetry();
1352 if ( pNode->is_valued( memory_model::memory_order_acquire ) ) {
1353 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1355 if ( result == update_flags::result_removed )
1359 m_stat.onRemoveRetry();
1363 // check right (for min) or left (for max) child node
1365 pChild = child( pNode, iterDir, memory_model::memory_order_acquire );
1368 m_stat.onRemoveRetry();
1374 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1375 if ( nChildVersion & node_type::shrinking ) {
1376 m_stat.onRemoveWaitShrinking();
1377 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1380 else if ( pChild == child( pNode, iterDir, memory_model::memory_order_acquire )) {
1381 // this second read is important, because it is protected by nChildVersion
1383 // validate the read that our caller took to get to node
1384 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1386 m_stat.onRemoveRetry();
1390 // At this point we know that the traversal our parent took to get to node is still valid.
1391 // The recursive implementation will validate the traversal from node to
1392 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1393 // This means that we are no longer vulnerable to node shrinks, and we don't need
1394 // to validate node version any more.
1396 assert( pos < c_stackSize );
1397 stack[pos].pParent = pNode;
1398 stack[pos].pNode = pChild;
1399 stack[pos].nVersion = nChildVersion;
1400 break; // child iteration
1402 m_stat.onRemoveRetry();
1405 return update_flags::retry;
1408 template <typename K, typename Func>
1409 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
1413 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1414 mapped_type pVal = funcUpdate( pNew );
1415 assert( pVal != nullptr );
1416 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1419 if ( c_bRelaxedInsert ) {
1420 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1421 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1423 m_stat.onInsertRetry();
1424 return update_flags::retry;
1427 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1430 node_type * pDamaged;
1432 assert( pNode != nullptr );
1433 node_scoped_lock l( m_Monitor, *pNode );
1435 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1436 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1438 if ( c_bRelaxedInsert ) {
1439 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1440 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1443 m_stat.onRelaxedInsertFailed();
1446 m_stat.onInsertRetry();
1447 return update_flags::retry;
1450 if ( !c_bRelaxedInsert )
1451 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1453 pNode->child( pNew, nDir, memory_model::memory_order_release );
1454 pDamaged = fix_height_locked( pNode );
1458 m_stat.onInsertSuccess();
1461 fix_height_and_rebalance( pDamaged, disp );
1462 m_stat.onInsertRebalanceRequired();
1465 return update_flags::result_inserted;
1468 template <typename Func>
1469 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1473 assert( pNode != nullptr );
1475 node_scoped_lock l( m_Monitor, *pNode );
1477 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1478 return update_flags::retry;
1480 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1481 m_stat.onUpdateUnlinked();
1482 return update_flags::retry;
1485 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) {
1486 m_stat.onInsertFailed();
1487 return update_flags::failed;
1490 pOld = pNode->value( memory_model::memory_order_relaxed );
1491 bInserted = pOld == nullptr;
1492 mapped_type pVal = funcUpdate( pNode );
1496 assert( pVal != nullptr );
1497 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
1502 disp.dispose_value(pOld);
1503 m_stat.onDisposeValue();
1508 m_stat.onInsertSuccess();
1509 return update_flags::result_inserted;
1512 m_stat.onUpdateSuccess();
1513 return update_flags::result_updated;
1516 template <typename Func>
1517 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1519 assert( pParent != nullptr );
1520 assert( pNode != nullptr );
1522 if ( !pNode->is_valued( memory_model::memory_order_acquire ))
1523 return update_flags::failed;
1525 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1526 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1528 // pNode can be replaced with its child
1530 node_type * pDamaged;
1533 node_scoped_lock lp( m_Monitor, *pParent );
1534 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1535 return update_flags::retry;
1538 node_scoped_lock ln( m_Monitor, *pNode );
1539 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1540 return update_flags::retry;
1542 pOld = pNode->value( memory_model::memory_order_relaxed );
1544 return update_flags::failed;
1546 if ( !try_unlink_locked( pParent, pNode, disp ))
1547 return update_flags::retry;
1549 pDamaged = fix_height_locked( pParent );
1553 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1554 m_stat.onDisposeValue();
1556 m_stat.onExtractValue();
1559 fix_height_and_rebalance( pDamaged, disp );
1560 m_stat.onRemoveRebalanceRequired();
// pNode is an internal node with two children; it cannot be spliced out, so it becomes a routing node
1568 node_scoped_lock ln( m_Monitor, *pNode );
1569 pOld = pNode->value( memory_model::memory_order_relaxed );
1570 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1571 return update_flags::retry;
1573 return update_flags::failed;
1575 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1576 m_stat.onMakeRoutingNode();
1580 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1581 m_stat.onDisposeValue();
1583 m_stat.onExtractValue();
1585 return update_flags::result_removed;
1588 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1590 // pParent and pNode must be locked
1591 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed));
1593 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1594 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1595 if ( pNode != pParentLeft && pNode != pParentRight ) {
1596 // node is no longer a child of parent
1600 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1601 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1603 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1604 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1605 if ( pLeft != nullptr && pRight != nullptr ) {
1606 // splicing is no longer possible
1609 node_type * pSplice = pLeft ? pLeft : pRight;
1611 if ( pParentLeft == pNode )
1612 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1614 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1617 pSplice->parent( pParent, memory_model::memory_order_release );
1619 // Mark the node as unlinked
1620 pNode->version( node_type::unlinked, memory_model::memory_order_release );
// The value will be disposed by the calling function
1623 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1625 disp.dispose( pNode );
1626 m_stat.onDisposeNode();
1633 private: // rotations
1635 int estimate_node_condition( node_type * pNode )
1637 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1638 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
1640 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1641 return unlink_required;
1643 int h = height( pNode, memory_model::memory_order_acquire );
1644 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1645 int hR = height_null( pRight, memory_model::memory_order_acquire );
1647 int hNew = 1 + std::max( hL, hR );
1648 int nBalance = hL - hR;
1650 if ( nBalance < -1 || nBalance > 1 )
1651 return rebalance_required;
1653 return h != hNew ? hNew : nothing_required;
1656 node_type * fix_height( node_type * pNode )
1658 assert( pNode != nullptr );
1659 node_scoped_lock l( m_Monitor, *pNode );
1660 return fix_height_locked( pNode );
1663 node_type * fix_height_locked( node_type * pNode )
1665 // pNode must be locked!!!
1666 int h = estimate_node_condition( pNode );
1668 case rebalance_required:
1669 case unlink_required:
1671 case nothing_required:
1674 set_height( pNode, h, memory_model::memory_order_release );
1675 return parent( pNode, memory_model::memory_order_relaxed );
1679 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1681 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1682 int nCond = estimate_node_condition( pNode );
1683 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ))
1686 if ( nCond != unlink_required && nCond != rebalance_required )
1687 pNode = fix_height( pNode );
1689 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1690 assert( pParent != nullptr );
1692 node_scoped_lock lp( m_Monitor, *pParent );
1693 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1694 node_scoped_lock ln( m_Monitor, *pNode );
1695 pNode = rebalance_locked( pParent, pNode, disp );
1702 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1704 // pParent and pNode should be locked.
1705 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1706 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1708 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1709 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1711 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1712 if ( try_unlink_locked( pParent, pNode, disp ))
1713 return fix_height_locked( pParent );
1715 // retry needed for pNode
1720 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1721 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1723 int h = height( pNode, memory_model::memory_order_acquire );
1724 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1725 int hR = height_null( pRight, memory_model::memory_order_acquire );
1726 int hNew = 1 + std::max( hL, hR );
1727 int balance = hL - hR;
1730 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1731 else if ( balance < -1 )
1732 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1733 else if ( hNew != h ) {
1734 set_height( pNode, hNew, memory_model::memory_order_release );
1736 // pParent is already locked
1737 return fix_height_locked( pParent );
1743 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1745 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1746 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1747 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked
1750 // pNode->pLeft is too large, we will rotate-right.
1751 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1754 assert( pLeft != nullptr );
1755 node_scoped_lock l( m_Monitor, *pLeft );
1756 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1757 return pNode; // retry for pNode
1759 int hL = height( pLeft, memory_model::memory_order_acquire );
1761 return pNode; // retry
1763 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1764 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1765 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1766 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
1770 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1774 node_scoped_lock lr( m_Monitor, *pLRight );
1775 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1776 return pNode; // retry
1778 hLR = height( pLRight, memory_model::memory_order_acquire );
1780 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1782 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1783 int balance = hLL - hLRL;
1784 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
// pParent.child.left won't be damaged after a double rotation
1786 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1790 return pNode; // retry
1792 // focus on pLeft, if necessary pNode will be balanced later
1793 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1798 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1800 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1801 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1802 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
// pParent and pNode are already locked
1806 assert( pRight != nullptr );
1807 node_scoped_lock l( m_Monitor, *pRight );
1808 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1809 return pNode; // retry for pNode
1811 int hR = height( pRight, memory_model::memory_order_acquire );
1812 if ( hL - hR >= -1 )
1813 return pNode; // retry
1815 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1816 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1817 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1818 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
1820 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1823 node_scoped_lock lrl( m_Monitor, *pRLeft );
1824 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1825 return pNode; // retry
1827 hRL = height( pRLeft, memory_model::memory_order_acquire );
1829 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1831 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1832 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1833 int balance = hRR - hRLR;
1834 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1835 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1838 return pNode; // retry
1839 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
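// begin_change()/end_change() bracket a rotation: begin_change() publishes the 'shrinking'
// bit in the node version so that concurrent readers wait or retry, and end_change() clears
// the flags and increments the version, so any traversal that passed through the node
// during the rotation fails its subsequent version re-check.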
1843 static void begin_change( node_type * pNode, version_type version )
1845 assert(pNode->version(memory_model::memory_order_acquire) == version );
1846 assert( (version & node_type::shrinking) == 0 );
1847 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1849 static void end_change( node_type * pNode, version_type version )
1851 // Clear shrinking and unlinked flags and increment version
1852 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1855 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1857 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1858 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1860 begin_change( pNode, nodeVersion );
1862 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1863 if ( pLRight != nullptr )
1864 pLRight->parent( pNode, memory_model::memory_order_release );
1866 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1867 pNode->parent( pLeft, memory_model::memory_order_release );
1869 if ( pParentLeft == pNode )
1870 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1872 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1873 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1875 pLeft->parent( pParent, memory_model::memory_order_release );
// fix up heights
1878 int hNode = 1 + std::max( hLR, hR );
1879 set_height( pNode, hNode, memory_model::memory_order_release );
1880 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
1882 end_change( pNode, nodeVersion );
1883 m_stat.onRotateRight();
1885 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1886 // parent.child). pNode is the deepest. Perform as many fixes as we can
1887 // with the locks we've got.
1889 // We've already fixed the height for pNode, but it might still be outside
// our allowable balance range, in which case a simple fix_height_locked() will not be enough.
1892 int nodeBalance = hLR - hR;
1893 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1894 // we need another rotation at pNode
1895 m_stat.onRotateAfterRightRotation();
1899 // we've fixed balance and height damage for pNode, now handle
1900 // extra-routing node damage
1901 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1902 // we need to remove pNode and then repair
1903 m_stat.onRemoveAfterRightRotation();
1907 // we've already fixed the height at pLeft, do we need a rotation here?
1908 int leftBalance = hLL - hNode;
1909 if ( leftBalance < -1 || leftBalance > 1 ) {
1910 m_stat.onRotateAfterRightRotation();
1914 // pLeft might also have routing node damage (if pLeft.left was null)
1915 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) {
1916 m_stat.onDamageAfterRightRotation();
1920 // try to fix the parent height while we've still got the lock
1921 return fix_height_locked( pParent );
1924 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1926 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1927 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1929 begin_change( pNode, nodeVersion );
1931 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1932 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1933 if ( pRLeft != nullptr )
1934 pRLeft->parent( pNode, memory_model::memory_order_release );
1936 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1937 pNode->parent( pRight, memory_model::memory_order_release );
1939 if ( pParentLeft == pNode )
1940 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1941 else {
1942 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1943 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1944 }
1945 pRight->parent( pParent, memory_model::memory_order_release );
1948 int hNode = 1 + std::max( hL, hRL );
1949 set_height( pNode, hNode, memory_model::memory_order_release );
1950 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
1952 end_change( pNode, nodeVersion );
1953 m_stat.onRotateLeft();
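// Post-rotation repair mirrors rotate_right_locked(): see whether pNode still
// needs a rotation or has become an unnecessary routing node, then run the
// same checks for pRight, and finally fix the parent height.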
1955 int nodeBalance = hRL - hL;
1956 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1957 m_stat.onRotateAfterLeftRotation();
1958 return pNode;
1959 }
1961 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1962 m_stat.onRemoveAfterLeftRotation();
1963 return pNode;
1964 }
1966 int rightBalance = hRR - hNode;
1967 if ( rightBalance < -1 || rightBalance > 1 ) {
1968 m_stat.onRotateAfterLeftRotation();
1969 return pRight;
1970 }
1972 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) {
1973 m_stat.onDamageAfterLeftRotation();
1974 return pRight;
1975 }
1977 return fix_height_locked( pParent );
1978 }
1980 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1981 {
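// Double rotation (right over left): pLeft is first rotated left around its
// right child pLRight, then pNode is rotated right, so pLRight becomes the
// new subtree root. The pointer updates below perform both steps at once;
// subtree heights in parentheses:
//
//        pParent                          pParent
//          |                                |
//        pNode                           pLRight
//        /    \                          /      \
//     pLeft   (hR)        ==>        pLeft      pNode
//     /    \                         /    \     /    \
//  (hLL) pLRight                  (hLL)  pLRL pLRR   (hR)
//         /    \
//      pLRL   pLRR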
1982 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1983 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1985 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1986 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1987 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1988 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
1990 begin_change( pNode, nodeVersion );
1991 begin_change( pLeft, leftVersion );
1993 // fix up pNode links, careful about the order!
1994 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1995 if ( pLRR != nullptr )
1996 pLRR->parent( pNode, memory_model::memory_order_release );
1998 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1999 if ( pLRL != nullptr )
2000 pLRL->parent( pLeft, memory_model::memory_order_release );
2002 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
2003 pLeft->parent( pLRight, memory_model::memory_order_release );
2005 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
2006 pNode->parent( pLRight, memory_model::memory_order_release );
2008 if ( pPL == pNode )
2009 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
2010 else {
2011 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
2012 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
2013 }
2014 pLRight->parent( pParent, memory_model::memory_order_release );
2017 int hNode = 1 + std::max( hLRR, hR );
2018 set_height( pNode, hNode, memory_model::memory_order_release );
2019 int hLeft = 1 + std::max( hLL, hLRL );
2020 set_height( pLeft, hLeft, memory_model::memory_order_release );
2021 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
2023 end_change( pNode, nodeVersion );
2024 end_change( pLeft, leftVersion );
2025 m_stat.onRotateRightOverLeft();
2027 // caller should have performed only a single rotation if pLeft was going
2028 // to end up damaged
2029 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2030 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2032 // We have damaged pParent, pLR (now parent.child), and pNode (now
2033 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2034 // can with the locks we've got.
2036 // We've already fixed the height for pNode, but it might still be outside
2037 // our allowable balance range. In that case a simple fix_height_locked()
2038 // won't help.
2039 int nodeBalance = hLRR - hR;
2040 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2041 // we need another rotation at pNode
2042 m_stat.onRotateAfterRLRotation();
2043 return pNode;
2044 }
2046 // pNode might also be damaged by being an unnecessary routing node
2047 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2048 // repair involves splicing out pNode and maybe more rotations
2049 m_stat.onRemoveAfterRLRotation();
2050 return pNode;
2051 }
2053 // we've already fixed the height at pLRight, do we need a rotation here?
2054 int balanceLR = hLeft - hNode;
2055 if ( balanceLR < -1 || balanceLR > 1 ) {
2056 m_stat.onRotateAfterRLRotation();
2057 return pLRight;
2058 }
2060 // try to fix the parent height while we've still got the lock
2061 return fix_height_locked( pParent );
2062 }
2064 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2065 {
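// Mirror image of rotate_right_over_left_locked(): a double rotation (left
// over right) that makes pRLeft the new subtree root; subtree heights in
// parentheses:
//
//      pParent                          pParent
//        |                                |
//      pNode                            pRLeft
//      /    \                           /     \
//    (hL)  pRight         ==>        pNode    pRight
//          /    \                    /   \    /    \
//       pRLeft (hRR)               (hL) pRLL pRLR  (hRR)
//        /   \
//     pRLL  pRLR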
2066 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2067 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2069 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2070 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2071 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2072 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
2074 begin_change( pNode, nodeVersion );
2075 begin_change( pRight, rightVersion );
2077 // fix up pNode links, careful about the order!
2078 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2079 if ( pRLL != nullptr )
2080 pRLL->parent( pNode, memory_model::memory_order_release );
2082 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2083 if ( pRLR != nullptr )
2084 pRLR->parent( pRight, memory_model::memory_order_release );
2086 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2087 pRight->parent( pRLeft, memory_model::memory_order_release );
2089 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2090 pNode->parent( pRLeft, memory_model::memory_order_release );
2092 if ( pPL == pNode )
2093 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2094 else {
2095 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2096 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2097 }
2098 pRLeft->parent( pParent, memory_model::memory_order_release );
2101 int hNode = 1 + std::max( hL, hRLL );
2102 set_height( pNode, hNode, memory_model::memory_order_release );
2103 int hRight = 1 + std::max( hRLR, hRR );
2104 set_height( pRight, hRight, memory_model::memory_order_release );
2105 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
2107 end_change( pNode, nodeVersion );
2108 end_change( pRight, rightVersion );
2109 m_stat.onRotateLeftOverRight();
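// Post-rotation repair mirrors rotate_right_over_left_locked(): the caller is
// expected to have chosen a single rotation if pRight could end up damaged
// (asserted below); the remaining checks repair pNode and pRLeft and then fix
// the parent height.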
2111 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
2113 int nodeBalance = hRLL - hL;
2114 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2115 m_stat.onRotateAfterLRRotation();
2116 return pNode;
2117 }
2119 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2120 m_stat.onRemoveAfterLRRotation();
2121 return pNode;
2122 }
2124 int balRL = hRight - hNode;
2125 if ( balRL < -1 || balRL > 1 ) {
2126 m_stat.onRotateAfterLRRotation();
2127 return pRLeft;
2128 }
2130 return fix_height_locked( pParent );
2131 }
2132 //@endcond
2133 };
2135 }} // namespace cds::container
2137 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H