/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
32 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
34 #include <type_traits> // is_base_of
35 #include <cds/container/details/bronson_avltree_base.h>
36 #include <cds/urcu/details/check_deadlock.h>
37 #include <cds/urcu/exempt_ptr.h>
39 namespace cds { namespace container {
41 /// Bronson et al AVL-tree (RCU specialization for pointers)
42 /** @ingroup cds_nonintrusive_map
43 @ingroup cds_nonintrusive_tree
44 @headerfile cds/container/bronson_avltree_map_rcu.h
45 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
47 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
48 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
49 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
50 the disposer functor provided by \p Traits template parameter.
52 <b>Template arguments</b>:
53 - \p RCU - one of \ref cds_urcu_gc "RCU type"
55 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
57 - \p Traits - tree traits, default is \p bronson_avltree::traits
58 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
59 instead of \p Traits template argument.
61 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
62 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
68 # ifdef CDS_DOXYGEN_INVOKED
69 typename Traits = bronson_avltree::traits
74 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
77 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
78 typedef Key key_type; ///< type of a key stored in the map
79 typedef T * mapped_type; ///< type of value stored in the map
80 typedef Traits traits; ///< Traits template parameter
82 # ifdef CDS_DOXYGEN_INVOKED
83 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
85 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
87 typedef typename traits::item_counter item_counter; ///< Item counting policy
88 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
89 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
90 typedef typename traits::stat stat; ///< internal statistics
91 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
92 typedef typename traits::back_off back_off; ///< Back-off strategy
93 typedef typename traits::disposer disposer; ///< Value disposer
94 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
96 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
97 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
99 /// Group of \p extract_xxx functions does not require external locking
100 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
102 # ifdef CDS_DOXYGEN_INVOKED
103 /// Returned pointer to \p mapped_type of extracted node
104 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
106 typedef cds::urcu::exempt_ptr< gc,
107 typename std::remove_pointer<mapped_type>::type,
108 typename std::remove_pointer<mapped_type>::type,
114 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
118 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
119 typedef typename node_type::version_type version_type;
121 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
122 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
124 enum class find_result
141 result_inserted = allow_insert,
142 result_updated = allow_update,
149 nothing_required = -3,
150 rebalance_required = -2,
159 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
164 template <typename K>
165 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
167 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
170 static void free_node( node_type * pNode )
172 // Free node without disposer
173 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
174 assert( pNode->m_SyncMonitorInjection.check_free());
175 cxx_allocator().Delete( pNode );
178 static void free_value( mapped_type pVal )
183 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
185 return pNode->child( nDir, order );
188 static node_type * parent( node_type * pNode, atomics::memory_order order )
190 return pNode->parent( order );
196 node_type * m_pRetiredList; ///< head of retired node list
197 mapped_type m_pRetiredValue; ///< value retired
201 : m_pRetiredList( nullptr )
202 , m_pRetiredValue( nullptr )
210 void dispose( node_type * pNode )
212 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
213 pNode->m_pNextRemoved = m_pRetiredList;
214 m_pRetiredList = pNode;
217 void dispose_value( mapped_type pVal )
219 assert( m_pRetiredValue == nullptr );
220 m_pRetiredValue = pVal;
224 struct internal_disposer
226 void operator()( node_type * p ) const
234 assert( !gc::is_locked());
236 // TODO: use RCU::batch_retire
239 for ( node_type * p = m_pRetiredList; p; ) {
240 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
241 // Value already disposed
242 gc::template retire_ptr<internal_disposer>( p );
247 if ( m_pRetiredValue )
248 gc::template retire_ptr<disposer>( m_pRetiredValue );
256 typename node_type::base_class m_Root;
258 item_counter m_ItemCounter;
259 mutable sync_monitor m_Monitor;
264 /// Creates empty map
266 : m_pRoot( static_cast<node_type *>( &m_Root ))
277 The \p key_type should be constructible from a value of type \p K.
279 RCU \p synchronize() can be called. RCU should not be locked.
281 Returns \p true if inserting successful, \p false otherwise.
283 template <typename K>
284 bool insert( K const& key, mapped_type pVal )
286 return do_update(key, key_comparator(),
287 [pVal]( node_type * pNode ) -> mapped_type
289 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
293 update_flags::allow_insert
294 ) == update_flags::result_inserted;
297 /// Updates the value for \p key
299 The operation performs inserting or updating the value for \p key with lock-free manner.
300 If \p bInsert is \p false, only updating of existing node is possible.
302 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
303 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
304 constructible from type \p K.
305 Otherwise, the value for \p key will be changed to \p pVal.
307 RCU \p synchronize() method can be called. RCU should not be locked.
309 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
310 \p second is \p true if new node has been added or \p false if the node with \p key
313 template <typename K>
314 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
316 int result = do_update( key, key_comparator(),
317 [pVal]( node_type * ) -> mapped_type
321 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
323 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
328 /// Delete \p key from the map
330 RCU \p synchronize() method can be called. RCU should not be locked.
332 Return \p true if \p key is found and deleted, \p false otherwise
334 template <typename K>
335 bool erase( K const& key )
340 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
344 /// Deletes the item from the map using \p pred predicate for searching
346 The function is an analog of \p erase(K const&)
347 but \p pred is used for key comparing.
348 \p Less functor has the interface like \p std::less.
349 \p Less must imply the same element order as the comparator used for building the map.
351 template <typename K, typename Less>
352 bool erase_with( K const& key, Less pred )
357 cds::opt::details::make_comparator_from_less<Less>(),
358 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
362 /// Delete \p key from the map
364 The function searches an item with key \p key, calls \p f functor
365 and deletes the item. If \p key is not found, the functor is not called.
367 The functor \p Func interface:
370 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
374 RCU \p synchronize method can be called. RCU should not be locked.
376 Return \p true if key is found and deleted, \p false otherwise
378 template <typename K, typename Func>
379 bool erase( K const& key, Func f )
384 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
387 disp.dispose_value(pVal);
393 /// Deletes the item from the map using \p pred predicate for searching
395 The function is an analog of \p erase(K const&, Func)
396 but \p pred is used for key comparing.
397 \p Less functor has the interface like \p std::less.
398 \p Less must imply the same element order as the comparator used for building the map.
400 template <typename K, typename Less, typename Func>
401 bool erase_with( K const& key, Less pred, Func f )
406 cds::opt::details::make_comparator_from_less<Less>(),
407 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
410 disp.dispose_value(pVal);
416 /// Extracts a value with minimal key from the map
418 Returns \p exempt_ptr to the leftmost item.
419 If the tree is empty, returns empty \p exempt_ptr.
421 Note that the function returns only the value for minimal key.
422 To retrieve its key use \p extract_min( Func ) member function.
424 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
425 It means that the function gets leftmost leaf of the tree and tries to unlink it.
426 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
427 So, the function returns the item with minimum key at the moment of tree traversing.
429 RCU \p synchronize method can be called. RCU should NOT be locked.
430 The function does not free the item.
431 The deallocator will be implicitly invoked when the returned object is destroyed or when
432 its \p release() member function is called.
434 exempt_ptr extract_min()
436 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
439 /// Extracts minimal key and corresponding value
441 Returns \p exempt_ptr to the leftmost item.
442 If the tree is empty, returns empty \p exempt_ptr.
444 \p Func functor is used to store minimal key.
445 \p Func has the following signature:
448 void operator()( key_type const& key );
451 If the tree is empty, \p f is not called.
452 Otherwise, it is called with minimal key, the pointer to corresponding value is returned
455 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
456 It means that the function gets leftmost leaf of the tree and tries to unlink it.
457 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
458 So, the function returns the item with minimum key at the moment of tree traversing.
460 RCU \p synchronize method can be called. RCU should NOT be locked.
461 The function does not free the item.
462 The deallocator will be implicitly invoked when the returned object is destroyed or when
463 its \p release() member function is called.
465 template <typename Func>
466 exempt_ptr extract_min( Func f )
468 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
471 /// Extracts minimal key and corresponding value
473 This function is a shortcut for the following call:
476 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
478 \p key_type should be copy-assignable. The copy of minimal key
479 is returned in \p min_key argument.
481 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
482 extract_min_key( key_type& min_key )
484 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
487 /// Extracts a value with maximal key from the tree
489 Returns \p exempt_ptr pointer to the rightmost item.
490 If the set is empty, returns empty \p exempt_ptr.
492 Note that the function returns only the value for maximal key.
493 To retrieve its key use \p extract_max( Func ) member function.
495 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
496 It means that the function gets rightmost leaf of the tree and tries to unlink it.
497 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
498 So, the function returns the item with maximum key at the moment of tree traversing.
500 RCU \p synchronize method can be called. RCU should NOT be locked.
501 The function does not free the item.
502 The deallocator will be implicitly invoked when the returned object is destroyed or when
503 its \p release() is called.
505 exempt_ptr extract_max()
507 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
510 /// Extracts the maximal key and corresponding value
512 Returns \p exempt_ptr pointer to the rightmost item.
513 If the set is empty, returns empty \p exempt_ptr.
515 \p Func functor is used to store maximal key.
516 \p Func has the following signature:
519 void operator()( key_type const& key );
522 If the tree is empty, \p f is not called.
523 Otherwise, it is called with maximal key, the pointer to corresponding value is returned
526 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
527 It means that the function gets rightmost leaf of the tree and tries to unlink it.
528 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
529 So, the function returns the item with maximum key at the moment of tree traversing.
531 RCU \p synchronize method can be called. RCU should NOT be locked.
532 The function does not free the item.
533 The deallocator will be implicitly invoked when the returned object is destroyed or when
534 its \p release() is called.
536 template <typename Func>
537 exempt_ptr extract_max( Func f )
539 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
542 /// Extracts the maximal key and corresponding value
544 This function is a shortcut for the following call:
547 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
549 \p key_type should be copy-assignable. The copy of maximal key
550 is returned in \p max_key argument.
552 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
553 extract_max_key( key_type& max_key )
555 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
558 /// Extracts an item from the map
560 The function searches an item with key equal to \p key in the tree,
561 unlinks it, and returns \p exempt_ptr pointer to a value found.
562 If \p key is not found the function returns an empty \p exempt_ptr.
564 RCU \p synchronize method can be called. RCU should NOT be locked.
565 The function does not destroy the value found.
566 The disposer will be implicitly invoked when the returned object is destroyed or when
567 its \p release() member function is called.
569 template <typename Q>
570 exempt_ptr extract( Q const& key )
572 return exempt_ptr(do_extract( key ));
576 /// Extracts an item from the map using \p pred for searching
578 The function is an analog of \p extract(Q const&)
579 but \p pred is used for key compare.
580 \p Less has the interface like \p std::less.
581 \p pred must imply the same element order as the comparator used for building the tree.
583 template <typename Q, typename Less>
584 exempt_ptr extract_with( Q const& key, Less pred )
586 return exempt_ptr(do_extract_with( key, pred ));
589 /// Find the key \p key
591 The function searches the item with key equal to \p key and calls the functor \p f for item found.
592 The interface of \p Func functor is:
595 void operator()( key_type const& key, std::remove_pointer< mapped_type )::type& item );
598 where \p item is the item found.
599 The functor is called under node-level lock.
601 The function applies RCU lock internally.
603 The function returns \p true if \p key is found, \p false otherwise.
605 template <typename K, typename Func>
606 bool find( K const& key, Func f )
608 return do_find( key, key_comparator(),
609 [&f]( node_type * pNode ) -> bool {
610 assert( pNode != nullptr );
611 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
613 f( pNode->m_key, *pVal );
621 /// Finds the key \p val using \p pred predicate for searching
623 The function is an analog of \p find(K const&, Func)
624 but \p pred is used for key comparing.
625 \p Less functor has the interface like \p std::less.
626 \p Less must imply the same element order as the comparator used for building the map.
628 template <typename K, typename Less, typename Func>
629 bool find_with( K const& key, Less pred, Func f )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
633 [&f]( node_type * pNode ) -> bool {
634 assert( pNode != nullptr );
635 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
637 f( pNode->m_key, *pVal );
645 /// Checks whether the map contains \p key
647 The function searches the item with key equal to \p key
648 and returns \p true if it is found, and \p false otherwise.
650 The function applies RCU lock internally.
652 template <typename K>
653 bool contains( K const& key )
655 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
658 /// Checks whether the map contains \p key using \p pred predicate for searching
660 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
661 \p Less functor has the interface like \p std::less.
662 \p Less must imply the same element order as the comparator used for building the set.
664 template <typename K, typename Less>
665 bool contains( K const& key, Less pred )
668 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
671 /// Clears the tree (thread safe, not atomic)
673 The function unlink all items from the tree.
674 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
678 assert( set.empty());
680 the assertion could be raised.
682 For each node the \ref disposer will be called after unlinking.
684 RCU \p synchronize method can be called. RCU should not be locked.
688 while ( extract_min());
691 /// Clears the tree (not thread safe)
693 This function is not thread safe and may be called only when no other thread deals with the tree.
694 The function is used in the tree destructor.
698 clear(); // temp solution
702 /// Checks if the map is empty
705 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
708 /// Returns item count in the map
710 Only leaf nodes containing user data are counted.
712 The value returned depends on item counter type provided by \p Traits template parameter.
713 If it is \p atomicity::empty_item_counter this function always returns 0.
715 The function is not suitable for checking the tree emptiness, use \p empty()
716 member function for this purpose.
720 return m_ItemCounter;
723 /// Returns const reference to internal statistics
724 stat const& statistics() const
729 /// Returns reference to \p sync_monitor object
730 sync_monitor& monitor()
735 sync_monitor const& monitor() const
741 /// Checks internal consistency (not atomic, not thread-safe)
743 The debugging function to check internal consistency of the tree.
745 bool check_consistency() const
747 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
750 /// Checks internal consistency (not atomic, not thread-safe)
752 The debugging function to check internal consistency of the tree.
753 The functor \p Func is called if a violation of internal tree structure
757 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
761 - \p nLevel - the level where the violation is found
762 - \p hLeft - the height of left subtree
763 - \p hRight - the height of right subtree
765 The functor is called for each violation found.
767 template <typename Func>
768 bool check_consistency( Func f ) const
770 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
773 do_check_consistency( pChild, 1, f, nErrors );
781 template <typename Func>
782 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
786 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
787 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
788 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
790 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
793 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
794 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
796 if ( hLeft >= hRight ) {
797 if ( hLeft - hRight > 1 ) {
798 f( nLevel, hLeft, hRight );
804 if ( hRight - hLeft > 1 ) {
805 f( nLevel, hLeft, hRight );
814 template <typename Q, typename Compare, typename Func>
815 bool do_find( Q& key, Compare cmp, Func f ) const
820 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
822 assert( result != find_result::retry );
823 return result == find_result::found;
826 template <typename K, typename Compare, typename Func>
827 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
829 check_deadlock_policy::check();
831 rcu_disposer removed_list;
834 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
838 template <typename K, typename Compare, typename Func>
839 bool do_remove( K const& key, Compare cmp, Func func )
841 // Func must return true if the value was disposed
842 // or false if the value was extracted
844 check_deadlock_policy::check();
846 rcu_disposer removed_list;
849 return try_remove_root( key, cmp, func, removed_list );
853 template <typename Func>
854 mapped_type do_extract_min( Func f )
856 mapped_type pExtracted = nullptr;
859 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
864 template <typename Func>
865 mapped_type do_extract_max( Func f )
867 mapped_type pExtracted = nullptr;
870 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
875 template <typename Func>
876 void do_extract_minmax( int nDir, Func func )
878 check_deadlock_policy::check();
880 rcu_disposer removed_list;
887 // get right child of root
888 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
890 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
891 if ( nChildVersion & node_type::shrinking ) {
892 m_stat.onRemoveRootWaitShrinking();
893 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
894 result = update_flags::retry;
896 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
897 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
900 result = update_flags::retry;
905 if ( result == update_flags::retry )
906 m_stat.onRemoveRetry();
908 m_stat.onExtract( result == update_flags::result_removed );
915 template <typename Q>
916 mapped_type do_extract( Q const& key )
918 mapped_type pExtracted = nullptr;
922 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
924 m_stat.onExtract( pExtracted != nullptr );
928 template <typename Q, typename Less>
929 mapped_type do_extract_with( Q const& key, Less pred )
932 mapped_type pExtracted = nullptr;
935 cds::opt::details::make_comparator_from_less<Less>(),
936 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
938 m_stat.onExtract( pExtracted != nullptr );
945 static int height( node_type * pNode, atomics::memory_order order )
948 return pNode->m_nHeight.load( order );
950 static void set_height( node_type * pNode, int h, atomics::memory_order order )
953 pNode->m_nHeight.store( h, order );
955 static int height_null( node_type * pNode, atomics::memory_order order )
957 return pNode ? height( pNode, order ) : 0;
960 static CDS_CONSTEXPR int const c_stackSize = 64;
962 template <typename Q, typename Compare, typename Func>
963 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
965 assert( gc::is_locked());
971 version_type nVersion;
975 stack_record stack[c_stackSize];
977 stack[0].pNode = pNode;
978 stack[0].nVersion = nVersion;
979 stack[0].nDir = nDir;
982 pNode = stack[pos].pNode;
983 nVersion = stack[pos].nVersion;
984 nDir = stack[pos].nDir;
987 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
989 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
991 m_stat.onFindRetry();
994 m_stat.onFindFailed();
995 return find_result::not_found;
998 int nCmp = cmp( key, pChild->m_key );
1000 if ( pChild->is_valued( memory_model::memory_order_acquire )) {
1002 node_scoped_lock l( m_Monitor, *pChild );
1003 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
1004 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1006 m_stat.onFindSuccess();
1007 return find_result::found;
1012 m_stat.onFindRetry();
1016 m_stat.onFindFailed();
1017 return find_result::not_found;
1020 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1021 if ( nChildVersion & node_type::shrinking ) {
1022 m_stat.onFindWaitShrinking();
1023 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1025 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1027 m_stat.onFindRetry();
1031 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1033 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1035 m_stat.onFindRetry();
1040 assert(pos < c_stackSize);
1041 stack[pos].pNode = pChild;
1042 stack[pos].nVersion = nChildVersion;
1043 stack[pos].nDir = nCmp;
1044 break; // child iteration
1047 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1049 m_stat.onFindRetry();
1053 m_stat.onFindRetry();
1056 return find_result::retry;
1059 template <typename K, typename Compare, typename Func>
1060 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1062 assert( gc::is_locked());
1067 // get right child of root
1068 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1070 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1071 if ( nChildVersion & node_type::shrinking ) {
1072 m_stat.onUpdateRootWaitShrinking();
1073 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1074 result = update_flags::retry;
1076 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1077 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1079 result = update_flags::retry;
1082 // the tree is empty
1083 if ( nFlags & update_flags::allow_insert ) {
1084 // insert into tree as right child of the root
1086 node_scoped_lock l( m_Monitor, *m_pRoot );
1087 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1088 result = update_flags::retry;
1092 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1093 mapped_type pVal = funcUpdate( pNew );
1094 assert( pVal != nullptr );
1095 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1097 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1098 set_height( m_pRoot, 2, memory_model::memory_order_release );
1102 m_stat.onInsertSuccess();
1103 return update_flags::result_inserted;
1106 return update_flags::failed;
1109 if ( result != update_flags::retry )
1114 template <typename K, typename Compare, typename Func>
1115 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1117 assert( gc::is_locked());
1122 // get right child of root
1123 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1125 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1126 if ( nChildVersion & node_type::shrinking ) {
1127 m_stat.onRemoveRootWaitShrinking();
1128 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1129 result = update_flags::retry;
1131 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1132 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1135 result = update_flags::retry;
1140 if ( result == update_flags::retry )
1141 m_stat.onRemoveRetry();
1143 m_stat.onRemove( result == update_flags::result_removed );
1144 return result == update_flags::result_removed;
// Insert/update descent, iterative (an explicit stack replaces the
// recursion of Bronson's original algorithm).
// pNode/nVersion: starting node and the version the CALLER already validated.
// nFlags: bitmask of update_flags::allow_insert / allow_update.
// Returns one of update_flags::{failed, retry, result_inserted, result_updated}.
// NOTE(review): the 'stack_record' struct declaration, the 'pos' index and
// several brace/continue lines appear elided in this extract.
1149 template <typename K, typename Compare, typename Func>
1150 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1152 assert( gc::is_locked());
1153 assert( nVersion != node_type::unlinked );
// stack_record field: version of pNode validated when it was pushed
1158 version_type nVersion;
1161 stack_record stack[c_stackSize];
1163 stack[0].pNode = pNode;
1164 stack[0].nVersion = nVersion;
1166 while ( pos >= 0 ) {
1167 pNode = stack[pos].pNode;
1168 nVersion = stack[pos].nVersion;
// nCmp decides the descent direction; nCmp == 0 means pNode is the match
1170 int nCmp = cmp( key, pNode->m_key );
1172 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1173 if ( result != update_flags::retry )
1176 m_stat.onUpdateRetry();
// Read the child, then re-check pNode's version: if it changed, a
// rotation may have moved the subtree and the read is not trustworthy.
1181 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1182 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1184 m_stat.onUpdateRetry();
1188 if ( pChild == nullptr ) {
// Key not present: insert under pNode if the caller allows it
1190 if ( nFlags & update_flags::allow_insert ) {
1191 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1192 if ( result != update_flags::retry )
1195 m_stat.onUpdateRetry();
1199 return update_flags::failed;
1203 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1204 if ( nChildVersion & node_type::shrinking ) {
1205 m_stat.onUpdateWaitShrinking();
1206 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1209 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1210 // this second read is important, because it is protected by nChildVersion
1212 // validate the read that our caller took to get to node
1213 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1215 m_stat.onUpdateRetry();
1218 // At this point we know that the traversal our parent took to get to node is still valid.
1219 // The recursive implementation will validate the traversal from node to
1220 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1221 // This means that we are no longer vulnerable to node shrinks, and we don't need
1222 // to validate node version any more.
// Push the child and continue the descent one level down
1224 assert( pos < c_stackSize );
1225 stack[pos].pNode = pChild;
1226 stack[pos].nVersion = nChildVersion;
1227 assert( nChildVersion != node_type::unlinked );
1228 break; // child iteration
1230 m_stat.onUpdateRetry();
1234 return update_flags::retry;
// Removal descent, iterative twin of try_update(). Tracks pParent as well,
// because unlinking a node requires locking its parent first.
// pNode/nVersion: starting node and the version the caller validated.
// Returns update_flags::{failed, retry, result_removed}.
// NOTE(review): the 'stack_record' struct declaration, 'pos' index and
// brace/continue lines appear elided in this extract.
1237 template <typename K, typename Compare, typename Func>
1238 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1240 assert( gc::is_locked());
1241 assert( nVersion != node_type::unlinked );
// stack_record fields: parent/node pair plus the validated node version
1245 node_type * pParent;
1247 version_type nVersion;
1250 stack_record stack[c_stackSize];
1252 stack[0].pParent = pParent;
1253 stack[0].pNode = pNode;
1254 stack[0].nVersion = nVersion;
1256 while ( pos >= 0 ) {
1257 pParent = stack[pos].pParent;
1258 pNode = stack[pos].pNode;
1259 nVersion = stack[pos].nVersion;
// nCmp == 0 means pNode holds the key: try to remove it here
1261 int nCmp = cmp( key, pNode->m_key );
1263 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1264 if ( result != update_flags::retry )
1267 m_stat.onRemoveRetry();
// Read the child, then re-validate pNode's version (hand-over-hand)
1272 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1273 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1275 m_stat.onRemoveRetry();
// Reached a null child without finding the key: nothing to remove
1279 if ( pChild == nullptr )
1280 return update_flags::failed;
1283 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1284 if ( nChildVersion & node_type::shrinking ) {
1285 m_stat.onRemoveWaitShrinking();
1286 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1289 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1290 // this second read is important, because it is protected by nChildVersion
1292 // validate the read that our caller took to get to node
1293 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1295 m_stat.onRemoveRetry();
1299 // At this point we know that the traversal our parent took to get to node is still valid.
1300 // The recursive implementation will validate the traversal from node to
1301 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1302 // This means that we are no longer vulnerable to node shrinks, and we don't need
1303 // to validate node version any more.
// Descend: pNode becomes the parent for the next level
1305 assert( pos < c_stackSize );
1306 stack[pos].pParent = pNode;
1307 stack[pos].pNode = pChild;
1308 stack[pos].nVersion = nChildVersion;
1309 break; // child iteration
1311 m_stat.onRemoveRetry();
1314 return update_flags::retry;
// Extracts the minimum (nDir == left_child) or maximum (nDir == right_child)
// element: descends always in direction nDir until the child slot is null,
// then removes the node found there via try_remove_node().
// Returns update_flags::{failed, retry, result_removed}.
// NOTE(review): 'stack_record' declaration, 'pos' index and brace/continue
// lines appear elided in this extract.
1317 template <typename Func>
1318 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1320 assert( gc::is_locked());
1321 assert( nVersion != node_type::unlinked );
// stack_record fields: parent/node pair plus the validated node version
1325 node_type * pParent;
1327 version_type nVersion;
1330 stack_record stack[c_stackSize];
1332 stack[0].pParent = pParent;
1333 stack[0].pNode = pNode;
1334 stack[0].nVersion = nVersion;
1336 while ( pos >= 0 ) {
1337 pParent = stack[pos].pParent;
1338 pNode = stack[pos].pNode;
1339 nVersion = stack[pos].nVersion;
// Read the extreme-direction child, then re-validate pNode's version
1342 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
1343 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1345 m_stat.onRemoveRetry();
// No child in direction nDir: pNode is the min/max — remove it
1349 if ( pChild == nullptr ) {
1351 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1352 if ( result != update_flags::retry )
1355 m_stat.onRemoveRetry();
1359 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1360 if ( nChildVersion & node_type::shrinking ) {
1361 m_stat.onRemoveWaitShrinking();
1362 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1365 else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
1366 // this second read is important, because it is protected by nChildVersion
1368 // validate the read that our caller took to get to node
1369 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1371 m_stat.onRemoveRetry();
1375 // At this point we know that the traversal our parent took to get to node is still valid.
1376 // The recursive implementation will validate the traversal from node to
1377 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1378 // This means that we are no longer vulnerable to node shrinks, and we don't need
1379 // to validate node version any more.
// Descend one level further toward the extreme
1381 assert( pos < c_stackSize );
1382 stack[pos].pParent = pNode;
1383 stack[pos].pNode = pChild;
1384 stack[pos].nVersion = nChildVersion;
1385 break; // child iteration
1387 m_stat.onRemoveRetry();
1390 return update_flags::retry;
// Inserts a new node for \p key as the nDir child of pNode.
// funcUpdate produces the mapped value for the freshly allocated node.
// With c_bRelaxedInsert the node is allocated and filled BEFORE taking the
// node lock (optimistic); the slot/version is then revalidated under lock.
// Returns update_flags::retry or update_flags::result_inserted.
// NOTE(review): 'pNew' declaration, free/dispose calls on the relaxed-insert
// failure path and brace lines appear elided in this extract.
1393 template <typename K, typename Func>
1394 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Helper: fill the new node's value from the user functor
1398 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1399 mapped_type pVal = funcUpdate( pNew );
1400 assert( pVal != nullptr );
1401 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
// Optimistic pre-check before paying for allocation + lock
1404 if ( c_bRelaxedInsert ) {
1405 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1406 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1408 m_stat.onInsertRetry();
1409 return update_flags::retry;
1412 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1415 node_type * pDamaged;
1417 assert( pNode != nullptr );
1418 node_scoped_lock l( m_Monitor, *pNode );
// Revalidate under the lock: version unchanged and the slot still empty
1420 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1421 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1423 if ( c_bRelaxedInsert ) {
// Undo the optimistic fill; the value/node disposal lines are
// not visible in this extract — presumably disposed here.
1424 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1425 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1428 m_stat.onRelaxedInsertFailed();
1431 m_stat.onInsertRetry();
1432 return update_flags::retry;
// Non-relaxed path allocates only after the lock-protected validation
1435 if ( !c_bRelaxedInsert )
1436 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1438 pNode->child( pNew, nDir, memory_model::memory_order_release );
1439 pDamaged = fix_height_locked( pNode );
1443 m_stat.onInsertSuccess();
// fix_height_locked() reported an ancestor needing rebalancing
1446 fix_height_and_rebalance( pDamaged, disp );
1447 m_stat.onInsertRebalanceRequired();
1450 return update_flags::result_inserted;
// Updates (or fills) the value of an existing node whose key matched.
// Locks pNode, revalidates its version and linked state, then swaps in the
// value produced by funcUpdate; the old value is deferred to the RCU disposer.
// Returns update_flags::{retry, failed, result_inserted, result_updated}.
// NOTE(review): 'pOld'/'bInserted' declarations and several brace/else
// lines appear elided in this extract.
1453 template <typename Func>
1454 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1458 assert( pNode != nullptr );
1460 node_scoped_lock l( m_Monitor, *pNode );
// Version changed since the caller's read: the traversal is stale
1462 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1463 return update_flags::retry;
// Node was spliced out concurrently: restart the descent
1465 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
1466 m_stat.onUpdateUnlinked();
1467 return update_flags::retry;
// Key already has a value but the caller did not allow overwriting
1470 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) {
1471 m_stat.onInsertFailed();
1472 return update_flags::failed;
// A null old value means pNode was a routing node: this is an insert
1475 pOld = pNode->value( memory_model::memory_order_relaxed );
1476 bInserted = pOld == nullptr;
1477 mapped_type pVal = funcUpdate( pNode );
1481 assert( pVal != nullptr );
1482 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
// Defer destruction of the replaced value to the RCU grace period
1487 disp.dispose_value(pOld);
1488 m_stat.onDisposeValue();
1493 m_stat.onInsertSuccess();
1494 return update_flags::result_inserted;
1497 m_stat.onUpdateSuccess();
1498 return update_flags::result_updated;
// Removes the value of pNode (whose key matched). Two cases:
//  1) pNode has at most one child — lock pParent then pNode (lock order:
//     parent first), unlink pNode and splice its child into pParent;
//  2) pNode has two children — it cannot be unlinked, so it is turned into
//     a valueless routing node instead.
// func(key, pOld, disp) consumes/disposes the removed value; its return
// distinguishes dispose vs. extract for statistics.
// Returns update_flags::{failed, retry, result_removed}.
// NOTE(review): 'pOld' declarations, null checks on pOld and brace lines
// appear elided in this extract.
1501 template <typename Func>
1502 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1504 assert( pParent != nullptr );
1505 assert( pNode != nullptr );
// Already a routing node: nothing to remove
1507 if ( !pNode->is_valued( memory_model::memory_order_acquire ))
1508 return update_flags::failed;
1510 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1511 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1513 // pNode can be replaced with its child
1515 node_type * pDamaged;
1518 node_scoped_lock lp( m_Monitor, *pParent );
// Parent must still be linked and still be pNode's parent
1519 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1520 return update_flags::retry;
1523 node_scoped_lock ln( m_Monitor, *pNode );
1524 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1525 return update_flags::retry;
1527 pOld = pNode->value( memory_model::memory_order_relaxed );
1529 return update_flags::failed;
// Splice pNode out; on failure (node shape changed) retry the descent
1531 if ( !try_unlink_locked( pParent, pNode, disp ))
1532 return update_flags::retry;
1534 pDamaged = fix_height_locked( pParent );
1538 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1539 m_stat.onDisposeValue();
1541 m_stat.onExtractValue();
1544 fix_height_and_rebalance( pDamaged, disp );
1545 m_stat.onRemoveRebalanceRequired();
1549 // pNode is an internal with two children
1553 node_scoped_lock ln( m_Monitor, *pNode );
1554 pOld = pNode->value( memory_model::memory_order_relaxed );
1555 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1556 return update_flags::retry;
1558 return update_flags::failed;
// Convert pNode into a routing node: value removed, node stays linked
1560 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1561 m_stat.onMakeRoutingNode();
1565 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1566 m_stat.onDisposeValue();
1568 m_stat.onExtractValue();
1570 return update_flags::result_removed;
// Splices pNode (which has at most one child) out of the tree.
// Both pParent and pNode must be locked by the caller.
// Returns false when the tree shape changed under us (caller retries).
// The removed node is handed to the RCU disposer for deferred reclamation.
// NOTE(review): 'return false' lines for the failure branches and brace
// lines appear elided in this extract.
1573 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1575 // pParent and pNode must be locked
1576 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed));
1578 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1579 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1580 if ( pNode != pParentLeft && pNode != pParentRight ) {
1581 // node is no longer a child of parent
1585 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1586 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1588 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1589 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1590 if ( pLeft != nullptr && pRight != nullptr ) {
1591 // splicing is no longer possible
// At most one child exists; pSplice replaces pNode in the parent slot
1594 node_type * pSplice = pLeft ? pLeft : pRight;
1596 if ( pParentLeft == pNode )
1597 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1599 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1602 pSplice->parent( pParent, memory_model::memory_order_release );
1604 // Mark the node as unlinked
1605 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1607 // The value will be disposed by calling function
1608 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
// Deferred free: concurrent RCU readers may still hold pNode
1610 disp.dispose( pNode );
1611 m_stat.onDisposeNode();
1618 private: // rotations
// Classifies what maintenance pNode needs:
//  - unlink_required: valueless routing node with a missing child;
//  - rebalance_required: AVL balance factor outside [-1, 1];
//  - a positive height value: stored height is stale, update to this value;
//  - nothing_required: node is consistent.
1620 int estimate_node_condition( node_type * pNode )
1622 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1623 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
1625 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1626 return unlink_required;
1628 int h = height( pNode, memory_model::memory_order_acquire );
1629 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1630 int hR = height_null( pRight, memory_model::memory_order_acquire );
1632 int hNew = 1 + std::max( hL, hR );
1633 int nBalance = hL - hR;
1635 if ( nBalance < -1 || nBalance > 1 )
1636 return rebalance_required;
// hNew > 0 always, and the maintenance codes are distinct sentinels
1638 return h != hNew ? hNew : nothing_required;
// Locking wrapper over fix_height_locked(): acquires the node's monitor
// lock, then repairs the stored height. Returns the next damaged ancestor
// (or presumably nullptr/sentinel per fix_height_locked — see that function).
1641 node_type * fix_height( node_type * pNode )
1643 assert( pNode != nullptr );
1644 node_scoped_lock l( m_Monitor, *pNode );
1645 return fix_height_locked( pNode );
// Repairs pNode's cached height based on estimate_node_condition().
// If the node instead needs rebalancing or unlinking, that is reported to
// the caller (the branches for those cases are not visible in this extract).
// Returns the parent as the next node whose height may now be damaged.
// NOTE(review): the 'switch (h)' keyword and the bodies of the
// rebalance_required/unlink_required/nothing_required cases appear elided.
1648 node_type * fix_height_locked( node_type * pNode )
1650 // pNode must be locked!!!
1651 int h = estimate_node_condition( pNode );
1653 case rebalance_required:
1654 case unlink_required:
1656 case nothing_required:
// Default case: h is the corrected height — store it and propagate up
1659 set_height( pNode, h, memory_model::memory_order_release );
1660 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from pNode repairing heights and performing rotations until no
// damage remains. Stops at the pseudo-root (whose parent is null) or when
// the current node needs nothing / was unlinked concurrently.
// Lock order: parent first, then node — consistent with try_remove_node().
1664 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1666 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1667 int nCond = estimate_node_condition( pNode );
1668 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ))
// Height-only damage: fix it and continue with the returned ancestor
1671 if ( nCond != unlink_required && nCond != rebalance_required )
1672 pNode = fix_height( pNode );
1674 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1675 assert( pParent != nullptr );
1677 node_scoped_lock lp( m_Monitor, *pParent );
// Revalidate the parent link under the parent's lock before rebalancing
1678 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1679 node_scoped_lock ln( m_Monitor, *pNode );
1680 pNode = rebalance_locked( pParent, pNode, disp );
// Rebalances pNode (both pParent and pNode locked by the caller).
// Unlinks a valueless routing node, rotates right/left when the balance
// factor leaves [-1, 1], or just refreshes a stale height.
// Returns the next damaged node, or nullptr when nothing more is needed.
// NOTE(review): the unused-variable suppression / return lines between the
// visible branches appear elided in this extract.
1687 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1689 // pParent and pNode should be locked.
1690 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1691 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1693 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1694 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Routing node with a missing child: try to splice it out entirely
1696 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1697 if ( try_unlink_locked( pParent, pNode, disp ))
1698 return fix_height_locked( pParent );
1700 // retry needed for pNode
1705 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1706 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1708 int h = height( pNode, memory_model::memory_order_acquire );
1709 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1710 int hR = height_null( pRight, memory_model::memory_order_acquire );
1711 int hNew = 1 + std::max( hL, hR );
1712 int balance = hL - hR;
// Left-heavy → rotate right; right-heavy → rotate left
1715 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1716 else if ( balance < -1 )
1717 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1718 else if ( hNew != h ) {
1719 set_height( pNode, hNew, memory_model::memory_order_release );
1721 // pParent is already locked
1722 return fix_height_locked( pParent );
// pNode's left subtree is too tall: performs a right rotation, or a
// left-right double rotation when pLeft is itself right-heavy.
// pParent and pNode are locked; pLeft is locked here before inspecting it.
// Returns the node that still needs work (same node ⇒ caller retries).
// NOTE(review): several condition lines (e.g. the hL/hR comparison guarding
// the early return) appear elided in this extract.
1728 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1730 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1731 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1732 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1734 // pParent and pNode is locked yet
1735 // pNode->pLeft is too large, we will rotate-right.
1736 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1739 assert( pLeft != nullptr );
1740 node_scoped_lock l( m_Monitor, *pLeft );
// The left child changed before we got its lock — stale, retry
1741 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1742 return pNode; // retry for pNode
1744 int hL = height( pLeft, memory_model::memory_order_acquire );
1746 return pNode; // retry
1748 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1749 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1750 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1751 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
// Simple single right rotation suffices
1755 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
// Otherwise lock pLeft's right child and decide on a double rotation
1759 node_scoped_lock lr( m_Monitor, *pLRight );
1760 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1761 return pNode; // retry
1763 hLR = height( pLRight, memory_model::memory_order_acquire );
1765 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1767 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1768 int balance = hLL - hLRL;
1769 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1770 // nParent.child.left won't be damaged after a double rotation
1771 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1775 return pNode; // retry
1777 // focus on pLeft, if necessary pNode will be balanced later
1778 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): pNode's right subtree is
// too tall, so perform a left rotation, or a right-left double rotation
// when pRight is itself left-heavy. pParent and pNode are locked; pRight
// (and possibly its left child) are locked here.
// Returns the node that still needs work (same node ⇒ caller retries).
// NOTE(review): some guard-condition lines appear elided in this extract.
1783 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1785 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1786 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1787 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1789 // pParent and pNode is locked yet
1791 assert( pRight != nullptr );
1792 node_scoped_lock l( m_Monitor, *pRight );
// The right child changed before we got its lock — stale, retry
1793 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1794 return pNode; // retry for pNode
1796 int hR = height( pRight, memory_model::memory_order_acquire );
1797 if ( hL - hR >= -1 )
1798 return pNode; // retry
1800 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1801 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1802 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1803 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
// Simple single left rotation suffices
1805 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
// Otherwise lock pRight's left child and decide on a double rotation
1808 node_scoped_lock lrl( m_Monitor, *pRLeft );
1809 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1810 return pNode; // retry
1812 hRL = height( pRLeft, memory_model::memory_order_acquire );
1814 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1816 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1817 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1818 int balance = hRR - hRLR;
1819 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1820 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1823 return pNode; // retry
// focus on pRight; if necessary pNode will be balanced later
1824 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" (a rotation is about to modify it) so that
// concurrent optimistic readers that sampled its version will wait/retry.
// Must be paired with end_change() after the rotation completes.
1828 static void begin_change( node_type * pNode, version_type version )
1830 assert(pNode->version(memory_model::memory_order_acquire) == version );
1831 assert( (version & node_type::shrinking) == 0 );
1832 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a shrink started by begin_change(): publishes a fresh version
// number so every reader that sampled the old version will revalidate.
1834 static void end_change( node_type * pNode, version_type version )
1836 // Clear shrinking and unlinked flags and increment version
1837 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft replaces pNode under pParent, pNode becomes
// pLeft's right child, and pLeft's former right child (pLRight) becomes
// pNode's left child. pParent, pNode and pLeft are locked by the caller.
// pNode is marked shrinking for the duration; release fences order the
// pointer updates for concurrent lock-free readers.
// Returns the next node that still needs fixing (damage propagation).
// NOTE(review): the return statements of the damage-check branches appear
// elided in this extract — they presumably return pNode/pLeft respectively.
1840 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1842 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1843 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1845 begin_change( pNode, nodeVersion );
// Re-link pNode first: it is the deepest node being moved
1847 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1848 if ( pLRight != nullptr )
1849 pLRight->parent( pNode, memory_model::memory_order_release );
1851 atomics::atomic_thread_fence( memory_model::memory_order_release );
1853 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1854 pNode->parent( pLeft, memory_model::memory_order_release );
1856 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Finally swing the parent's child pointer to pLeft
1858 if ( pParentLeft == pNode )
1859 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1861 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1862 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1864 pLeft->parent( pParent, memory_model::memory_order_release );
1866 atomics::atomic_thread_fence( memory_model::memory_order_release );
1868 // fix up heights links
1869 int hNode = 1 + std::max( hLR, hR );
1870 set_height( pNode, hNode, memory_model::memory_order_release );
1871 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
1873 end_change( pNode, nodeVersion );
1874 m_stat.onRotateRight();
1876 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1877 // parent.child). pNode is the deepest. Perform as many fixes as we can
1878 // with the locks we've got.
1880 // We've already fixed the height for pNode, but it might still be outside
1881 // our allowable balance range. In that case a simple fix_height_locked()
1883 int nodeBalance = hLR - hR;
1884 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1885 // we need another rotation at pNode
1886 m_stat.onRotateAfterRightRotation();
1890 // we've fixed balance and height damage for pNode, now handle
1891 // extra-routing node damage
1892 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1893 // we need to remove pNode and then repair
1894 m_stat.onRemoveAfterRightRotation();
1898 // we've already fixed the height at pLeft, do we need a rotation here?
1899 int leftBalance = hLL - hNode;
1900 if ( leftBalance < -1 || leftBalance > 1 ) {
1901 m_stat.onRotateAfterRightRotation();
1905 // pLeft might also have routing node damage (if pLeft.left was null)
1906 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) {
1907 m_stat.onDamageAfterRightRotation();
1911 // try to fix the parent height while we've still got the lock
1912 return fix_height_locked( pParent );
// Mirror of rotate_right_locked(): single left rotation. pRight replaces
// pNode under pParent, pNode becomes pRight's left child, and pRLeft
// becomes pNode's right child. Same shrinking/version protocol and
// release-fence ordering for concurrent readers.
// Returns the next node that still needs fixing.
// NOTE(review): the return statements of the damage-check branches appear
// elided in this extract.
1915 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1917 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1918 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1920 begin_change( pNode, nodeVersion );
1922 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1923 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1924 if ( pRLeft != nullptr )
1925 pRLeft->parent( pNode, memory_model::memory_order_release );
1927 atomics::atomic_thread_fence( memory_model::memory_order_release );
1929 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1930 pNode->parent( pRight, memory_model::memory_order_release );
1932 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Finally swing the parent's child pointer to pRight
1934 if ( pParentLeft == pNode )
1935 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1937 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1938 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1940 pRight->parent( pParent, memory_model::memory_order_release );
1942 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Recompute heights bottom-up: pNode first, then its new parent pRight
1945 int hNode = 1 + std::max( hL, hRL );
1946 set_height( pNode, hNode, memory_model::memory_order_release );
1947 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
1949 end_change( pNode, nodeVersion );
1950 m_stat.onRotateLeft();
// Damage checks, mirroring rotate_right_locked()
1952 int nodeBalance = hRL - hL;
1953 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1954 m_stat.onRotateAfterLeftRotation();
1958 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1959 m_stat.onRemoveAfterLeftRotation();
1963 int rightBalance = hRR - hNode;
1964 if ( rightBalance < -1 || rightBalance > 1 ) {
1965 m_stat.onRotateAfterLeftRotation();
1969 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) {
1970 m_stat.onDamageAfterLeftRotation();
1974 return fix_height_locked( pParent );
// Left-right double rotation: pLRight (pLeft's right child) is lifted to
// replace pNode under pParent; pLeft becomes its left child and pNode its
// right child. Both pNode and pLeft are version-marked as shrinking for
// the duration. pParent, pNode, pLeft and pLRight are locked by the caller.
// Returns the next node that still needs fixing.
// NOTE(review): the parent's left/right selection condition and the return
// statements of the damage-check branches appear elided in this extract.
1977 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1979 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1980 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1982 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1983 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1984 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1985 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
// Both moved nodes enter the shrinking state before any pointer changes
1987 begin_change( pNode, nodeVersion );
1988 begin_change( pLeft, leftVersion );
1990 // fix up pNode links, careful about the order!
1991 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1992 if ( pLRR != nullptr )
1993 pLRR->parent( pNode, memory_model::memory_order_release );
1995 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1996 if ( pLRL != nullptr )
1997 pLRL->parent( pLeft, memory_model::memory_order_release );
// pLRight adopts both pLeft (left) and pNode (right)
1999 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
2000 pLeft->parent( pLRight, memory_model::memory_order_release );
2002 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
2003 pNode->parent( pLRight, memory_model::memory_order_release );
2006 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
2008 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
2009 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
2011 pLRight->parent( pParent, memory_model::memory_order_release );
// Recompute heights bottom-up: pNode, pLeft, then the lifted pLRight
2014 int hNode = 1 + std::max( hLRR, hR );
2015 set_height( pNode, hNode, memory_model::memory_order_release );
2016 int hLeft = 1 + std::max( hLL, hLRL );
2017 set_height( pLeft, hLeft, memory_model::memory_order_release );
2018 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
2020 end_change( pNode, nodeVersion );
2021 end_change( pLeft, leftVersion );
2022 m_stat.onRotateRightOverLeft();
2024 // caller should have performed only a single rotation if pLeft was going
2025 // to end up damaged
2026 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2027 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2029 // We have damaged pParent, pLR (now parent.child), and pNode (now
2030 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2031 // can with the locks we've got.
2033 // We've already fixed the height for pNode, but it might still be outside
2034 // our allowable balance range. In that case a simple fix_height_locked()
2036 int nodeBalance = hLRR - hR;
2037 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2038 // we need another rotation at pNode
2039 m_stat.onRotateAfterRLRotation();
2043 // pNode might also be damaged by being an unnecessary routing node
2044 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2045 // repair involves splicing out pNode and maybe more rotations
2046 m_stat.onRemoveAfterRLRotation();
2050 // we've already fixed the height at pLRight, do we need a rotation here?
2051 int balanceLR = hLeft - hNode;
2052 if ( balanceLR < -1 || balanceLR > 1 ) {
2053 m_stat.onRotateAfterRLRotation();
2057 // try to fix the parent height while we've still got the lock
2058 return fix_height_locked( pParent );
// Right-left double rotation, mirror of rotate_right_over_left_locked():
// pRLeft (pRight's left child) is lifted to replace pNode under pParent;
// pNode becomes its left child and pRight its right child. Both pNode and
// pRight are version-marked as shrinking for the duration.
// Returns the next node that still needs fixing.
// NOTE(review): the parent's left/right selection condition and the return
// statements of the damage-check branches appear elided in this extract.
2061 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2063 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2064 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2066 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2067 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2068 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2069 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
// Both moved nodes enter the shrinking state before any pointer changes
2071 begin_change( pNode, nodeVersion );
2072 begin_change( pRight, rightVersion );
2074 // fix up pNode links, careful about the order!
2075 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2076 if ( pRLL != nullptr )
2077 pRLL->parent( pNode, memory_model::memory_order_release );
2079 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2080 if ( pRLR != nullptr )
2081 pRLR->parent( pRight, memory_model::memory_order_release );
// pRLeft adopts both pRight (right) and pNode (left)
2083 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2084 pRight->parent( pRLeft, memory_model::memory_order_release );
2086 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2087 pNode->parent( pRLeft, memory_model::memory_order_release );
2090 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2092 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2093 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2095 pRLeft->parent( pParent, memory_model::memory_order_release );
// Recompute heights bottom-up: pNode, pRight, then the lifted pRLeft
2098 int hNode = 1 + std::max( hL, hRLL );
2099 set_height( pNode, hNode, memory_model::memory_order_release );
2100 int hRight = 1 + std::max( hRLR, hRR );
2101 set_height( pRight, hRight, memory_model::memory_order_release );
2102 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
2104 end_change( pNode, nodeVersion );
2105 end_change( pRight, rightVersion );
2106 m_stat.onRotateLeftOverRight();
// Caller must have chosen a single rotation if pRight would end up damaged
2108 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
2110 int nodeBalance = hRLL - hL;
2111 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2112 m_stat.onRotateAfterLRRotation();
2116 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
2117 m_stat.onRemoveAfterLRRotation();
2121 int balRL = hRight - hNode;
2122 if ( balRL < -1 || balRL > 1 ) {
2123 m_stat.onRotateAfterLRRotation();
2127 return fix_height_locked( pParent );
2132 }} // namespace cds::container
2134 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H