2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
32 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
34 #include <type_traits> // is_base_of
35 #include <cds/container/details/bronson_avltree_base.h>
36 #include <cds/urcu/details/check_deadlock.h>
37 #include <cds/urcu/exempt_ptr.h>
39 namespace cds { namespace container {
41 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
42 /** @ingroup cds_nonintrusive_map
43 @ingroup cds_nonintrusive_tree
44 @headerfile cds/container/bronson_avltree_map_rcu.h
45 @anchor cds_container_BronsonAVLTreeMap_rcu_ptr
47 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
48 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
49 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
50 the disposer functor provided by \p Traits template parameter.
52 <b>Template arguments</b>:
53 - \p RCU - one of \ref cds_urcu_gc "RCU type"
55 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
57 - \p Traits - tree traits, default is \p bronson_avltree::traits
58 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
59 instead of \p Traits template argument.
61 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
62 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
68 # ifdef CDS_DOXYGEN_INVOKED
69 typename Traits = bronson_avltree::traits
74 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
77 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
78 typedef Key key_type; ///< type of a key stored in the map
79 typedef T * mapped_type; ///< type of value stored in the map
80 typedef Traits traits; ///< Traits template parameter
82 # ifdef CDS_DOXYGEN_INVOKED
83 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
85 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
87 typedef typename traits::item_counter item_counter; ///< Item counting policy
88 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
89 typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes
90 typedef typename traits::stat stat; ///< internal statistics
91 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
92 typedef typename traits::back_off back_off; ///< Back-off strategy
93 typedef typename traits::disposer disposer; ///< Value disposer
94 typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking
96 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
97 static CDS_CONSTEXPR bool const c_bRelaxedInsert = traits::relaxed_insert;
99 /// Group of \p extract_xxx functions does not require external locking
100 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false;
102 # ifdef CDS_DOXYGEN_INVOKED
103 /// Returned pointer to \p mapped_type of extracted node
104 typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr;
106 typedef cds::urcu::exempt_ptr< gc,
107 typename std::remove_pointer<mapped_type>::type,
108 typename std::remove_pointer<mapped_type>::type,
// Internal machinery typedefs (fragment: enclosing class braces not visible
// in this extraction — confirm against upstream libcds).
114 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
// Internal tree node, parameterized by key, value pointer, and sync monitor.
118 typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type;
119 typedef typename node_type::version_type version_type;
// Allocator wrapper used by alloc_node()/free_node() below.
121 typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator;
// Deadlock-check policy invoked before operations that may call RCU synchronize()
// (see do_update()/do_remove(): "RCU should not be locked").
122 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
124 enum class find_result
141 result_inserted = allow_insert,
142 result_updated = allow_update,
149 nothing_required = -3,
150 rebalance_required = -2,
159 typedef typename sync_monitor::template scoped_lock<node_type> node_scoped_lock;
// --- Node lifecycle helpers (extraction fragment: braces/elided lines missing) ---
// Allocates a new tree node through cxx_allocator, forwarding the key.
164 template <typename K>
165 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
167 return cxx_allocator().New( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Frees a node WITHOUT invoking the value disposer; the caller must ensure
// the node no longer holds a value and its sync-monitor slot is free.
170 static void free_node( node_type * pNode )
172 // Free node without disposer
173 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
174 assert( pNode->m_SyncMonitorInjection.check_free());
175 cxx_allocator().Delete( pNode );
// Disposes a user value pointer. Body not visible in this extraction —
// presumably invokes the Traits disposer; TODO confirm against upstream.
178 static void free_value( mapped_type pVal )
// Thin accessors delegating to node_type's child/parent links.
183 static node_type * child( node_type * pNode, int nDir, atomics::memory_order order )
185 return pNode->child( nDir, order );
188 static node_type * parent( node_type * pNode, atomics::memory_order order )
190 return pNode->parent( order );
// rcu_disposer fragment: collects nodes (intrusive list via m_pNextRemoved)
// and at most one value while the RCU lock is held, then retires them via
// gc::retire_ptr once the lock is released (see assert below).
196 node_type * m_pRetiredList; ///< head of retired node list
197 mapped_type m_pRetiredValue; ///< value retired
// Constructor initializer fragment: empty retired list, no retired value.
201 : m_pRetiredList( nullptr )
202 , m_pRetiredValue( nullptr )
// Pushes an already-unvalued node onto the retired list.
210 void dispose( node_type * pNode )
212 assert( !pNode->is_valued( memory_model::memory_order_relaxed ));
213 pNode->m_pNextRemoved = m_pRetiredList;
214 m_pRetiredList = pNode;
// Records the single value pointer to be retired with the Traits disposer.
217 void dispose_value( mapped_type pVal )
219 assert( m_pRetiredValue == nullptr );
220 m_pRetiredValue = pVal;
// Functor handed to gc::retire_ptr to free retired nodes.
224 struct internal_disposer
226 void operator()( node_type * p ) const
// Retire pass: must run outside the RCU read-side critical section.
234 assert( !gc::is_locked() );
236 // TODO: use RCU::batch_retire
// Fetch the next link before retiring the current node (p is reclaimed later).
239 for ( node_type * p = m_pRetiredList; p; ) {
240 node_type * pNext = static_cast<node_type *>( p->m_pNextRemoved );
241 // Value already disposed
242 gc::template retire_ptr<internal_disposer>( p );
// The recorded value is retired with the user-provided disposer.
247 if ( m_pRetiredValue )
248 gc::template retire_ptr<disposer>( m_pRetiredValue );
// Data members (fragment): sentinel root header node, item counter,
// and the node-level synchronization monitor.
256 typename node_type::base_class m_Root;
258 item_counter m_ItemCounter;
259 mutable sync_monitor m_Monitor;
264 /// Creates empty map
// m_pRoot aliases the sentinel m_Root; the real tree hangs off its right child
// (see the "get right child of root" pattern in try_update_root et al.).
266 : m_pRoot( static_cast<node_type *>( &m_Root ))
277 The \p key_type should be constructible from a value of type \p K.
279 RCU \p synchronize() can be called. RCU should not be locked.
281 Returns \p true if inserting successful, \p false otherwise.
// insert: inserts key -> pVal; fails if the key already exists.
// The update functor asserts the node holds no value yet; its elided body
// presumably returns pVal for installation — TODO confirm against upstream.
283 template <typename K>
284 bool insert( K const& key, mapped_type pVal )
286 return do_update(key, key_comparator(),
287 [pVal]( node_type * pNode ) -> mapped_type
289 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
// allow_insert only: updating an existing key is not permitted here.
293 update_flags::allow_insert
294 ) == update_flags::result_inserted;
297 /// Updates the value for \p key
299 The operation performs inserting or updating the value for \p key with lock-free manner.
300 If \p bInsert is \p false, only updating of existing node is possible.
302 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
303 then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be
304 constructible from type \p K.
305 Otherwise, the value for \p key will be changed to \p pVal.
307 RCU \p synchronize() method can be called. RCU should not be locked.
309 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
310 \p second is \p true if new node has been added or \p false if the node with \p key
313 template <typename K>
314 std::pair<bool, bool> update( K const& key, mapped_type pVal, bool bInsert = true )
316 int result = do_update( key, key_comparator(),
317 [pVal]( node_type * ) -> mapped_type
// allow_update always; allow_insert only when bInsert is set.
321 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
// first = operation succeeded at all; second = a new node was inserted.
323 return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 );
328 /// Delete \p key from the map
330 RCU \p synchronize() method can be called. RCU should not be locked.
332 Return \p true if \p key is found and deleted, \p false otherwise
// erase: removes key; functor returns true => the value ownership is handed
// to the rcu_disposer (disposed after the RCU lock is released).
334 template <typename K>
335 bool erase( K const& key )
340 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
344 /// Deletes the item from the map using \p pred predicate for searching
346 The function is an analog of \p erase(K const&)
347 but \p pred is used for key comparing.
348 \p Less functor has the interface like \p std::less.
349 \p Less must imply the same element order as the comparator used for building the map.
// erase_with: same as erase(K const&) but the comparator is built from
// the user-supplied Less predicate.
351 template <typename K, typename Less>
352 bool erase_with( K const& key, Less pred )
357 cds::opt::details::make_comparator_from_less<Less>(),
358 []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; }
362 /// Delete \p key from the map
364 The function searches an item with key \p key, calls \p f functor
365 and deletes the item. If \p key is not found, the functor is not called.
367 The functor \p Func interface:
370 void operator()( key_type const& key, std::remove_pointer<mapped_type>::type& val) { ... }
374 RCU \p synchronize method can be called. RCU should not be locked.
376 Return \p true if key is found and deleted, \p false otherwise
// erase(key, f): the user functor f is applied to the found item (the call
// is on an elided line of this extraction) before the value is scheduled
// for disposal via the rcu_disposer.
378 template <typename K, typename Func>
379 bool erase( K const& key, Func f )
384 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
387 disp.dispose_value(pVal);
393 /// Deletes the item from the map using \p pred predicate for searching
395 The function is an analog of \p erase(K const&, Func)
396 but \p pred is used for key comparing.
397 \p Less functor has the interface like \p std::less.
398 \p Less must imply the same element order as the comparator used for building the map.
// erase_with(key, pred, f): erase(key, f) variant using a comparator built
// from the Less predicate.
400 template <typename K, typename Less, typename Func>
401 bool erase_with( K const& key, Less pred, Func f )
406 cds::opt::details::make_comparator_from_less<Less>(),
407 [&f]( key_type const& key, mapped_type pVal, rcu_disposer& disp ) -> bool {
410 disp.dispose_value(pVal);
416 /// Extracts a value with minimal key from the map
418 Returns \p exempt_ptr to the leftmost item.
419 If the tree is empty, returns empty \p exempt_ptr.
421 Note that the function returns only the value for minimal key.
422 To retrieve its key use \p extract_min( Func ) member function.
424 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
425 It means that the function gets leftmost leaf of the tree and tries to unlink it.
426 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
427 So, the function returns the item with minimum key at the moment of tree traversing.
429 RCU \p synchronize method can be called. RCU should NOT be locked.
430 The function does not free the item.
431 The deallocator will be implicitly invoked when the returned object is destroyed or when
432 its \p release() member function is called.
// No-functor variant: the minimal key itself is discarded (no-op lambda).
434 exempt_ptr extract_min()
436 return exempt_ptr(do_extract_min( []( key_type const& ) {}));
439 /// Extracts minimal key key and corresponding value
441 Returns \p exempt_ptr to the leftmost item.
442 If the tree is empty, returns empty \p exempt_ptr.
444 \p Func functor is used to store minimal key.
445 \p Func has the following signature:
448 void operator()( key_type const& key );
451 If the tree is empty, \p f is not called.
452 Otherwise, is it called with minimal key, the pointer to corresponding value is returned
455 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
456 It means that the function gets leftmost leaf of the tree and tries to unlink it.
457 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
458 So, the function returns the item with minimum key at the moment of tree traversing.
460 RCU \p synchronize method can be called. RCU should NOT be locked.
461 The function does not free the item.
462 The deallocator will be implicitly invoked when the returned object is destroyed or when
463 its \p release() member function is called.
// Forwards the extracted item's key to the user functor f.
465 template <typename Func>
466 exempt_ptr extract_min( Func f )
468 return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); }));
471 /// Extracts minimal key key and corresponding value
473 This function is a shortcut for the following call:
476 exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } );
478 \p key_type should be copy-assignable. The copy of minimal key
479 is returned in \p min_key argument.
// Copies the minimal key into min_key; enabled only for copy-assignable keys.
481 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
482 extract_min_key( key_type& min_key )
484 return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; }));
487 /// Extracts a value with maximal key from the tree
489 Returns \p exempt_ptr pointer to the rightmost item.
490 If the set is empty, returns empty \p exempt_ptr.
492 Note that the function returns only the value for maximal key.
493 To retrieve its key use \p extract_max( Func ) member function.
495 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
496 It means that the function gets rightmost leaf of the tree and tries to unlink it.
497 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
498 So, the function returns the item with maximum key at the moment of tree traversing.
500 RCU \p synchronize method can be called. RCU should NOT be locked.
501 The function does not free the item.
502 The deallocator will be implicitly invoked when the returned object is destroyed or when
503 its \p release() is called.
// No-functor variant: the maximal key itself is discarded (no-op lambda).
505 exempt_ptr extract_max()
507 return exempt_ptr(do_extract_max( []( key_type const& ) {}));
510 /// Extracts the maximal key and corresponding value
512 Returns \p exempt_ptr pointer to the rightmost item.
513 If the set is empty, returns empty \p exempt_ptr.
515 \p Func functor is used to store maximal key.
516 \p Func has the following signature:
519 void operator()( key_type const& key );
522 If the tree is empty, \p f is not called.
523 Otherwise, is it called with maximal key, the pointer to corresponding value is returned
526 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
527 It means that the function gets rightmost leaf of the tree and tries to unlink it.
528 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
529 So, the function returns the item with maximum key at the moment of tree traversing.
531 RCU \p synchronize method can be called. RCU should NOT be locked.
532 The function does not free the item.
533 The deallocator will be implicitly invoked when the returned object is destroyed or when
534 its \p release() is called.
// Forwards the extracted item's key to the user functor f.
536 template <typename Func>
537 exempt_ptr extract_max( Func f )
539 return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); }));
542 /// Extracts the maximal key and corresponding value
544 This function is a shortcut for the following call:
547 exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } );
549 \p key_type should be copy-assignable. The copy of maximal key
550 is returned in \p max_key argument.
// Copies the maximal key into max_key; enabled only for copy-assignable keys.
552 typename std::enable_if< std::is_copy_assignable<key_type>::value, exempt_ptr >::type
553 extract_max_key( key_type& max_key )
555 return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; }));
558 /// Extracts an item from the map
560 The function searches an item with key equal to \p key in the tree,
561 unlinks it, and returns \p exempt_ptr pointer to a value found.
562 If \p key is not found the function returns an empty \p exempt_ptr.
564 RCU \p synchronize method can be called. RCU should NOT be locked.
565 The function does not destroy the value found.
566 The disposer will be implicitly invoked when the returned object is destroyed or when
567 its \p release() member function is called.
// Unlinks the item with the given key and wraps the value in an exempt_ptr.
569 template <typename Q>
570 exempt_ptr extract( Q const& key )
572 return exempt_ptr(do_extract( key ));
576 /// Extracts an item from the map using \p pred for searching
578 The function is an analog of \p extract(Q const&)
579 but \p pred is used for key compare.
580 \p Less has the interface like \p std::less.
581 \p pred must imply the same element order as the comparator used for building the tree.
// extract() variant using a comparator built from the Less predicate.
583 template <typename Q, typename Less>
584 exempt_ptr extract_with( Q const& key, Less pred )
586 return exempt_ptr(do_extract_with( key, pred ));
589 /// Find the key \p key
591 The function searches the item with key equal to \p key and calls the functor \p f for item found.
592 The interface of \p Func functor is:
595 void operator()( key_type const& key, mapped_type& item );
598 where \p item is the item found.
599 The functor is called under node-level lock.
601 The function applies RCU lock internally.
603 The function returns \p true if \p key is found, \p false otherwise.
605 template <typename K, typename Func>
606 bool find( K const& key, Func f )
608 return do_find( key, key_comparator(),
609 [&f]( node_type * pNode ) -> bool {
610 assert( pNode != nullptr );
// relaxed load suffices: per the doc above, the functor runs under
// the node-level lock taken in try_find().
611 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
613 f( pNode->m_key, *pVal );
621 /// Finds the key \p val using \p pred predicate for searching
623 The function is an analog of \p find(K const&, Func)
624 but \p pred is used for key comparing.
625 \p Less functor has the interface like \p std::less.
626 \p Less must imply the same element order as the comparator used for building the map.
// find() variant using a comparator built from the Less predicate.
628 template <typename K, typename Less, typename Func>
629 bool find_with( K const& key, Less pred, Func f )
632 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(),
633 [&f]( node_type * pNode ) -> bool {
634 assert( pNode != nullptr );
// relaxed load suffices: the functor runs under the node-level lock.
635 mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
637 f( pNode->m_key, *pVal );
645 /// Checks whether the map contains \p key
647 The function searches the item with key equal to \p key
648 and returns \p true if it is found, and \p false otherwise.
650 The function applies RCU lock internally.
// contains: find with a trivial always-true functor; only presence matters.
652 template <typename K>
653 bool contains( K const& key )
655 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
658 /// Checks whether the map contains \p key using \p pred predicate for searching
660 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
661 \p Less functor has the interface like \p std::less.
662 \p Less must imply the same element order as the comparator used for building the set.
// contains() variant using a comparator built from the Less predicate.
664 template <typename K, typename Less>
665 bool contains( K const& key, Less pred )
668 return do_find( key, cds::opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
671 /// Clears the tree (thread safe, not atomic)
673 The function unlink all items from the tree.
674 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
678 assert( set.empty() );
680 the assertion could be raised.
682 For each node the \ref disposer will be called after unlinking.
684 RCU \p synchronize method can be called. RCU should not be locked.
// clear() body fragment: repeatedly extracts the minimum until the tree
// is empty; each extract_min() retires the value via its exempt_ptr.
688 while ( extract_min() );
691 /// Clears the tree (not thread safe)
693 This function is not thread safe and may be called only when no other thread deals with the tree.
694 The function is used in the tree destructor.
// Non-concurrent clear used by the destructor (per the doc above); it
// currently just reuses the thread-safe clear().
698 clear(); // temp solution
702 /// Checks if the map is empty
// The root is a sentinel: the whole tree hangs off its right child,
// so a null right link means an empty map.
705 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
708 /// Returns item count in the map
710 Only leaf nodes containing user data are counted.
712 The value returned depends on item counter type provided by \p Traits template parameter.
713 If it is \p atomicity::empty_item_counter this function always returns 0.
715 The function is not suitable for checking the tree emptiness, use \p empty()
716 member function for this purpose.
// Trivial accessors (bodies partially elided in this extraction).
720 return m_ItemCounter;
723 /// Returns const reference to internal statistics
724 stat const& statistics() const
729 /// Returns reference to \p sync_monitor object
730 sync_monitor& monitor()
735 sync_monitor const& monitor() const
741 /// Checks internal consistency (not atomic, not thread-safe)
743 The debugging function to check internal consistency of the tree.
// Delegates to the functor overload with a no-op violation handler.
745 bool check_consistency() const
747 return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} );
750 /// Checks internal consistency (not atomic, not thread-safe)
752 The debugging function to check internal consistency of the tree.
753 The functor \p Func is called if a violation of internal tree structure
757 void operator()( size_t nLevel, size_t hLeft, size_t hRight );
761 - \p nLevel - the level where the violation is found
762 - \p hLeft - the height of left subtree
763 - \p hRight - the height of right subtree
765 The functor is called for each violation found.
767 template <typename Func>
768 bool check_consistency( Func f ) const
// Start at the real tree root: the right child of the sentinel root.
770 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
773 do_check_consistency( pChild, 1, f, nErrors );
// Recursive checker: returns subtree height; accumulates errors in nErrors.
781 template <typename Func>
782 size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const
786 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
787 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// BST ordering checks (the error-accounting lines are elided here).
788 if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 )
790 if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 )
793 size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors );
794 size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors );
// AVL balance check: report via f when subtree heights differ by more than 1.
796 if ( hLeft >= hRight ) {
797 if ( hLeft - hRight > 1 ) {
798 f( nLevel, hLeft, hRight );
804 if ( hRight - hLeft > 1 ) {
805 f( nLevel, hLeft, hRight );
814 template <typename Q, typename Compare, typename Func>
815 bool do_find( Q& key, Compare cmp, Func f ) const
// Search starts at the sentinel root's right link; the surrounding
// RCU-lock/retry loop lines are elided in this extraction.
820 result = try_find( key, cmp, f, m_pRoot, right_child, 0 );
822 assert( result != find_result::retry );
823 return result == find_result::found;
826 template <typename K, typename Compare, typename Func>
827 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
// Per the public docs, RCU must NOT be locked by the caller here.
829 check_deadlock_policy::check();
// Nodes/values unlinked during the update are retired when removed_list
// is destroyed, i.e. after the internal RCU lock is released.
831 rcu_disposer removed_list;
834 return try_update_root( key, cmp, nFlags, funcUpdate, removed_list );
838 template <typename K, typename Compare, typename Func>
839 bool do_remove( K const& key, Compare cmp, Func func )
841 // Func must return true if the value was disposed
842 // or false if the value was extracted
// Per the public docs, RCU must NOT be locked by the caller here.
844 check_deadlock_policy::check();
// Retired nodes/values are reclaimed when removed_list goes out of scope.
846 rcu_disposer removed_list;
849 return try_remove_root( key, cmp, func, removed_list );
// do_extract_min/max: the removal functor returns false, meaning the value
// is EXTRACTED (ownership goes to the caller), not disposed.
853 template <typename Func>
854 mapped_type do_extract_min( Func f )
856 mapped_type pExtracted = nullptr;
859 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
864 template <typename Func>
865 mapped_type do_extract_max( Func f )
867 mapped_type pExtracted = nullptr;
870 [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; }
// Driver for extract_min/extract_max; nDir selects the traversal direction.
875 template <typename Func>
876 void do_extract_minmax( int nDir, Func func )
// RCU must not be locked by the caller (synchronize() may be invoked).
878 check_deadlock_policy::check();
880 rcu_disposer removed_list;
887 // get right child of root
888 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
890 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// If the child is shrinking (rotation in progress), wait and retry.
891 if ( nChildVersion & node_type::shrinking ) {
892 m_stat.onRemoveRootWaitShrinking();
893 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
894 result = update_flags::retry;
// Re-read validation: proceed only if the root's right child is unchanged.
896 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
897 result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list );
900 result = update_flags::retry;
905 if ( result == update_flags::retry )
906 m_stat.onRemoveRetry();
908 m_stat.onExtract( result == update_flags::result_removed );
// do_extract / do_extract_with: remove by key; the functor returns false,
// so the value is extracted (handed to the caller) rather than disposed.
915 template <typename Q>
916 mapped_type do_extract( Q const& key )
918 mapped_type pExtracted = nullptr;
922 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
924 m_stat.onExtract( pExtracted != nullptr );
// Variant with comparator built from the user Less predicate.
928 template <typename Q, typename Less>
929 mapped_type do_extract_with( Q const& key, Less pred )
932 mapped_type pExtracted = nullptr;
935 cds::opt::details::make_comparator_from_less<Less>(),
936 [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; }
938 m_stat.onExtract( pExtracted != nullptr );
// Atomic accessors for the cached subtree height stored in each node.
945 static int height( node_type * pNode, atomics::memory_order order )
948 return pNode->m_nHeight.load( order );
950 static void set_height( node_type * pNode, int h, atomics::memory_order order )
953 pNode->m_nHeight.store( h, order );
// Null-tolerant variant: an empty subtree has height 0.
955 static int height_null( node_type * pNode, atomics::memory_order order )
957 return pNode ? height( pNode, order ) : 0;
// Depth of the explicit traversal stacks used instead of recursion
// (see try_find/try_update); asserted against before each push.
960 static CDS_CONSTEXPR int const c_stackSize = 64;
// Optimistic, version-validated search (Bronson et al. style), with the
// recursion hand-unrolled onto a fixed-size explicit stack.
// Must be called inside an RCU read-side critical section.
962 template <typename Q, typename Compare, typename Func>
963 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
965 assert( gc::is_locked() );
// stack_record fields (declaration lines partially elided here).
971 version_type nVersion;
975 stack_record stack[c_stackSize];
// Seed the stack with the starting node/version/direction.
977 stack[0].pNode = pNode;
978 stack[0].nVersion = nVersion;
979 stack[0].nDir = nDir;
// Resume from the top of the stack each outer iteration.
982 pNode = stack[pos].pNode;
983 nVersion = stack[pos].nVersion;
984 nDir = stack[pos].nDir;
987 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
// Parent version changed => our read of the link may be stale: retry.
989 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
991 m_stat.onFindRetry();
// Null child with stable parent version => key is absent.
994 m_stat.onFindFailed();
995 return find_result::not_found;
998 int nCmp = cmp( key, pChild->m_key );
// Key matches (equality branch; comparison lines elided): confirm under
// the node-level lock and re-validate the link before calling f.
1000 if ( pChild->is_valued( memory_model::memory_order_acquire ) ) {
1002 node_scoped_lock l( m_Monitor, *pChild );
1003 if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) {
1004 if ( pChild->is_valued( memory_model::memory_order_relaxed )) {
1005 if ( f( pChild ) ) {
1006 m_stat.onFindSuccess();
1007 return find_result::found;
1012 m_stat.onFindRetry();
// Routing node without a value for this key => not found.
1016 m_stat.onFindFailed();
1017 return find_result::not_found;
// Descend: if the child is shrinking, wait for the rotation to finish.
1020 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1021 if ( nChildVersion & node_type::shrinking ) {
1022 m_stat.onFindWaitShrinking();
1023 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1025 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1027 m_stat.onFindRetry();
// Child still linked and not unlinked: validate parent, then push child.
1031 else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild )
1033 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1035 m_stat.onFindRetry();
1040 assert(pos < c_stackSize);
1041 stack[pos].pNode = pChild;
1042 stack[pos].nVersion = nChildVersion;
1043 stack[pos].nDir = nCmp;
1044 break; // child iteration
1047 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1049 m_stat.onFindRetry();
1053 m_stat.onFindRetry();
// Exhausted/invalidated: signal the caller to restart from the root.
1056 return find_result::retry;
// Entry point of the update path: validates the sentinel root's right child
// and either descends via try_update() or inserts the first node.
1059 template <typename K, typename Compare, typename Func>
1060 int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
1062 assert( gc::is_locked() );
1067 // get right child of root
1068 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1070 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Rotation in progress at the child: wait, then retry the whole attempt.
1071 if ( nChildVersion & node_type::shrinking ) {
1072 m_stat.onUpdateRootWaitShrinking();
1073 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1074 result = update_flags::retry;
// Link re-read validation protects the (pChild, nChildVersion) pair.
1076 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire ))
1077 result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp );
1079 result = update_flags::retry;
1082 // the tree is empty
1083 if ( nFlags & update_flags::allow_insert ) {
1084 // insert into tree as right child of the root
// Lock the sentinel root and re-check emptiness under the lock.
1086 node_scoped_lock l( m_Monitor, *m_pRoot );
1087 if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) {
1088 result = update_flags::retry;
// Create the first node (height 1, version 0), install the value, then
// publish it with release stores so readers see a fully-built node.
1092 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
1093 mapped_type pVal = funcUpdate( pNew );
1094 assert( pVal != nullptr );
1095 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1097 m_pRoot->child( pNew, right_child, memory_model::memory_order_release);
1098 set_height( m_pRoot, 2, memory_model::memory_order_release );
1102 m_stat.onInsertSuccess();
1103 return update_flags::result_inserted;
// Empty tree and insertion not allowed => the update fails.
1106 return update_flags::failed;
1109 if ( result != update_flags::retry )
// Entry point of the removal path: validates the sentinel root's right child
// and descends via try_remove(); retries on concurrent shrinks.
1114 template <typename K, typename Compare, typename Func>
1115 bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp )
1117 assert( gc::is_locked() );
1122 // get right child of root
1123 node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire );
1125 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Rotation in progress at the child: wait, then retry the whole attempt.
1126 if ( nChildVersion & node_type::shrinking ) {
1127 m_stat.onRemoveRootWaitShrinking();
1128 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1129 result = update_flags::retry;
// Link re-read validation protects the (pChild, nChildVersion) pair.
1131 else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) {
1132 result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp );
1135 result = update_flags::retry;
1140 if ( result == update_flags::retry )
1141 m_stat.onRemoveRetry();
1143 m_stat.onRemove( result == update_flags::result_removed );
1144 return result == update_flags::result_removed;
// Iterative (explicit-stack) descent that inserts or updates the value for
// `key` under pNode, depending on nFlags (allow_insert / allow_update).
// nVersion is the caller's snapshot of pNode's version; any mismatch during
// descent forces a retry (hand-over-hand optimistic validation).
// Returns one of update_flags: result_inserted, result_updated, failed, retry.
// NOTE(review): the stack_record struct definition, `pos` bookkeeping and
// several braces are elided in this sampled view — confirm with full source.
1149 template <typename K, typename Compare, typename Func>
1150 int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1152 assert( gc::is_locked() );
1153 assert( nVersion != node_type::unlinked );
1158 version_type nVersion;
1161 stack_record stack[c_stackSize];
1163 stack[0].pNode = pNode;
1164 stack[0].nVersion = nVersion;
// Simulated recursion: top of stack is the current (node, version) pair
1166 while ( pos >= 0 ) {
1167 pNode = stack[pos].pNode;
1168 nVersion = stack[pos].nVersion;
1170 int nCmp = cmp( key, pNode->m_key );
// nCmp == 0: this is the node to update in place
1172 int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp );
1173 if ( result != update_flags::retry )
1176 m_stat.onUpdateRetry();
// Descend toward the matching child; nCmp (<0 / >0) selects left/right
1181 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1182 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
// pNode changed under us — the child read above cannot be trusted
1184 m_stat.onUpdateRetry();
1188 if ( pChild == nullptr ) {
// Key not present: insert a new leaf here if the caller allows it
1190 if ( nFlags & update_flags::allow_insert ) {
1191 int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
1192 if ( result != update_flags::retry )
1195 m_stat.onUpdateRetry();
1199 return update_flags::failed;
1203 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1204 if ( nChildVersion & node_type::shrinking ) {
// Child is mid-rotation: wait until the shrink completes, then re-check
1205 m_stat.onUpdateWaitShrinking();
1206 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1209 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1210 // this second read is important, because it is protected by nChildVersion
1212 // validate the read that our caller took to get to node
1213 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1215 m_stat.onUpdateRetry();
1218 // At this point we know that the traversal our parent took to get to node is still valid.
1219 // The recursive implementation will validate the traversal from node to
1220 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1221 // This means that we are no longer vulnerable to node shrinks, and we don't need
1222 // to validate node version any more.
1224 assert( pos < c_stackSize );
// Push the child and continue the descent one level down
1225 stack[pos].pNode = pChild;
1226 stack[pos].nVersion = nChildVersion;
1227 assert( nChildVersion != node_type::unlinked );
1228 break; // child iteration
1230 m_stat.onUpdateRetry();
// Stack exhausted without resolution — caller must retry from the root
1234 return update_flags::retry;
// Iterative (explicit-stack) descent that removes the node matching `key`
// in pNode's subtree, tracking (parent, node, version) triples so that
// try_remove_node() can lock both the parent and the victim.
// Returns one of update_flags: result_removed, failed (key absent), retry.
// NOTE(review): stack_record layout, `pos` handling and some braces are
// elided in this sampled view — confirm against the full source.
1237 template <typename K, typename Compare, typename Func>
1238 int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1240 assert( gc::is_locked() );
1241 assert( nVersion != node_type::unlinked );
1245 node_type * pParent;
1247 version_type nVersion;
1250 stack_record stack[c_stackSize];
1252 stack[0].pParent = pParent;
1253 stack[0].pNode = pNode;
1254 stack[0].nVersion = nVersion;
// Simulated recursion over (parent, node, version) records
1256 while ( pos >= 0 ) {
1257 pParent = stack[pos].pParent;
1258 pNode = stack[pos].pNode;
1259 nVersion = stack[pos].nVersion;
1261 int nCmp = cmp( key, pNode->m_key );
// nCmp == 0: pNode holds the key — attempt the locked removal
1263 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1264 if ( result != update_flags::retry )
1267 m_stat.onRemoveRetry();
// Descend toward the child selected by the comparison result
1272 node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire );
1273 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
// pNode changed concurrently; the child pointer read is stale
1275 m_stat.onRemoveRetry();
1279 if ( pChild == nullptr )
// Key is not in the tree
1280 return update_flags::failed;
1283 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1284 if ( nChildVersion & node_type::shrinking ) {
// Child is mid-rotation: back off until the shrink completes
1285 m_stat.onRemoveWaitShrinking();
1286 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1289 else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) {
1290 // this second read is important, because it is protected by nChildVersion
1292 // validate the read that our caller took to get to node
1293 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
1295 m_stat.onRemoveRetry();
1299 // At this point we know that the traversal our parent took to get to node is still valid.
1300 // The recursive implementation will validate the traversal from node to
1301 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1302 // This means that we are no longer vulnerable to node shrinks, and we don't need
1303 // to validate node version any more.
1305 assert( pos < c_stackSize );
// Push (pNode, pChild) as the next parent/node pair and descend
1306 stack[pos].pParent = pNode;
1307 stack[pos].pNode = pChild;
1308 stack[pos].nVersion = nChildVersion;
1309 break; // child iteration
1311 m_stat.onRemoveRetry();
// Descent could not be validated — restart from the caller's level
1314 return update_flags::retry;
// Extracts the minimum (nDir == left_child) or maximum (nDir == right_child)
// valued node of pNode's subtree by walking only in direction nDir until
// the walk falls off the tree; the last valued node visited is removed via
// try_remove_node(). Same explicit-stack validation scheme as try_remove().
// Returns one of update_flags: result_removed, retry.
// NOTE(review): stack_record layout and `pos` handling are elided in this
// sampled view — confirm against the full source.
1317 template <typename Func>
1318 int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1320 assert( gc::is_locked() );
1321 assert( nVersion != node_type::unlinked );
1325 node_type * pParent;
1327 version_type nVersion;
1330 stack_record stack[c_stackSize];
1332 stack[0].pParent = pParent;
1333 stack[0].pNode = pNode;
1334 stack[0].nVersion = nVersion;
// Walk in direction nDir until there is no further child
1336 while ( pos >= 0 ) {
1337 pParent = stack[pos].pParent;
1338 pNode = stack[pos].pNode;
1339 nVersion = stack[pos].nVersion;
1342 node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire );
1343 if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) {
// pNode changed under us; re-validate from an earlier level
1345 m_stat.onRemoveRetry();
1349 if ( pChild == nullptr ) {
// Reached the extreme node in direction nDir — it must carry a value
1351 assert(pNode->is_valued( memory_model::memory_order_acquire ));
1352 int result = try_remove_node( pParent, pNode, nVersion, func, disp );
1353 if ( result != update_flags::retry )
1356 m_stat.onRemoveRetry();
1360 version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
1361 if ( nChildVersion & node_type::shrinking ) {
// Child is mid-rotation: back off until the shrink completes
1362 m_stat.onRemoveWaitShrinking();
1363 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_acquire );
1366 else if ( pChild == child( pNode, nDir, memory_model::memory_order_acquire )) {
1367 // this second read is important, because it is protected by nChildVersion
1369 // validate the read that our caller took to get to node
1370 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
1372 m_stat.onRemoveRetry();
1376 // At this point we know that the traversal our parent took to get to node is still valid.
1377 // The recursive implementation will validate the traversal from node to
1378 // child, so just prior to the node nVersion validation both traversals were definitely okay.
1379 // This means that we are no longer vulnerable to node shrinks, and we don't need
1380 // to validate node version any more.
1382 assert( pos < c_stackSize );
// Push (pNode, pChild) and keep walking in direction nDir
1383 stack[pos].pParent = pNode;
1384 stack[pos].pNode = pChild;
1385 stack[pos].nVersion = nChildVersion;
1386 break; // child iteration
1388 m_stat.onRemoveRetry();
// Could not complete the extraction — caller restarts from the root
1391 return update_flags::retry;
// Links a freshly allocated leaf holding `key` as pNode's nDir child.
// With c_bRelaxedInsert the node (and its value, produced by funcUpdate)
// is created BEFORE taking pNode's lock, trading a possible wasted
// allocation for a shorter critical section; otherwise creation happens
// under the lock. Either way the insert is revalidated against nVersion
// and the child slot being still empty. Rebalances if the subtree height
// changed. Returns update_flags::result_inserted or update_flags::retry.
// NOTE(review): the `pNew` declaration, value-disposal call on the relaxed
// failure path and several braces are elided in this sampled view.
1394 template <typename K, typename Func>
1395 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
// Fills the new node's value from the user functor (must be non-null)
1399 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
1400 mapped_type pVal = funcUpdate( pNew );
1401 assert( pVal != nullptr );
1402 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
1405 if ( c_bRelaxedInsert ) {
// Cheap pre-check before allocating: bail out early if stale or occupied
1406 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1407 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1409 m_stat.onInsertRetry();
1410 return update_flags::retry;
// Optimistically create the node outside the lock
1413 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
1416 node_type * pDamaged;
1418 assert( pNode != nullptr );
1419 node_scoped_lock l( m_Monitor, *pNode );
// Re-validate under the lock: version unchanged and slot still empty
1421 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
1422 || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr )
1424 if ( c_bRelaxedInsert ) {
// Undo the optimistic creation: detach the value so it can be disposed
1425 mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed );
1426 pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
1429 m_stat.onRelaxedInsertFailed();
1432 m_stat.onInsertRetry();
1433 return update_flags::retry;
// Non-relaxed mode: create the node only after validation succeeded
1436 if ( !c_bRelaxedInsert )
1437 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
// Publish the new leaf and note any height damage for rebalancing
1439 pNode->child( pNew, nDir, memory_model::memory_order_release );
1440 pDamaged = fix_height_locked( pNode );
1444 m_stat.onInsertSuccess();
1447 fix_height_and_rebalance( pDamaged, disp );
1448 m_stat.onInsertRebalanceRequired();
1451 return update_flags::result_inserted;
// Updates (or inserts into a routing node) the value stored at pNode,
// under pNode's lock. Revalidates the version and unlinked state first.
// The old value, if any, is handed to the RCU disposer. Returns one of
// update_flags: result_inserted (node had no value), result_updated,
// failed (value present but allow_update not set), or retry.
// NOTE(review): declarations of `pOld`/`bInserted` and the branch
// structure around lines 1483-1495 are elided in this sampled view.
1454 template <typename Func>
1455 int try_update_node( int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp )
1459 assert( pNode != nullptr );
1461 node_scoped_lock l( m_Monitor, *pNode );
// Version changed since the caller's read — traversal is stale
1463 if ( pNode->version(memory_model::memory_order_acquire) != nVersion )
1464 return update_flags::retry;
1466 if ( pNode->is_unlinked( memory_model::memory_order_acquire )) {
// Node was removed from the tree while we were descending
1467 m_stat.onUpdateUnlinked();
1468 return update_flags::retry;
// Key already mapped and the caller forbids overwriting
1471 if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update) ) {
1472 m_stat.onInsertFailed();
1473 return update_flags::failed;
// pOld == nullptr means pNode was a routing node: this is an insert
1476 pOld = pNode->value( memory_model::memory_order_relaxed );
1477 bInserted = pOld == nullptr;
1478 mapped_type pVal = funcUpdate( pNode );
1482 assert( pVal != nullptr );
1483 pNode->m_pValue.store( pVal, memory_model::memory_order_release );
// Defer destruction of the replaced value until a grace period passes
1488 disp.dispose_value(pOld);
1489 m_stat.onDisposeValue();
1494 m_stat.onInsertSuccess();
1495 return update_flags::result_inserted;
1498 m_stat.onUpdateSuccess();
1499 return update_flags::result_updated;
// Removes pNode's value. Two cases:
//  1) pNode has at most one child: lock parent then node, unlink pNode
//     (splicing its child into the parent) and fix heights/rebalance.
//  2) pNode has two children: it cannot be unlinked, so only its value is
//     cleared, turning it into a routing node.
// In both cases `func(key, pOld, disp)` consumes the old value (returning
// true when it disposed it). nVersion guards against concurrent changes.
// Returns update_flags::result_removed, failed (no value), or retry.
// NOTE(review): the `pOld` declaration and a few braces/else-paths are
// elided in this sampled view — confirm with the full source.
1502 template <typename Func>
1503 int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp )
1505 assert( pParent != nullptr );
1506 assert( pNode != nullptr );
// Already a routing node — nothing to remove
1508 if ( !pNode->is_valued( memory_model::memory_order_acquire ) )
1509 return update_flags::failed;
1511 if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr
1512 || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr )
1514 // pNode can be replaced with its child
1516 node_type * pDamaged;
// Lock order: parent first, then node (consistent throughout the tree)
1519 node_scoped_lock lp( m_Monitor, *pParent );
1520 if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent )
1521 return update_flags::retry;
1524 node_scoped_lock ln( m_Monitor, *pNode );
1525 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
1526 return update_flags::retry;
// Value may have been removed concurrently before we got the lock
1528 pOld = pNode->value( memory_model::memory_order_relaxed );
1530 return update_flags::failed;
// Splice pNode out; failure means topology changed — retry
1532 if ( !try_unlink_locked( pParent, pNode, disp ))
1533 return update_flags::retry;
1535 pDamaged = fix_height_locked( pParent );
1539 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1540 m_stat.onDisposeValue();
1542 m_stat.onExtractValue();
1545 fix_height_and_rebalance( pDamaged, disp );
1546 m_stat.onRemoveRebalanceRequired();
1550 // pNode is an internal with two children
// Only pNode's lock is needed: topology does not change here
1554 node_scoped_lock ln( m_Monitor, *pNode );
1555 pOld = pNode->value( memory_model::memory_order_relaxed );
1556 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion )
1557 return update_flags::retry;
1559 return update_flags::failed;
// Demote pNode to a routing node by clearing its value
1561 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
1562 m_stat.onMakeRoutingNode();
1566 if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside
1567 m_stat.onDisposeValue();
1569 m_stat.onExtractValue();
1571 return update_flags::result_removed;
// Splices pNode (which has at most one child) out of the tree: the parent's
// corresponding child pointer is redirected to pNode's single child (or
// null). Both pParent and pNode MUST already be locked by the caller.
// Marks pNode unlinked and hands it to the RCU disposer. Returns false if
// the topology no longer permits the splice (caller must retry).
// NOTE(review): the `return false;` bodies of the two early-exit branches
// are elided in this sampled view — confirm with the full source.
1574 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1576 // pParent and pNode must be locked
1577 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
1579 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1580 node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed );
1581 if ( pNode != pParentLeft && pNode != pParentRight ) {
1582 // node is no longer a child of parent
1586 assert( !pNode->is_unlinked( memory_model::memory_order_relaxed ));
1587 assert( pParent == parent( pNode, memory_model::memory_order_relaxed ));
1589 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1590 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
1591 if ( pLeft != nullptr && pRight != nullptr ) {
1592 // splicing is no longer possible
// pSplice is pNode's only child (possibly null) — it replaces pNode
1595 node_type * pSplice = pLeft ? pLeft : pRight;
1597 if ( pParentLeft == pNode )
1598 pParent->m_pLeft.store( pSplice, memory_model::memory_order_release );
1600 pParent->m_pRight.store( pSplice, memory_model::memory_order_release );
1603 pSplice->parent( pParent, memory_model::memory_order_release );
1605 // Mark the node as unlinked
1606 pNode->version( node_type::unlinked, memory_model::memory_order_release );
1608 // The value will be disposed by calling function
1609 pNode->m_pValue.store( nullptr, memory_model::memory_order_release );
// Node memory is reclaimed only after an RCU grace period
1611 disp.dispose( pNode );
1612 m_stat.onDisposeNode();
1619 private: // rotations
// Classifies what maintenance pNode needs based on current child heights:
//  - unlink_required: a valueless routing node with <2 children can go away;
//  - rebalance_required: AVL balance factor outside [-1, 1];
//  - nothing_required: height is already correct;
//  - otherwise returns the corrected height (hNew) to be stored.
// Reads are optimistic (no locks) — callers re-check under locks.
1621 int estimate_node_condition( node_type * pNode )
1623 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire );
1624 node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire );
// A routing node with a missing child is pure overhead — unlink it
1626 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire ))
1627 return unlink_required;
1629 int h = height( pNode, memory_model::memory_order_acquire );
1630 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1631 int hR = height_null( pRight, memory_model::memory_order_acquire );
1633 int hNew = 1 + std::max( hL, hR );
1634 int nBalance = hL - hR;
1636 if ( nBalance < -1 || nBalance > 1 )
1637 return rebalance_required;
// Either report the corrected height or signal no work needed
1639 return h != hNew ? hNew : nothing_required;
// Locks pNode, then corrects its stored height.
// Returns the next node whose height may now be damaged (see
// fix_height_locked), or nullptr when no further repair is needed.
1642 node_type * fix_height( node_type * pNode )
1644 assert( pNode != nullptr );
1645 node_scoped_lock l( m_Monitor, *pNode );
1646 return fix_height_locked( pNode );
// Corrects pNode's stored height; pNode must already be locked.
// If a rotation or unlink is required instead, returns pNode itself so the
// caller escalates to rebalance; if nothing is wrong, returns nullptr
// (via the elided switch cases); otherwise stores the new height and
// returns the parent, which may now be damaged in turn.
// NOTE(review): the `switch` statement and its `return` lines for the
// first cases are elided in this sampled view — confirm with full source.
1649 node_type * fix_height_locked( node_type * pNode )
1651 // pNode must be locked!!!
1652 int h = estimate_node_condition( pNode );
1654 case rebalance_required:
1655 case unlink_required:
1657 case nothing_required:
1660 set_height( pNode, h, memory_model::memory_order_release );
// Height change may invalidate the parent's height — propagate upward
1661 return parent( pNode, memory_model::memory_order_relaxed );
// Walks up from pNode, repairing heights and performing rotations until
// the tree is locally consistent. Stops at the sentinel root (whose
// parent is null) or when a node needs nothing. For structural repairs it
// takes the parent lock, revalidates the parent/child link, then takes the
// node lock and delegates to rebalance_locked(), which returns the next
// damaged node (or nullptr) to continue with.
1665 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
1667 while ( pNode && parent( pNode, memory_model::memory_order_acquire )) {
1668 int nCond = estimate_node_condition( pNode );
// Nothing to do, or someone already unlinked this node
1669 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire ) )
// Plain height damage: fix it and move to the returned (parent) node
1672 if ( nCond != unlink_required && nCond != rebalance_required )
1673 pNode = fix_height( pNode );
1675 node_type * pParent = parent( pNode, memory_model::memory_order_acquire );
1676 assert( pParent != nullptr );
// Lock order: parent before node; re-check the link under the lock
1678 node_scoped_lock lp( m_Monitor, *pParent );
1679 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) {
1680 node_scoped_lock ln( m_Monitor, *pNode );
1681 pNode = rebalance_locked( pParent, pNode, disp );
// With pParent and pNode locked, performs the single repair pNode needs:
// unlink a childless routing node, rotate right (left-heavy), rotate left
// (right-heavy), or just store the corrected height.
1688 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
1690 // pParent and pNode should be locked.
1691 // Returns a damaged node, or nullptr if no more rebalancing is necessary
1692 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1694 node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed );
1695 node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed );
// Valueless routing node missing a child: splice it out entirely
1697 if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
1698 if ( try_unlink_locked( pParent, pNode, disp ))
1699 return fix_height_locked( pParent );
1701 // retry needed for pNode
1706 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1707 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1709 int h = height( pNode, memory_model::memory_order_acquire );
1710 int hL = height_null( pLeft, memory_model::memory_order_acquire );
1711 int hR = height_null( pRight, memory_model::memory_order_acquire );
1712 int hNew = 1 + std::max( hL, hR );
1713 int balance = hL - hR;
// balance > 1 (left-heavy): rotate right; the condition line is elided here
1716 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
1717 else if ( balance < -1 )
1718 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
1719 else if ( hNew != h ) {
// Only the cached height was wrong — fix it and propagate to the parent
1720 set_height( pNode, hNew, memory_model::memory_order_release );
1722 // pParent is already locked
1723 return fix_height_locked( pParent );
// pNode's left subtree is too tall: perform a right rotation, possibly as
// a double rotation (rotate pLeft left first) when pLeft's right subtree
// dominates. Locks pLeft (and pLRight when needed) on top of the caller's
// pParent/pNode locks, revalidating each link after acquiring each lock.
// Returns the next node needing repair, or pNode itself to signal retry.
// NOTE(review): some condition lines (e.g. the hL/hR comparison before
// line 1747 and the hLL >= hLR test) are elided in this sampled view.
1729 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
1731 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1732 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1733 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1735 // pParent and pNode is locked yet
1736 // pNode->pLeft is too large, we will rotate-right.
1737 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1740 assert( pLeft != nullptr );
1741 node_scoped_lock l( m_Monitor, *pLeft );
// Link changed before we got the lock — retry at pNode
1742 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1743 return pNode; // retry for pNode
1745 int hL = height( pLeft, memory_model::memory_order_acquire );
1747 return pNode; // retry
1749 node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed );
1750 int hLR = height_null( pLRight, memory_model::memory_order_acquire );
1751 node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed );
1752 int hLL = height_null( pLLeft, memory_model::memory_order_acquire );
// Left-left case: a single right rotation suffices
1756 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1760 node_scoped_lock lr( m_Monitor, *pLRight );
1761 if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight )
1762 return pNode; // retry
// Re-read pLRight's height now that it is locked (stable)
1764 hLR = height( pLRight, memory_model::memory_order_acquire );
1766 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1768 int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire);
1769 int balance = hLL - hLRL;
1770 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && !pLeft->is_valued(memory_model::memory_order_relaxed))) {
1771 // nParent.child.left won't be damaged after a double rotation
1772 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1776 return pNode; // retry
1778 // focus on pLeft, if necessary pNode will be balanced later
1779 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
// Mirror image of rebalance_to_right_locked(): pNode's right subtree is
// too tall, so perform a left rotation, possibly as a double rotation
// (rotate pRight right first) when pRight's left subtree dominates.
// Locks pRight (and pRLeft when needed), revalidating after each lock.
// Returns the next node needing repair, or pNode itself to signal retry.
// NOTE(review): some condition lines (e.g. the hRL <= hRR test before
// line 1806) are elided in this sampled view — confirm with full source.
1784 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1786 assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent );
1787 assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode
1788 || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
1790 // pParent and pNode is locked yet
1792 assert( pRight != nullptr );
1793 node_scoped_lock l( m_Monitor, *pRight );
// Link changed before we got the lock — retry at pNode
1794 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1795 return pNode; // retry for pNode
1797 int hR = height( pRight, memory_model::memory_order_acquire );
// Imbalance has already been resolved by a concurrent repair
1798 if ( hL - hR >= -1 )
1799 return pNode; // retry
1801 node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed );
1802 int hRL = height_null( pRLeft, memory_model::memory_order_acquire );
1803 node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed );
1804 int hRR = height_null( pRRight, memory_model::memory_order_acquire );
// Right-right case: a single left rotation suffices
1806 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1809 node_scoped_lock lrl( m_Monitor, *pRLeft );
1810 if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft )
1811 return pNode; // retry
// Re-read pRLeft's height now that it is locked (stable)
1813 hRL = height( pRLeft, memory_model::memory_order_acquire );
1815 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1817 node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed );
1818 int hRLR = height_null( pRLRight, memory_model::memory_order_acquire );
1819 int balance = hRR - hRLR;
1820 if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && !pRight->is_valued( memory_model::memory_order_relaxed )))
1821 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1824 return pNode; // retry
// Focus on pRight; pNode will be balanced later if still necessary
1825 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as "shrinking" (publishing version | shrinking) so that
// concurrent optimistic readers see the in-progress rotation and wait.
// Precondition: version is pNode's current version and is not already
// marked shrinking.
1829 static void begin_change( node_type * pNode, version_type version )
1831 assert(pNode->version(memory_model::memory_order_acquire) == version );
1832 assert( (version & node_type::shrinking) == 0 );
1833 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
// Completes a shrink started by begin_change(): publishes a new version
// with the flag bits cleared, invalidating every reader snapshot taken
// before or during the rotation.
1835 static void end_change( node_type * pNode, version_type version )
1837 // Clear shrinking and unlinked flags and increment version
1838 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
// Single right rotation: pLeft replaces pNode under pParent, and pNode
// becomes pLeft's right child (inheriting pLRight as its new left child).
// The rotation is bracketed by begin_change/end_change on pNode so that
// lock-free readers traversing through pNode detect the shrink. Release
// fences between pointer-update groups keep partially-updated links safe
// for concurrent traversal. Afterwards, picks the deepest still-damaged
// node reachable with the held locks and returns it for further repair.
// NOTE(review): the `return pNode;`/`return pLeft;` lines of the damage
// branches are elided in this sampled view — confirm with full source.
1841 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1843 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1844 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1846 begin_change( pNode, nodeVersion );
// Step 1: pNode adopts pLRight as its left child
1848 pNode->m_pLeft.store( pLRight, memory_model::memory_order_release );
1849 if ( pLRight != nullptr )
1850 pLRight->parent( pNode, memory_model::memory_order_release );
1852 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 2: pNode becomes pLeft's right child
1854 pLeft->m_pRight.store( pNode, memory_model::memory_order_release );
1855 pNode->parent( pLeft, memory_model::memory_order_release );
1857 atomics::atomic_thread_fence( memory_model::memory_order_release );
// Step 3: pLeft takes pNode's place under pParent
1859 if ( pParentLeft == pNode )
1860 pParent->m_pLeft.store( pLeft, memory_model::memory_order_release );
1862 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1863 pParent->m_pRight.store( pLeft, memory_model::memory_order_release );
1865 pLeft->parent( pParent, memory_model::memory_order_release );
1867 atomics::atomic_thread_fence( memory_model::memory_order_release );
1869 // fix up heights links
1870 int hNode = 1 + std::max( hLR, hR );
1871 set_height( pNode, hNode, memory_model::memory_order_release );
1872 set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release);
1874 end_change( pNode, nodeVersion );
1875 m_stat.onRotateRight();
1877 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1878 // parent.child). pNode is the deepest. Perform as many fixes as we can
1879 // with the locks we've got.
1881 // We've already fixed the height for pNode, but it might still be outside
1882 // our allowable balance range. In that case a simple fix_height_locked()
1884 int nodeBalance = hLR - hR;
1885 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1886 // we need another rotation at pNode
1887 m_stat.onRotateAfterRightRotation();
1891 // we've fixed balance and height damage for pNode, now handle
1892 // extra-routing node damage
1893 if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) {
1894 // we need to remove pNode and then repair
1895 m_stat.onRemoveAfterRightRotation();
1899 // we've already fixed the height at pLeft, do we need a rotation here?
1900 int leftBalance = hLL - hNode;
1901 if ( leftBalance < -1 || leftBalance > 1 ) {
1902 m_stat.onRotateAfterRightRotation();
1906 // pLeft might also have routing node damage (if pLeft.left was null)
1907 if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire) ) {
1908 m_stat.onDamageAfterRightRotation();
1912 // try to fix the parent height while we've still got the lock
1913 return fix_height_locked( pParent );
// Mirror of rotate_right_locked(): single left rotation where pRight
// replaces pNode under pParent, and pNode becomes pRight's left child
// (inheriting pRLeft as its new right child). Bracketed by
// begin_change/end_change on pNode; release fences separate the pointer
// update groups for safe concurrent traversal. Returns the next damaged
// node to repair.
// NOTE(review): the `return …;` lines of the damage branches are elided
// in this sampled view — confirm with the full source.
1916 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1918 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1919 node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed );
1921 begin_change( pNode, nodeVersion );
1923 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1924 pNode->m_pRight.store( pRLeft, memory_model::memory_order_release );
1925 if ( pRLeft != nullptr )
1926 pRLeft->parent( pNode, memory_model::memory_order_release );
1928 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pNode becomes pRight's left child
1930 pRight->m_pLeft.store( pNode, memory_model::memory_order_release );
1931 pNode->parent( pRight, memory_model::memory_order_release );
1933 atomics::atomic_thread_fence( memory_model::memory_order_release );
// pRight takes pNode's place under pParent
1935 if ( pParentLeft == pNode )
1936 pParent->m_pLeft.store( pRight, memory_model::memory_order_release );
1938 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1939 pParent->m_pRight.store( pRight, memory_model::memory_order_release );
1941 pRight->parent( pParent, memory_model::memory_order_release );
1943 atomics::atomic_thread_fence( memory_model::memory_order_release );
1946 int hNode = 1 + std::max( hL, hRL );
1947 set_height( pNode, hNode, memory_model::memory_order_release );
1948 set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release);
1950 end_change( pNode, nodeVersion );
1951 m_stat.onRotateLeft();
// Check whether pNode still needs another rotation after the height fix
1953 int nodeBalance = hRL - hL;
1954 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1955 m_stat.onRotateAfterLeftRotation();
// Routing-node damage: pNode may have become an unnecessary routing node
1959 if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
1960 m_stat.onRemoveAfterLeftRotation();
1964 int rightBalance = hRR - hNode;
1965 if ( rightBalance < -1 || rightBalance > 1 ) {
1966 m_stat.onRotateAfterLeftRotation();
// pRight might also have routing-node damage (if pRight.right was null)
1970 if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire) ) {
1971 m_stat.onDamageAfterLeftRotation();
// Finally repair the parent's height while its lock is still held
1975 return fix_height_locked( pParent );
// Double rotation (left-right case): pLRight is lifted two levels to take
// pNode's place under pParent, with pLeft as its left child and pNode as
// its right child. Both pNode and pLeft are version-marked via
// begin_change/end_change for the duration of the re-linking. Returns the
// next damaged node to repair (or delegates to fix_height_locked(pParent)).
// NOTE(review): the `return …;` lines of the damage branches and the
// pParent child-selection condition before line 2007 are elided in this
// sampled view — confirm with the full source.
1978 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1980 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1981 version_type leftVersion = pLeft->version( memory_model::memory_order_acquire );
1983 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
1984 node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire );
1985 node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire );
1986 int hLRR = height_null( pLRR, memory_model::memory_order_acquire );
// Both nodes shrink during a double rotation
1988 begin_change( pNode, nodeVersion );
1989 begin_change( pLeft, leftVersion );
1991 // fix up pNode links, careful about the order!
1992 pNode->m_pLeft.store( pLRR, memory_model::memory_order_release );
1993 if ( pLRR != nullptr )
1994 pLRR->parent( pNode, memory_model::memory_order_release );
// pLeft adopts pLRight's left subtree as its right subtree
1996 pLeft->m_pRight.store( pLRL, memory_model::memory_order_release );
1997 if ( pLRL != nullptr )
1998 pLRL->parent( pLeft, memory_model::memory_order_release );
// pLRight is lifted: pLeft on its left, pNode on its right
2000 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_release );
2001 pLeft->parent( pLRight, memory_model::memory_order_release );
2003 pLRight->m_pRight.store( pNode, memory_model::memory_order_release );
2004 pNode->parent( pLRight, memory_model::memory_order_release );
// Finally pLRight replaces pNode under pParent
2007 pParent->m_pLeft.store( pLRight, memory_model::memory_order_release );
2009 assert( child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode );
2010 pParent->m_pRight.store( pLRight, memory_model::memory_order_release );
2012 pLRight->parent( pParent, memory_model::memory_order_release );
2015 int hNode = 1 + std::max( hLRR, hR );
2016 set_height( pNode, hNode, memory_model::memory_order_release );
2017 int hLeft = 1 + std::max( hLL, hLRL );
2018 set_height( pLeft, hLeft, memory_model::memory_order_release );
2019 set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release);
2021 end_change( pNode, nodeVersion );
2022 end_change( pLeft, leftVersion );
2023 m_stat.onRotateRightOverLeft();
2025 // caller should have performed only a single rotation if pLeft was going
2026 // to end up damaged
2027 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
2028 assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire )));
2030 // We have damaged pParent, pLR (now parent.child), and pNode (now
2031 // parent.child.right). pNode is the deepest. Perform as many fixes as we
2032 // can with the locks we've got.
2034 // We've already fixed the height for pNode, but it might still be outside
2035 // our allowable balance range. In that case a simple fix_height_locked()
2037 int nodeBalance = hLRR - hR;
2038 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2039 // we need another rotation at pNode
2040 m_stat.onRotateAfterRLRotation();
2044 // pNode might also be damaged by being an unnecessary routing node
2045 if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) {
2046 // repair involves splicing out pNode and maybe more rotations
2047 m_stat.onRemoveAfterRLRotation();
2051 // we've already fixed the height at pLRight, do we need a rotation here?
2052 int balanceLR = hLeft - hNode;
2053 if ( balanceLR < -1 || balanceLR > 1 ) {
2054 m_stat.onRotateAfterRLRotation();
2058 // try to fix the parent height while we've still got the lock
2059 return fix_height_locked( pParent );
// Mirror of rotate_right_over_left_locked(): double rotation (right-left
// case) where pRLeft is lifted two levels to take pNode's place under
// pParent, with pNode as its left child and pRight as its right child.
// Both pNode and pRight are version-marked via begin_change/end_change.
// Returns the next damaged node to repair.
// NOTE(review): the `return …;` lines of the damage branches and the
// pParent child-selection condition before line 2091 are elided in this
// sampled view — confirm with the full source.
2062 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
2064 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
2065 version_type rightVersion = pRight->version( memory_model::memory_order_acquire );
2067 node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed );
2068 node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire );
2069 node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire );
2070 int hRLL = height_null( pRLL, memory_model::memory_order_acquire );
// Both nodes shrink during the double rotation
2072 begin_change( pNode, nodeVersion );
2073 begin_change( pRight, rightVersion );
2075 // fix up pNode links, careful about the order!
2076 pNode->m_pRight.store( pRLL, memory_model::memory_order_release );
2077 if ( pRLL != nullptr )
2078 pRLL->parent( pNode, memory_model::memory_order_release );
// pRight adopts pRLeft's right subtree as its left subtree
2080 pRight->m_pLeft.store( pRLR, memory_model::memory_order_release );
2081 if ( pRLR != nullptr )
2082 pRLR->parent( pRight, memory_model::memory_order_release );
// pRLeft is lifted: pNode on its left, pRight on its right
2084 pRLeft->m_pRight.store( pRight, memory_model::memory_order_release );
2085 pRight->parent( pRLeft, memory_model::memory_order_release );
2087 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_release );
2088 pNode->parent( pRLeft, memory_model::memory_order_release );
// Finally pRLeft replaces pNode under pParent
2091 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_release );
2093 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
2094 pParent->m_pRight.store( pRLeft, memory_model::memory_order_release );
2096 pRLeft->parent( pParent, memory_model::memory_order_release );
2099 int hNode = 1 + std::max( hL, hRLL );
2100 set_height( pNode, hNode, memory_model::memory_order_release );
2101 int hRight = 1 + std::max( hRLR, hRR );
2102 set_height( pRight, hRight, memory_model::memory_order_release );
2103 set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release);
2105 end_change( pNode, nodeVersion );
2106 end_change( pRight, rightVersion );
2107 m_stat.onRotateLeftOverRight();
// Caller guarantees pRight won't end up damaged after the double rotation
2109 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
2111 int nodeBalance = hRLL - hL;
2112 if ( nodeBalance < -1 || nodeBalance > 1 ) {
2113 m_stat.onRotateAfterLRRotation();
// pNode may be an unnecessary routing node after the rotation
2117 if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed) ) {
2118 m_stat.onRemoveAfterLRRotation();
2122 int balRL = hRight - hNode;
2123 if ( balRL < -1 || balRL > 1 ) {
2124 m_stat.onRotateAfterLRRotation();
// Repair the parent height while its lock is still held
2128 return fix_height_locked( pParent );
2133 }} // namespace cds::container
2135 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H