3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <cds/container/details/bronson_avltree_base.h>
7 #include <cds/urcu/details/check_deadlock.h>
9 namespace cds { namespace container {
11 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
12 /** @ingroup cds_nonintrusive_map
13 @ingroup cds_nonintrusive_tree
14 @headerfile cds/container/bronson_avltree_map_rcu.h
16 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
17 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
18 of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call
19 the disposer functor provided by \p Traits template parameter.
21 The set of available member functions differs from classic map.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::allocator allocator_type; ///< allocator for maintaining internal node
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::lock_type lock_type; ///< Node lock type
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static bool const c_bRelaxedInsert = traits::relaxed_insert;
72 typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
74 typedef typename std::conditional <
75 std::is_same< typename traits::node_type, opt::none >::value,
76 bronson_avltree::node< key_type, mapped_type, lock_type >,
77 typename traits::node_type
78 >::type alloc_node_type;
80 typedef typename allocator_type::template rebind<alloc_node_type>::other memory_allocator;
81 typedef cds::details::Allocator< alloc_node_type, memory_allocator > cxx_allocator;
83 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
85 enum class find_result
92 enum class update_flags
99 result_insert = allow_insert,
100 result_update = allow_update
105 nothing_required = -3,
106 rebalance_required = -2,
// RAII guard over a node's per-node mutex: locks m_Lock on construction,
// unlocks it in the destructor. Used wherever "locked" variants of the
// tree operations require the node to be held.
110 class node_scoped_lock
114 node_scoped_lock( node_type * pNode )
117 pNode->m_Lock.lock();
// Destructor: release the lock acquired in the constructor.
122 m_pNode->m_Lock.unlock();
130 template <typename K>
// Allocates raw storage for one node from the rebound allocator and
// placement-constructs node_type with the forwarded key, height, version
// and parent/left/right links. The node's value pointer is set by the caller.
131 static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
133 alloc_node_type * pNode = memory_allocator().allocate( 1 );
134 return new (static_cast<node_type *>(pNode)) node_type( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
// Returns a node's memory to the allocator WITHOUT invoking the value
// disposer — used for nodes that never became visible to other threads
// (e.g. a relaxed-insert node that lost the race).
137 static void internal_free_node( node_type * pNode )
139 // Free node without disposer
140 cxx_allocator().Delete( static_cast<alloc_node_type *>(pNode) );
// Detaches the user value from an unlinked node so the traits-provided
// disposer can reclaim it; the node's value slot is cleared first so no
// reader observes a dangling pointer through this node.
143 static void dispose_node( node_type * pNode )
145 mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
147 pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
150 //TODO: deallocate pNode in safe manner
157 typename node_type::base_class m_Root;
159 item_counter m_ItemCounter;
164 /// Creates empty map
166 : m_pRoot( static_cast<node_type *>(&m_Root) )
177 The \p key_type should be constructible from a value of type \p K.
179 RCU \p synchronize() can be called. RCU should not be locked.
181 Returns \p true if inserting successful, \p false otherwise.
183 template <typename K>
// Inserts (key -> pVal); returns true only if a brand-new mapping was created.
184 bool insert( K const& key, mapped_type * pVal )
186 return do_update( key,
// The functor supplies the value pointer for a freshly created/claimed node;
// the node's value slot must still be empty at this point.
187 [pVal]( node_type * pNode ) -> mapped_type*
189 assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
// allow_insert only: an existing key makes do_update fail rather than update.
192 update_flags::allow_insert
193 ) == update_flags::result_insert;
196 /// Updates the value for \p key
198 The operation performs inserting or updating the value for \p key with lock-free manner.
199 If \p bInsert is \p false, only updating of existing node is possible.
201 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
202 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
203 constructible from type \p K.
204 Otherwise, the value is changed to \p pVal.
206 RCU \p synchronize() method can be called. RCU should not be locked.
208 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
209 \p second is \p true if new node has been added or \p false if the node with \p key
210 already is in the tree.
212 template <typename K, typename Func>
// Inserts or updates (key -> pVal); bInsert == false restricts to update-only.
// Returns {succeeded, inserted-new-node}.
213 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
215 int result = do_update( key,
216 [pVal]( node_type * ) -> mapped_type*
// allow_update always; allow_insert only when the caller permits insertion.
220 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
// result is a bitmask of update_flags; result_insert marks a new node.
222 return std::make_pair( result != 0, (result & update_flags::result_insert) != 0 );
225 /// Delete \p key from the map
227 RCU \p synchronize() method can be called. RCU should not be locked.
229 Return \p true if \p key is found and deleted, \p false otherwise
231 template <typename K>
232 bool erase( K const& key )
237 /// Deletes the item from the map using \p pred predicate for searching
239 The function is an analog of \p erase(K const&)
240 but \p pred is used for key comparing.
241 \p Less functor has the interface like \p std::less.
242 \p Less must imply the same element order as the comparator used for building the map.
244 template <typename K, typename Less>
245 bool erase_with( K const& key, Less pred )
251 /// Delete \p key from the map
253 The function searches an item with key \p key, calls \p f functor
254 and deletes the item. If \p key is not found, the functor is not called.
256 The functor \p Func interface:
259 void operator()(mapped_type& item) { ... }
263 RCU \p synchronize method can be called. RCU should not be locked.
265 Return \p true if key is found and deleted, \p false otherwise
267 template <typename K, typename Func>
268 bool erase( K const& key, Func f )
273 /// Deletes the item from the map using \p pred predicate for searching
275 The function is an analog of \p erase(K const&, Func)
276 but \p pred is used for key comparing.
277 \p Less functor has the interface like \p std::less.
278 \p Less must imply the same element order as the comparator used for building the map.
280 template <typename K, typename Less, typename Func>
281 bool erase_with( K const& key, Less pred, Func f )
287 /// Extracts an item with minimal key from the map
289 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
290 If the set is empty, returns empty \p exempt_ptr.
292 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
293 It means that the function gets leftmost leaf of the tree and tries to unlink it.
294 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
295 So, the function returns the item with minimum key at the moment of tree traversing.
297 RCU \p synchronize method can be called. RCU should NOT be locked.
298 The function does not free the item.
299 The deallocator will be implicitly invoked when the returned object is destroyed or when
300 its \p release() member function is called.
302 exempt_ptr extract_min()
307 /// Extracts an item with maximal key from the map
309 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
310 If the set is empty, returns empty \p exempt_ptr.
312 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
313 It means that the function gets rightmost leaf of the tree and tries to unlink it.
314 During unlinking, a concurrent thread may insert an item with key great than leftmost item's key.
315 So, the function returns the item with maximum key at the moment of tree traversing.
317 RCU \p synchronize method can be called. RCU should NOT be locked.
318 The function does not free the item.
319 The deallocator will be implicitly invoked when the returned object is destroyed or when
320 its \p release() is called.
321 @note Before reusing \p result object you should call its \p release() method.
323 exempt_ptr extract_max()
328 /// Extracts an item from the map
330 The function searches an item with key equal to \p key in the tree,
331 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found.
332 If \p key is not found the function returns an empty \p exempt_ptr.
334 RCU \p synchronize method can be called. RCU should NOT be locked.
335 The function does not destroy the item found.
336 The dealloctor will be implicitly invoked when the returned object is destroyed or when
337 its \p release() member function is called.
339 template <typename Q>
340 exempt_ptr extract( Q const& key )
345 /// Extracts an item from the map using \p pred for searching
347 The function is an analog of \p extract(exempt_ptr&, Q const&)
348 but \p pred is used for key compare.
349 \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
350 "predicate requirements".
351 \p pred must imply the same element order as the comparator used for building the map.
353 template <typename Q, typename Less>
354 exempt_ptr extract_with( Q const& val, Less pred )
360 /// Find the key \p key
362 The function searches the item with key equal to \p key and calls the functor \p f for item found.
363 The interface of \p Func functor is:
366 void operator()( mapped_type& item );
369 where \p item is the item found.
370 The functor is called under node-level lock.
372 The function applies RCU lock internally.
374 The function returns \p true if \p key is found, \p false otherwise.
376 template <typename K, typename Func>
// Finds key and calls f(*pVal) under the node's lock; returns true if found.
377 bool find( K const& key, Func f )
379 return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool {
// Lock the node so the value pointer cannot be swapped out under the functor.
380 node_scoped_lock l( pNode );
381 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
390 /// Finds the key \p val using \p pred predicate for searching
392 The function is an analog of \p find(K const&, Func)
393 but \p pred is used for key comparing.
394 \p Less functor has the interface like \p std::less.
395 \p Less must imply the same element order as the comparator used for building the map.
397 template <typename K, typename Less, typename Func>
// Same as find(key, f) but orders keys with the user-supplied Less predicate,
// wrapped into a three-way comparator.
398 bool find_with( K const& key, Less pred, Func f )
401 return do_find( key, opt::details::make_comparator_from_less<Less>(), [&f]( node_type * pNode ) -> bool {
// Node-level lock protects the value pointer while the functor runs.
402 node_scoped_lock l( pNode );
403 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
412 /// Find the key \p key
414 The function searches the item with key equal to \p key
415 and returns \p true if it is found, and \p false otherwise.
417 The function applies RCU lock internally.
419 template <typename K>
// Presence check only: the no-op functor always accepts the found node.
420 bool find( K const& key )
422 return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
425 /// Finds the key \p val using \p pred predicate for searching
427 The function is an analog of \p find(K const&)
428 but \p pred is used for key comparing.
429 \p Less functor has the interface like \p std::less.
430 \p Less must imply the same element order as the comparator used for building the map.
432 template <typename K, typename Less>
// Presence check using a Less predicate instead of the default comparator.
433 bool find_with( K const& key, Less pred )
436 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
439 /// Finds \p key and return the item found
441 The function searches the item with key equal to \p key and returns the pointer to item found.
442 If \p key is not found it returns \p nullptr.
444 RCU should be locked before call the function.
445 Returned pointer is valid while RCU is locked.
447 template <typename Q>
448 mapped_type * get( Q const& key ) const
453 /// Finds \p key with \p pred predicate and return the item found
455 The function is an analog of \p get(Q const&)
456 but \p pred is used for comparing the keys.
458 \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
459 and \p Q in any order.
460 \p pred must imply the same element order as the comparator used for building the map.
462 template <typename Q, typename Less>
463 mapped_type * get_with( Q const& key, Less pred ) const
469 /// Clears the tree (thread safe, not atomic)
471 The function unlink all items from the tree.
472 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
476 assert( set.empty() );
478 the assertion could be raised.
480 For each node the \ref disposer will be called after unlinking.
482 RCU \p synchronize method can be called. RCU should not be locked.
486 for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
490 /// Clears the tree (not thread safe)
492 This function is not thread safe and may be called only when no other thread deals with the tree.
493 The function is used in the tree destructor.
500 /// Checks if the map is empty
503 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
506 /// Returns item count in the map
508 Only leaf nodes containing user data are counted.
510 The value returned depends on item counter type provided by \p Traits template parameter.
511 If it is \p atomicity::empty_item_counter this function always returns 0.
513 The function is not suitable for checking the tree emptiness, use \p empty()
514 member function for this purpose.
518 return m_ItemCounter;
521 /// Returns const reference to internal statistics
522 stat const& statistics() const
527 /// Checks internal consistency (not atomic, not thread-safe)
529 The debugging function to check internal consistency of the tree.
531 bool check_consistency() const
538 template <typename Q, typename Compare, typename Func>
// Common find driver: runs try_find from the root sentinel (right child,
// nDir = 1, version 0) until it returns a non-retry result.
539 bool do_find( Q& key, Compare cmp, Func f ) const
544 result = try_find( key, cmp, f, m_pRoot, 1, 0 );
// The retry loop above must have converged before we get here.
546 assert( result != find_result::retry );
547 return result == find_result::found;
550 template <typename K, typename Func>
/// Common insert/update driver.
/**
    Checks the RCU deadlock policy (do_update may trigger \p gc::synchronize()
    via node disposal, so it must not run inside an RCU read-side section),
    then descends from the root sentinel.
    Returns a bitmask of \p update_flags values.
*/
int do_update( K const& key, Func funcUpdate, int nFlags )
{
    check_deadlock_policy::check();

    // Fixed: the call was misspelled 'try_udate' and passed an undeclared
    // 'pVal'; the update functor itself must be forwarded.
    return try_update( key, funcUpdate, nFlags, m_pRoot, 1, 0 );
}
563 template <typename Q, typename Compare, typename Func>
// Hand-over-hand optimistic search (Bronson et al.): descend from pNode in
// direction nDir, validating pNode's version (nVersion) after every read.
// Must be called inside an RCU read-side critical section.
564 find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, typename node_type::version_type nVersion ) const
566 assert( gc::is_locked() );
570 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
// Version changed since the caller read it => pNode was shrunk; retry above.
572 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
573 m_stat.onFindRetry();
574 return find_result::retry;
// (elided branch) pChild == nullptr => key is not in this subtree.
577 m_stat.onFindFailed();
578 return find_result::not_found;
581 int nCmp = cmp( key, pChild->m_key );
// Key matches pChild: a valued node is a real mapping; a routing node is not.
583 if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
586 m_stat.onFindSuccess();
587 return find_result::found;
591 m_stat.onFindFailed();
592 return find_result::not_found;
595 typename node_type::version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
// Child mid-shrink: wait for the rotation to finish, then re-validate pNode.
596 if ( nChildVersion & node_type::shrinking ) {
597 m_stat.onFindWaitShrinking();
598 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
600 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
601 m_stat.onFindRetry();
602 return find_result::retry;
// Child still linked: re-validate pNode, then recurse into the child.
605 else if ( nChildVersion != node_type::unlinked ) {
607 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
608 m_stat.onFindRetry();
609 return find_result::retry;
// Recursive descent; only a child-level retry loops back here.
612 find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
613 if ( found != find_result::retry )
619 template <typename K, typename Func>
// Insert/update step at (pNode, nDir): retries locally (do/while) until the
// child slot is stable, delegating to try_insert_node for an empty slot.
// Returns an update_flags bitmask.
620 int try_update( K const& key, Func funcUpdate, int nFlags, node_type * pNode, int nDir, typename node_type::version_type nVersion )
624 node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
// pNode shrank since the caller captured nVersion: propagate retry upward.
625 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
626 return update_flags::retry;
628 if ( pChild == nullptr ) {
// Empty slot: insert here if allowed, otherwise the update fails.
630 if ( nFlags & update_flags::allow_insert )
631 result = try_insert_node( key, funcUpdate, pNode, nDir, nVersion );
633 result = update_flags::failed;
638 } while ( result == update_flags::retry );
642 template <typename K, typename Func>
/// Inserts a new leaf for \p key as the \p nDir child of \p pNode.
/**
    \p funcUpdate supplies the value pointer for the new node.
    With relaxed insertion enabled the node is allocated before taking the
    parent lock (optimistic); on conflict it is freed without the disposer.
    Returns \p update_flags::retry on version/slot conflict,
    \p update_flags::result_insert on success.
*/
int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, typename node_type::version_type nVersion )
{
    node_type * pNew = nullptr;

    // Fill the freshly allocated node's value slot from the user functor.
    auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
        mapped_type * pVal = funcUpdate( pNew );
        assert( pVal != nullptr );
        pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
    };

    if ( c_bRelaxedInsert ) {
        // Optimistic pre-check before allocating outside the lock.
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
          || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
        {
            m_stat.onInsertRetry();
            return update_flags::retry;
        }

        fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
    }

    node_type * pDamaged;
    {
        node_scoped_lock l( pNode );

        // Re-validate under the lock: parent must be unchanged and slot empty.
        if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
          || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
        {
            // Fixed: was 'c_RelaxedInsert' (undeclared); the constant is c_bRelaxedInsert.
            if ( c_bRelaxedInsert ) {
                // The speculative node never became visible: free it directly.
                internal_free_node( pNew );
                m_stat.onRelaxedInsertFailed();
            }

            m_stat.onInsertRetry();
            return update_flags::retry;
        }

        if ( !c_bRelaxedInsert )
            fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));

        pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
        pDamaged = fix_height_locked( pNode );
    }
    m_stat.onInsertSuccess();

    fix_height_and_rebalance( pDamaged );

    return update_flags::result_insert;
}
693 bool try_unlink_locked( node_type * pParent, node_type * pNode )
695 // pParent and pNode must be locked
696 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
698 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
699 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
700 if ( pNode != pParentLeft && pNode != pParentRight ) {
701 // node is no longer a child of parent
705 assert( !pNode->is_unlinked());
706 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
708 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
709 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
710 if ( pLeft != nullptr && pRight != nullptr ) {
711 // splicing is no longer possible
714 node_type * pSplice = pLeft ? pLeft : pRight;
716 if ( pParentLeft == pNode )
717 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
719 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
722 pSplice->pParent.store( pParent, memory_model::memory_order_release );
724 pNode->version( node_type::unlinked, memory_model::memory_order_release );
725 dispose_node( pNode );
731 private: // rotations
// Classifies pNode's repair need: unlink_required for an unvalued routing
// node with at most one child, rebalance_required when |hL - hR| > 1,
// a positive new height when only the height field is stale, or
// nothing_required.
733 int estimate_node_condition( node_type * pNode )
735 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
736 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
// A valueless node with a missing child is a removable routing node.
738 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
739 return unlink_required;
741 int h = pNode->height( memory_model::memory_order_relaxed );
742 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
743 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
745 int hNew = 1 + std::max( hL, hR );
746 int nBalance = hL - hR;
// AVL invariant violated => rotation needed.
748 if ( nBalance < -1 || nBalance > 1 )
749 return rebalance_required;
// Stale height => return the corrected height; otherwise nothing to do.
751 return h != hNew ? hNew : nothing_required;
// Locks pNode and repairs its height field; returns the next damaged node
// (the parent) or nullptr, as reported by fix_height_locked.
754 node_type * fix_height( node_type * pNode )
756 node_scoped_lock l( pNode );
757 return fix_height_locked( pNode );
// Height repair for an already-locked node. Rebalance/unlink conditions are
// reported back as pNode itself (elided switch cases); a stale height is
// written in place and the parent is returned as the next candidate.
760 node_type * fix_height_locked( node_type * pNode )
762 // pNode must be locked!!!
763 int h = estimate_node_condition( pNode );
765 case rebalance_required:
766 case unlink_required:
768 case nothing_required:
// default: h is the corrected height; store it and propagate to the parent.
771 pNode->height( h, memory_model::memory_order_relaxed );
772 return pNode->m_pParent.load( memory_model::memory_order_relaxed );
776 void fix_height_and_rebalance( node_type * pNode )
778 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
779 int nCond = estimate_node_condition( pNode );
780 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
783 if ( nCond != unlink_required && nCond != rebalance_required )
784 pNode = fix_height( pNode );
786 node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
787 assert( pParent != nullptr );
789 node_scoped_lock lp( pParent );
790 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
791 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
793 node_scoped_lock ln( pNode );
794 pNode = rebalance_locked( pParent, pNode );
801 node_type * rebalance_locked( node_type * pParent, node_type * pNode )
803 // pParent and pNode shlould be locked.
804 // Returns a damaged node, or nullptr if no more rebalancing is necessary
805 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
806 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
807 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
809 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
810 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
812 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
813 if ( try_unlink_locked( pParent, pNode ))
814 return fix_height_locked( pParent );
816 // retry needed for pNode
821 int h = pNode->height( memory_model::memory_order_relaxed );
822 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
823 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
824 int hNew = 1 + std::max( hL, hR );
825 int balance = hL - hR;
828 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
829 else if ( balance < -1 )
830 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
831 else if ( hNew != h ) {
832 pNode->height( hNew, memory_model::memory_order_relaxed );
834 // pParent is already locked
835 return fix_height_locked( pParent );
841 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
843 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
844 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
845 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
847 // pParent and pNode is locked yet
848 // pNode->pLeft is too large, we will rotate-right.
849 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
852 node_scoped_lock l( pLeft );
853 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
854 return pNode; // retry for pNode
856 int hL = pLeft->height( memory_model::memory_order_relaxed );
858 return pNode; // retry
860 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
861 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
862 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
863 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
867 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
870 assert( pLRight != nullptr );
872 node_scoped_lock lr( pLRight );
873 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
874 return pNode; // retry
876 hLR = pLRight->height( memory_model::memory_order_relaxed );
878 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
880 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
881 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
882 int balance = hLL - hLRL;
883 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
884 // nParent.child.left won't be damaged after a double rotation
885 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
889 // focus on pLeft, if necessary pNode will be balanced later
890 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
895 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
897 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
898 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
899 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
901 // pParent and pNode is locked yet
903 node_scoped_lock l( pRight );
904 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
905 return pNode; // retry for pNode
907 int hR = pRight->height( memory_model::memory_order_relaxed );
909 return pNode; // retry
911 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
912 int hRL = pRLeft > pRLeft->height( memory_model::memory_order_relaxed ) : 0;
913 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
914 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
916 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
919 assert( pRLeft != nullptr );
920 node_scoped_lock lrl( pRLeft );
921 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
922 return pNode; // retry
924 hRL = pRLeft->height( memory_model::memory_order_relaxed );
926 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
928 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
929 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
930 int balance = hRR - hRLR;
931 if ( b >= -1 && b <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_odel::memory_order_relaxed ) == nullptr)
932 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
934 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
// Marks pNode as shrinking (rotation in progress) by OR-ing the shrinking
// flag into its version; readers seeing the flag wait or retry.
938 static void begin_change( node_type * pNode, typename node_type::version_type version )
940 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
942 static void end_change( node_type * pNode, tpename node_type::version_type version )
944 // Clear shrinking nd unlinked flags and increment version
945 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
948 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
950 node_type::version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
951 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
953 begin_change( pNode, nodeVersion );
955 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
956 if ( pLRight != nullptr )
957 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
959 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
960 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
962 if ( pParentLeft == pNode )
963 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
965 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
966 pParent->m_pRight.store( pLeft, memory_mdel::memory_order_relaxed );
968 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
970 // fix up heights links
971 int hNode = 1 + std::max( hLR, hR );
972 pNode->height( hNode, memory_model::memory_order_relaxed );
973 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
975 end_change( pNode, nodeVersion );
977 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
978 // parent.child). pNode is the deepest. Perform as many fixes as we can
979 // with the locks we've got.
981 // We've already fixed the height for pNode, but it might still be outside
982 // our allowable balance range. In that case a simple fix_height_locked()
984 int nodeBalance = hLR - hR;
985 if ( nodeBalance < -1 || nodeBalance > 1 ) {
986 // we need another rotation at pNode
990 // we've fixed balance and height damage for pNode, now handle
991 // extra-routing node damage
992 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
993 // we need to remove pNode and then repair
997 // we've already fixed the height at pLeft, do we need a rotation here?
998 int leftBalance = hLL - hNode;
999 if ( leftBalance < -1 || leftBalance > 1 )
1002 // pLeft might also have routing node damage (if pLeft.left was null)
1003 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1006 // try to fix the parent height while we've still got the lock
1007 return fix_height_locked( pParent );
1010 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1012 typename node_type::version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1013 node_type * pParentLeft = pParent->m_pLeft.load( memory_odel::memory_order_relaxed );
1015 begin_change( pNode, nodeVersion );
1017 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1018 pNode->m_pRight.store( pRLeft, memory_nodel::memory_order_relaxed );
1019 if ( pRLeft != nullptr )
1020 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1022 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1023 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1025 if ( pParentLeft == pNode )
1026 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1028 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1029 pParent->m_pRight.store( pRight, memory_model::memory_model_relaxed );
1031 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1034 int hNode = 1 + std::max( hL, hRL );
1035 pNode->height( hNode, memory_model::memory_order_relaxed );
1036 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1038 end_change( pNode, nodeVersion );
1040 int nodeBalance = hRL - hL;
1041 if ( nodeBalance < -1 || nodeBalance > 1 )
1044 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1047 int rightBalance = hRR - hNode;
1048 if ( rightBalance < -1 || rightBalance > 1 )
1051 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1054 return fix_height_locked( pParent );
1057 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1059 typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1060 typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1062 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1063 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1064 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
1065 int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
1067 begin_change( pNode, nodeVersion );
1068 begin_change( pLeft, leftVersion );
1070 // fix up pNode links, careful about the order!
1071 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1072 if ( pLRR != nullptr )
1073 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1075 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1076 if ( pLRL != nullptr )
1077 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1079 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1080 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1081 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1082 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1085 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1087 assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1088 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1090 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1093 int hNode = 1 + std::max( hLRR, hR );
1094 pNode->height( hNode, memory_model::memory_order_relaxed );
1095 int hLeft = 1 + std::max( hLL, hLRL );
1096 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1097 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1099 end_change( pNode, nodeVersion );
1100 end_change( pLeft, leftVersion );
1102 // caller should have performed only a single rotation if pLeft was going
1103 // to end up damaged
1104 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1105 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1107 // We have damaged pParent, pLR (now parent.child), and pNode (now
1108 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1109 // can with the locks we've got.
1111 // We've already fixed the height for pNode, but it might still be outside
1112 // our allowable balance range. In that case a simple fix_height_locked()
1114 int nodeBalance = hLRR - hR;
1115 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1116 // we need another rotation at pNode
1120 // pNode might also be damaged by being an unnecessary routing node
1121 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1122 // repair involves splicing out pNode and maybe more rotations
1126 // we've already fixed the height at pLRight, do we need a rotation here?
1127 final int balanceLR = hLeft - hNode;
1128 if ( balanceLR < -1 || balanceLR > 1 )
1131 // try to fix the parent height while we've still got the lock
1132 return fix_height_locked( pParent );
1135 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1137 typename node_type::version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1138 typename node_type::version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1140 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1141 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1142 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1143 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1145 begin_change( pNode, nodeVersion );
1146 begin_change( pRight, ghtVersion );
1148 // fix up pNode links, careful about the order!
1149 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1150 if ( pRLL != nullptr )
1151 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1153 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1154 if ( pRLR != nullptr )
1155 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1157 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1158 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1159 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1160 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1163 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1165 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1166 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1168 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1171 int hNode = 1 + std::max( hL, hRLL );
1172 pNode->height( hNode, memory_model::memory_order_relaxed );
1173 int hRight = 1 + std::max( hRLR, hRR );
1174 pRight->height( hRight, memory_model::memory_order_relaxed );
1175 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1177 end_change( pNode, nodeVersion );
1178 end_change( pRight, rightVersion );
1180 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1182 int nodeBalance = hRLL - hL;
1183 if ( nodeBalance < -1 || nodeBalance > 1 )
1185 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1188 int balRL = hRight - hNode;
1189 if ( balRL < -1 || balRL > 1 )
1192 return fix_height_locked( pParent );
1197 }} // namespace cds::container
1199 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H