3 #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
4 #define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
6 #include <cds/container/details/bronson_avltree_base.h>
7 #include <cds/urcu/details/check_deadlock.h>
9 namespace cds { namespace container {
11 /// Bronson et al AVL-tree (RCU specialization for storing pointer to values)
12 /** @ingroup cds_nonintrusive_map
13 @ingroup cds_nonintrusive_tree
14 @headerfile cds/container/bronson_avltree_map_rcu.h
16 This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
17 for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy
of the value. When a tree node is removed, the algorithm does not free the value pointer directly; instead, it calls
19 the disposer functor provided by \p Traits template parameter.
21 The set of available member functions differs from classic map.
23 <b>Template arguments</b>:
24 - \p RCU - one of \ref cds_urcu_gc "RCU type"
26 - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated
28 - \p Traits - tree traits, default is \p bronson_avltree::traits
29 It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction
30 instead of \p Traits template argument.
32 @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include appropriate RCU header file,
33 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
39 # ifdef CDS_DOXYGEN_INVOKED
40 typename Traits = bronson_avltree::traits
45 class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
48 typedef cds::urcu::gc<RCU> gc; ///< RCU Garbage collector
49 typedef Key key_type; ///< type of a key stored in the map
50 typedef T * mapped_type; ///< type of value stored in the map
51 typedef Traits traits; ///< Traits template parameter
53 # ifdef CDS_DOXYGEN_INVOKED
54 typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less
56 typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
58 typedef typename traits::item_counter item_counter; ///< Item counting policy
59 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
60 typedef typename traits::allocator allocator_type; ///< allocator for maintaining internal node
61 typedef typename traits::stat stat; ///< internal statistics
62 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
63 typedef typename traits::back_off back_off; ///< Back-off strategy
64 typedef typename traits::disposer disposer; ///< Value disposer
65 typedef typename traits::lock_type lock_type; ///< Node lock type
67 /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
68 static bool const c_bRelaxedInsert = traits::relaxed_insert;
72 typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
73 typedef typename node_type::version_type version_type;
75 typedef typename std::conditional <
76 std::is_same< typename traits::node_type, opt::none >::value,
77 bronson_avltree::node< key_type, mapped_type, lock_type >,
78 typename traits::node_type
79 >::type alloc_node_type;
81 typedef typename allocator_type::template rebind<alloc_node_type>::other memory_allocator;
82 typedef cds::details::Allocator< alloc_node_type, memory_allocator > cxx_allocator;
84 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
86 enum class find_result
93 enum class update_flags
100 result_insert = allow_insert,
101 result_update = allow_update
106 nothing_required = -3,
107 rebalance_required = -2,
111 class node_scoped_lock
115 node_scoped_lock( node_type * pNode )
118 pNode->m_Lock.lock();
123 m_pNode->m_Lock.unlock();
template <typename K>
static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
    // Allocates raw storage for the (possibly padded) alloc_node_type, then
    // placement-constructs the node_type part; the key is perfectly forwarded.
    alloc_node_type * pNode = memory_allocator().allocate( 1 );
    return new (static_cast<node_type *>(pNode)) node_type( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
static void internal_free_node( node_type * pNode )
    // Free node without disposer
    // Destroys and deallocates the node only; the stored value pointer, if any,
    // is NOT passed to the disposer — caller must guarantee the node holds none.
    cxx_allocator().Delete( static_cast<alloc_node_type *>(pNode) );
// Interior of the rcu_disposer helper: collects unlinked nodes during an
// update and hands them to RCU retirement when the disposer is destroyed.
node_type * m_pRetiredList; ///< head of retired list
    : m_pRetiredList( nullptr )
// Disposes the node's value immediately and chains the node onto the
// retired list (node memory itself is reclaimed later via RCU).
void dispose( node_type * pNode )
    mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
        pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
    // Intrusive single-linked retired list through m_pNextRemoved
    pNode->m_pNextRemoved = m_pRetiredList;
    m_pRetiredList = pNode;
// Final node destruction invoked by the RCU subsystem after a grace period
struct internal_disposer
    void operator()( alloc_node_type * p ) const
        static_cast<node_type *>(p)->~node_type();
        memory_allocator().deallocate( p, 1 );
    // Retirement must happen outside any RCU read-side critical section
    assert( !gc::is_locked() );
    for ( node_type * p = m_pRetiredList; p; ) {
        node_type * pNext = p->m_pNextRemoved;
        // Value already disposed
        gc::template retire_ptr<internal_disposer>( static_cast<alloc_node_type *>(p) );
198 typename node_type::base_class m_Root;
200 item_counter m_ItemCounter;
205 /// Creates empty map
207 : m_pRoot( static_cast<node_type *>(&m_Root) )
218 The \p key_type should be constructible from a value of type \p K.
220 RCU \p synchronize() can be called. RCU should not be locked.
222 Returns \p true if inserting successful, \p false otherwise.
template <typename K>
bool insert( K const& key, mapped_type * pVal )
    // Inserts (key, pVal) through the generic do_update() machinery with only
    // allow_insert set, so an existing key is never overwritten.
    return do_update(key, key_comparator(),
        [pVal]( node_type * pNode ) -> mapped_type*
            // A freshly inserted node must not carry a value yet
            assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
        update_flags::allow_insert
    ) == update_flags::result_insert;
237 /// Updates the value for \p key
239 The operation performs inserting or updating the value for \p key with lock-free manner.
240 If \p bInsert is \p false, only updating of existing node is possible.
242 If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
243 then the new node created from \p key is inserted into the map; note that in this case the \ref key_type should be
244 constructible from type \p K.
245 Otherwise, the value is changed to \p pVal.
247 RCU \p synchronize() method can be called. RCU should not be locked.
249 Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
250 \p second is \p true if new node has been added or \p false if the node with \p key
251 already is in the tree.
253 template <typename K, typename Func>
254 std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
256 int result = do_update( key, key_comparator(),
257 [pVal]( node_type * ) -> mapped_type*
261 update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
263 return std::make_pair( result != 0, (result & update_flags::result_insert) != 0 );
266 /// Delete \p key from the map
268 RCU \p synchronize() method can be called. RCU should not be locked.
270 Return \p true if \p key is found and deleted, \p false otherwise
272 template <typename K>
273 bool erase( K const& key )
278 /// Deletes the item from the map using \p pred predicate for searching
280 The function is an analog of \p erase(K const&)
281 but \p pred is used for key comparing.
282 \p Less functor has the interface like \p std::less.
283 \p Less must imply the same element order as the comparator used for building the map.
285 template <typename K, typename Less>
286 bool erase_with( K const& key, Less pred )
292 /// Delete \p key from the map
294 The function searches an item with key \p key, calls \p f functor
295 and deletes the item. If \p key is not found, the functor is not called.
297 The functor \p Func interface:
300 void operator()(mapped_type& item) { ... }
304 RCU \p synchronize method can be called. RCU should not be locked.
306 Return \p true if key is found and deleted, \p false otherwise
308 template <typename K, typename Func>
309 bool erase( K const& key, Func f )
314 /// Deletes the item from the map using \p pred predicate for searching
316 The function is an analog of \p erase(K const&, Func)
317 but \p pred is used for key comparing.
318 \p Less functor has the interface like \p std::less.
319 \p Less must imply the same element order as the comparator used for building the map.
321 template <typename K, typename Less, typename Func>
322 bool erase_with( K const& key, Less pred, Func f )
328 /// Extracts an item with minimal key from the map
330 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
331 If the set is empty, returns empty \p exempt_ptr.
333 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> minimum key.
334 It means that the function gets leftmost leaf of the tree and tries to unlink it.
335 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
336 So, the function returns the item with minimum key at the moment of tree traversing.
338 RCU \p synchronize method can be called. RCU should NOT be locked.
339 The function does not free the item.
340 The deallocator will be implicitly invoked when the returned object is destroyed or when
341 its \p release() member function is called.
343 exempt_ptr extract_min()
348 /// Extracts an item with maximal key from the map
350 Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
351 If the set is empty, returns empty \p exempt_ptr.
353 @note Due the concurrent nature of the map, the function extracts <i>nearly</i> maximal key.
354 It means that the function gets rightmost leaf of the tree and tries to unlink it.
During unlinking, a concurrent thread may insert an item with key greater than the rightmost item's key.
356 So, the function returns the item with maximum key at the moment of tree traversing.
358 RCU \p synchronize method can be called. RCU should NOT be locked.
359 The function does not free the item.
360 The deallocator will be implicitly invoked when the returned object is destroyed or when
361 its \p release() is called.
362 @note Before reusing \p result object you should call its \p release() method.
364 exempt_ptr extract_max()
369 /// Extracts an item from the map
371 The function searches an item with key equal to \p key in the tree,
372 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found.
373 If \p key is not found the function returns an empty \p exempt_ptr.
375 RCU \p synchronize method can be called. RCU should NOT be locked.
376 The function does not destroy the item found.
The deallocator will be implicitly invoked when the returned object is destroyed or when
378 its \p release() member function is called.
380 template <typename Q>
381 exempt_ptr extract( Q const& key )
386 /// Extracts an item from the map using \p pred for searching
388 The function is an analog of \p extract(exempt_ptr&, Q const&)
389 but \p pred is used for key compare.
390 \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
391 "predicate requirements".
392 \p pred must imply the same element order as the comparator used for building the map.
394 template <typename Q, typename Less>
395 exempt_ptr extract_with( Q const& val, Less pred )
401 /// Find the key \p key
403 The function searches the item with key equal to \p key and calls the functor \p f for item found.
404 The interface of \p Func functor is:
407 void operator()( mapped_type& item );
410 where \p item is the item found.
411 The functor is called under node-level lock.
413 The function applies RCU lock internally.
415 The function returns \p true if \p key is found, \p false otherwise.
template <typename K, typename Func>
bool find( K const& key, Func f )
    // Searches via do_find(); the user functor runs under the node's lock, so
    // the value pointer read here cannot be disposed concurrently.
    return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool {
        node_scoped_lock l( pNode );
        mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
431 /// Finds the key \p val using \p pred predicate for searching
433 The function is an analog of \p find(K const&, Func)
434 but \p pred is used for key comparing.
435 \p Less functor has the interface like \p std::less.
436 \p Less must imply the same element order as the comparator used for building the map.
438 template <typename K, typename Less, typename Func>
439 bool find_with( K const& key, Less pred, Func f )
442 return do_find( key, opt::details::make_comparator_from_less<Less>(), [&f]( node_type * pNode ) -> bool {
443 node_scoped_lock l( pNode );
444 mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
453 /// Find the key \p key
455 The function searches the item with key equal to \p key
456 and returns \p true if it is found, and \p false otherwise.
458 The function applies RCU lock internally.
template <typename K>
bool find( K const& key )
    // Presence-only check: the per-node functor does nothing and returns true.
    return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
466 /// Finds the key \p val using \p pred predicate for searching
468 The function is an analog of \p find(K const&)
469 but \p pred is used for key comparing.
470 \p Less functor has the interface like \p std::less.
471 \p Less must imply the same element order as the comparator used for building the map.
473 template <typename K, typename Less>
474 bool find_with( K const& key, Less pred )
477 return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; } );
480 /// Finds \p key and return the item found
482 The function searches the item with key equal to \p key and returns the pointer to item found.
483 If \p key is not found it returns \p nullptr.
485 RCU should be locked before call the function.
486 Returned pointer is valid while RCU is locked.
488 template <typename Q>
489 mapped_type * get( Q const& key ) const
494 /// Finds \p key with \p pred predicate and return the item found
496 The function is an analog of \p get(Q const&)
497 but \p pred is used for comparing the keys.
499 \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
500 and \p Q in any order.
501 \p pred must imply the same element order as the comparator used for building the map.
503 template <typename Q, typename Less>
504 mapped_type * get_with( Q const& key, Less pred ) const
510 /// Clears the tree (thread safe, not atomic)
512 The function unlink all items from the tree.
513 The function is thread safe but not atomic: in multi-threaded environment with parallel insertions
517 assert( set.empty() );
519 the assertion could be raised.
521 For each node the \ref disposer will be called after unlinking.
523 RCU \p synchronize method can be called. RCU should not be locked.
527 for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
531 /// Clears the tree (not thread safe)
533 This function is not thread safe and may be called only when no other thread deals with the tree.
534 The function is used in the tree destructor.
541 /// Checks if the map is empty
544 return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
547 /// Returns item count in the map
549 Only leaf nodes containing user data are counted.
551 The value returned depends on item counter type provided by \p Traits template parameter.
552 If it is \p atomicity::empty_item_counter this function always returns 0.
554 The function is not suitable for checking the tree emptiness, use \p empty()
555 member function for this purpose.
559 return m_ItemCounter;
562 /// Returns const reference to internal statistics
563 stat const& statistics() const
568 /// Checks internal consistency (not atomic, not thread-safe)
570 The debugging function to check internal consistency of the tree.
572 bool check_consistency() const
template <typename Q, typename Compare, typename Func>
bool do_find( Q& key, Compare cmp, Func f ) const
    // Drives try_find() from the root sentinel (direction 1 = right child,
    // initial version 0). try_find() resolves its own retries internally,
    // so find_result::retry must never escape to this level.
    result = try_find( key, cmp, f, m_pRoot, 1, 0 );
    assert( result != find_result::retry );
    return result == find_result::found;
591 template <typename K, typename Compare, typename Func>
592 int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
594 check_deadlock_policy::check();
596 rcu_disposer removed_list;
599 return try_udate_root( key, cmp, nFlags, funcUpdate, removed_list );
// Optimistic, version-validated descent (hand-over-hand on node versions,
// no locks taken while traversing) in the style of Bronson et al.
template <typename Q, typename Compare, typename Func>
find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
    // Caller must hold the RCU read lock for the whole traversal
    assert( gc::is_locked() );
    node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
        // Parent changed since we read the child pointer: restart from caller
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
            m_stat.onFindRetry();
            return find_result::retry;
        m_stat.onFindFailed();
        return find_result::not_found;
    int nCmp = cmp( key, pChild->m_key );
        // Key matches: found only if the node actually holds a value
        // (valueless nodes are routing nodes left by removal)
        if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
            m_stat.onFindSuccess();
            return find_result::found;
        m_stat.onFindFailed();
        return find_result::not_found;
    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
    if ( nChildVersion & node_type::shrinking ) {
        // Child is in the middle of a rotation: wait, then re-validate parent
        m_stat.onFindWaitShrinking();
        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
            m_stat.onFindRetry();
            return find_result::retry;
    else if ( nChildVersion != node_type::unlinked ) {
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
            m_stat.onFindRetry();
            return find_result::retry;
        // Descend; a retry result from below is re-attempted at this level
        find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
        if ( found != find_result::retry )
663 template <typename K, typename Compare, typename Func>
664 int try_update_root( K const* key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp )
666 assert( gc::is_locked() );
670 node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
672 version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
673 if ( nChildVersion & node_type::shrinking ) {
674 m_stat.onUpdateRootWaitShrinking();
675 pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
678 else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
679 result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion, disp );
684 if ( nFlags & update_flags::allow_insert ) {
685 // insert into tree as right child of the root
687 node_scoped_lock l( m_pRoot );
689 node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
690 mapped_type * pVal = funcUpdate( pNew );
691 assert( pVal != nullptr );
692 pNew->m_pValue.store( pVal, memory_model::memory_order_release );
694 m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
695 m_pRoot->height( 2, memory_model::memory_order_relaxed );
699 m_stat.onInsertSuccess();
700 return update_flags::result_insert;
703 return update_flags::failed;
705 } while ( result != update_flags::retry );
// Recursive insert/update descent with version validation; mirrors try_find()
// but may take node locks via try_insert_node()/try_update_node().
template <typename K, typename Compare, typename Func>
int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp )
    assert( gc::is_locked() );
    assert( nVersion != node_type::unlinked );
    int nCmp = cmp( key, pNode->m_key );
        // Exact key match: update in place if the caller allows it
        if ( nFlags & update_flags::allow_update ) {
            return try_update_node( funcUpdate, pNode );
        return update_flags::failed;
        node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
        // Re-validate pNode's version after reading the child pointer
        if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
            return update_flags::retry;
        if ( pChild == nullptr ) {
            // Empty slot in the right direction: insert here (if allowed)
            if ( nFlags & update_flags::allow_insert )
                result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp );
                result = update_flags::failed;
            result = update_flags::retry;
            version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
            if ( nChildVersion & node_type::shrinking ) {
                // Rotation in progress on the child: wait, then retry the loop
                m_stat.onUpdateWaitShrinking();
                pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
            else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
                // this second read is important, because it is protected by nChildVersion
                // validate the read that our caller took to get to node
                if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
                    m_stat.onUpdateRetry();
                    return update_flags::retry;
                // At this point we know that the traversal our parent took to get to node is still valid.
                // The recursive implementation will validate the traversal from node to
                // child, so just prior to the node nVersion validation both traversals were definitely okay.
                // This means that we are no longer vulnerable to node shrinks, and we don't need
                // to validate node version any more.
                result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion, disp );
    } while ( result == update_flags::retry );
766 template <typename K, typename Func>
767 int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp )
771 auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
772 mapped_type * pVal = funcUpdate( pNew );
773 assert( pVal != nullptr );
774 pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
777 if ( c_bRelaxedInsert ) {
778 if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
779 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
781 m_stat.onInsertRetry();
782 return update_flags::retry;
785 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
788 node_type * pDamaged;
790 node_scoped_lock l( pNode );
792 if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
793 || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
795 if ( c_RelaxedInsert ) {
796 internal_free_node( pNew );
797 m_stat.onRelaxedInsertFailed();
800 m_stat.onInsertRetry();
801 return update_flags::retry;
804 if ( !c_bRelaxedInsert )
805 fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
807 pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
808 pDamaged = fix_height_locked( pNode );
810 m_stat.onInsertSuccess();
812 fix_height_and_rebalance( pDamaged, disp );
814 return update_flags::result_insert;
817 template <typename Func>
818 int try_update_node( Func funcUpdate, node_type * pNode )
822 node_scoped_lock l( pNode );
824 if ( pNode->version( memory_model::memory_order_relaxed ) == node_type::unlinked ) {
825 if ( c_RelaxedInsert )
827 m_stat.onUpdateUnlinked();
828 return update_flags::retry;
831 pOld = pNode->value( memory_model::memory_order_relaxed );
832 mapped_type * pVal = funcUpdate( pNode );
833 assert( pVal != nullptr );
834 pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed ));
839 m_stat.onDisposeValue();
842 m_stat.onUpdateSuccess();
843 return update_flags::result_update;
846 bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
848 // pParent and pNode must be locked
849 assert( !pParent->is_unlinked(memory_model::memory_order_relaxed) );
851 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
852 node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
853 if ( pNode != pParentLeft && pNode != pParentRight ) {
854 // node is no longer a child of parent
858 assert( !pNode->is_unlinked());
859 assert( pParent == pNode->m_pParent.load(memory_model::memory_order_relaxed));
861 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
862 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
863 if ( pLeft != nullptr && pRight != nullptr ) {
864 // splicing is no longer possible
867 node_type * pSplice = pLeft ? pLeft : pRight;
869 if ( pParentLeft == pNode )
870 pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
872 pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );
875 pSplice->pParent.store( pParent, memory_model::memory_order_release );
877 // Mark the node as unlinked
878 pNode->version( node_type::unlinked, memory_model::memory_order_release );
879 disp.dispose( pNode );
885 private: // rotations
// Classifies a node: unlink_required, rebalance_required, nothing_required,
// or the corrected height (>= 0) when only the cached height is stale.
int estimate_node_condition( node_type * pNode )
    node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
    node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
    // A valueless routing node with a missing child can be unlinked outright
    if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
        return unlink_required;
    int h = pNode->height( memory_model::memory_order_relaxed );
    int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
    int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
    // Recompute height and AVL balance factor from the children
    int hNew = 1 + std::max( hL, hR );
    int nBalance = hL - hR;
    if ( nBalance < -1 || nBalance > 1 )
        return rebalance_required;
    return h != hNew ? hNew : nothing_required;
// Convenience wrapper: locks the node, then repairs its height.
node_type * fix_height( node_type * pNode )
    node_scoped_lock l( pNode );
    return fix_height_locked( pNode );
// Repairs pNode's cached height and returns the next node to examine
// (the parent when the height changed); dispatches on the condition code
// from estimate_node_condition() — presumably via a switch on 'h' (the
// switch header is not visible in this view — TODO confirm).
node_type * fix_height_locked( node_type * pNode )
    // pNode must be locked!!!
    int h = estimate_node_condition( pNode );
    case rebalance_required:
    case unlink_required:
    case nothing_required:
        // Height was stale: store the corrected value and hand the damage
        // up to the parent for further fixing
        pNode->height( h, memory_model::memory_order_relaxed );
        return pNode->m_pParent.load( memory_model::memory_order_relaxed );
930 void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp )
932 while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
933 int nCond = estimate_node_condition( pNode );
934 if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
937 if ( nCond != unlink_required && nCond != rebalance_required )
938 pNode = fix_height( pNode );
940 node_type * pParent = pNode->m_pParent.load( memory_model::memry_order_relaxed );
941 assert( pParent != nullptr );
943 node_scoped_lock lp( pParent );
944 if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
945 && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
947 node_scoped_lock ln( pNode );
948 pNode = rebalance_locked( pParent, pNode, disp );
955 node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp )
957 // pParent and pNode shlould be locked.
958 // Returns a damaged node, or nullptr if no more rebalancing is necessary
959 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
960 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
961 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
963 node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
964 node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
966 if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
967 if ( try_unlink_locked( pParent, pNode, disp ))
968 return fix_height_locked( pParent );
970 // retry needed for pNode
975 int h = pNode->height( memory_model::memory_order_relaxed );
976 int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
977 int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
978 int hNew = 1 + std::max( hL, hR );
979 int balance = hL - hR;
982 return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
983 else if ( balance < -1 )
984 return rebalance_to_left_locked( pParent, pNode, pRight, hL );
985 else if ( hNew != h ) {
986 pNode->height( hNew, memory_model::memory_order_relaxed );
988 // pParent is already locked
989 return fix_height_locked( pParent );
995 node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
997 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
998 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
999 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1001 // pParent and pNode is locked yet
1002 // pNode->pLeft is too large, we will rotate-right.
1003 // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.
1006 node_scoped_lock l( pLeft );
1007 if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
1008 return pNode; // retry for pNode
1010 int hL = pLeft->height( memory_model::memory_order_relaxed );
1012 return pNode; // retry
1014 node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
1015 int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
1016 node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1017 int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;
1021 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1024 assert( pLRight != nullptr );
1026 node_scoped_lock lr( pLRight );
1027 if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
1028 return pNode; // retry
1030 hLR = pLRight->height( memory_model::memory_order_relaxed );
1032 return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
1034 node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1035 int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
1036 int balance = hLL - hLRL;
1037 if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value(memory_model::memory_order_relaxed) == nullptr) ) {
1038 // nParent.child.left won't be damaged after a double rotation
1039 return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
1043 // focus on pLeft, if necessary pNode will be balanced later
1044 return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
1049 node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
1051 assert( pNode->m_pParent.load( memory_model::memry_order_relaxed ) == pNode );
1052 assert( m_pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
1053 || m_pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1055 // pParent and pNode is locked yet
1057 node_scoped_lock l( pRight );
1058 if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
1059 return pNode; // retry for pNode
1061 int hR = pRight->height( memory_model::memory_order_relaxed );
1062 if ( hL - hR >= -1 )
1063 return pNode; // retry
1065 node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
1066 int hRL = pRLeft > pRLeft->height( memory_model::memory_order_relaxed ) : 0;
1067 node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
1068 int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;
1070 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1073 assert( pRLeft != nullptr );
1074 node_scoped_lock lrl( pRLeft );
1075 if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
1076 return pNode; // retry
1078 hRL = pRLeft->height( memory_model::memory_order_relaxed );
1080 return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
1082 node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1083 int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
1084 int balance = hRR - hRLR;
1085 if ( b >= -1 && b <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_odel::memory_order_relaxed ) == nullptr)
1086 return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
1088 return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
1092 static void begin_change( node_type * pNode, version_type version )
1094 pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
1096 static void end_change( node_type * pNode, version_type version )
1098 // Clear shrinking and unlinked flags and increment version
1099 pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
1102 node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
1104 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1105 node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1107 begin_change( pNode, nodeVersion );
1109 pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1110 if ( pLRight != nullptr )
1111 pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1113 pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1114 pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1116 if ( pParentLeft == pNode )
1117 pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1119 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1120 pParent->m_pRight.store( pLeft, memory_mdel::memory_order_relaxed );
1122 pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1124 // fix up heights links
1125 int hNode = 1 + std::max( hLR, hR );
1126 pNode->height( hNode, memory_model::memory_order_relaxed );
1127 pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );
1129 end_change( pNode, nodeVersion );
1131 // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
1132 // parent.child). pNode is the deepest. Perform as many fixes as we can
1133 // with the locks we've got.
1135 // We've already fixed the height for pNode, but it might still be outside
1136 // our allowable balance range. In that case a simple fix_height_locked()
1138 int nodeBalance = hLR - hR;
1139 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1140 // we need another rotation at pNode
1144 // we've fixed balance and height damage for pNode, now handle
1145 // extra-routing node damage
1146 if ( (pLRight == nullptr || hR == 0) && pNode->value(memory_model::memory_order_relaxed) == nullptr ) {
1147 // we need to remove pNode and then repair
1151 // we've already fixed the height at pLeft, do we need a rotation here?
1152 int leftBalance = hLL - hNode;
1153 if ( leftBalance < -1 || leftBalance > 1 )
1156 // pLeft might also have routing node damage (if pLeft.left was null)
1157 if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
1160 // try to fix the parent height while we've still got the lock
1161 return fix_height_locked( pParent );
1164 node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
1166 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1167 node_type * pParentLeft = pParent->m_pLeft.load( memory_odel::memory_order_relaxed );
1169 begin_change( pNode, nodeVersion );
1171 // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
1172 pNode->m_pRight.store( pRLeft, memory_nodel::memory_order_relaxed );
1173 if ( pRLeft != nullptr )
1174 pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1176 pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1177 pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1179 if ( pParentLeft == pNode )
1180 pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
1182 assert( pParent->m_pRight.load( memory_odel::memory_order_relaxed ) == pNode );
1183 pParent->m_pRight.store( pRight, memory_model::memory_model_relaxed );
1185 pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1188 int hNode = 1 + std::max( hL, hRL );
1189 pNode->height( hNode, memory_model::memory_order_relaxed );
1190 pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );
1192 end_change( pNode, nodeVersion );
1194 int nodeBalance = hRL - hL;
1195 if ( nodeBalance < -1 || nodeBalance > 1 )
1198 if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1201 int rightBalance = hRR - hNode;
1202 if ( rightBalance < -1 || rightBalance > 1 )
1205 if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
1208 return fix_height_locked( pParent );
1211 node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
1213 typename node_type::value_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1214 typename node_type::value_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );
1216 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1217 node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
1218 node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
1219 int hLRR = pLRR ? pLRR->hight( memory_model::memory_order_relaxed ) : 0;
1221 begin_change( pNode, nodeVersion );
1222 begin_change( pLeft, leftVersion );
1224 // fix up pNode links, careful about the order!
1225 pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
1226 if ( pLRR != nullptr )
1227 pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1229 pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
1230 if ( pLRL != nullptr )
1231 pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );
1233 pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
1234 pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1235 pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
1236 pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
1239 pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
1241 assert( Parent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1242 pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
1244 pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1247 int hNode = 1 + std::max( hLRR, hR );
1248 pNode->height( hNode, memory_model::memory_order_relaxed );
1249 int hLeft = 1 + std::max( hLL, hLRL );
1250 pLeft->height( hLeft, memory_model::memory_order_relaxed );
1251 pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );
1253 end_change( pNode, nodeVersion );
1254 end_change( pLeft, leftVersion );
1256 // caller should have performed only a single rotation if pLeft was going
1257 // to end up damaged
1258 assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
1259 assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));
1261 // We have damaged pParent, pLR (now parent.child), and pNode (now
1262 // parent.child.right). pNode is the deepest. Perform as many fixes as we
1263 // can with the locks we've got.
1265 // We've already fixed the height for pNode, but it might still be outside
1266 // our allowable balance range. In that case a simple fix_height_locked()
1268 int nodeBalance = hLRR - hR;
1269 if ( nodeBalance < -1 || nodeBalance > 1 ) {
1270 // we need another rotation at pNode
1274 // pNode might also be damaged by being an unnecessary routing node
1275 if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
1276 // repair involves splicing out pNode and maybe more rotations
1280 // we've already fixed the height at pLRight, do we need a rotation here?
1281 final int balanceLR = hLeft - hNode;
1282 if ( balanceLR < -1 || balanceLR > 1 )
1285 // try to fix the parent height while we've still got the lock
1286 return fix_height_locked( pParent );
1289 node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
1291 version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
1292 version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );
1294 node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
1295 node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
1296 node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
1297 int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;
1299 begin_change( pNode, nodeVersion );
1300 begin_change( pRight, ghtVersion );
1302 // fix up pNode links, careful about the order!
1303 pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
1304 if ( pRLL != nullptr )
1305 pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );
1307 pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
1308 if ( pRLR != nullptr )
1309 pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );
1311 pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
1312 pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1313 pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
1314 pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
1317 pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
1319 assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
1320 pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
1322 pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );
1325 int hNode = 1 + std::max( hL, hRLL );
1326 pNode->height( hNode, memory_model::memory_order_relaxed );
1327 int hRight = 1 + std::max( hRLR, hRR );
1328 pRight->height( hRight, memory_model::memory_order_relaxed );
1329 pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );
1331 end_change( pNode, nodeVersion );
1332 end_change( pRight, rightVersion );
1334 assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );
1336 int nodeBalance = hRLL - hL;
1337 if ( nodeBalance < -1 || nodeBalance > 1 )
1339 if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
1342 int balRL = hRight - hNode;
1343 if ( balRL < -1 || balRL > 1 )
1346 return fix_height_locked( pParent );
1351 }} // namespace cds::container
1353 #endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H