#ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H
#define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H

#include <cds/container/details/bronson_avltree_base.h>
#include <cds/urcu/details/check_deadlock.h>

namespace cds { namespace container {
    /// Bronson et al AVL-tree (RCU specialization for storing pointers to values)
    /** @ingroup cds_nonintrusive_map
        @ingroup cds_nonintrusive_tree
        @headerfile cds/container/bronson_avltree_map_rcu.h

        This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree"
        for the "key -> value pointer" map. This specialization stores a pointer to the user-allocated value
        instead of a copy of the value. When a tree node is removed, the algorithm does not free the value
        pointer directly; instead, it calls the disposer functor provided by the \p Traits template parameter.

        The set of available member functions differs from that of the classic map.

        <b>Template arguments</b>:
        - \p RCU - one of \ref cds_urcu_gc "RCU type"
        - \p Key - key type
        - \p T - value type to be stored in the tree's nodes. Note that the specialization stores a pointer
            to the user-allocated value.
        - \p Traits - tree traits, default is \p bronson_avltree::traits.
            It is possible to declare an option-based tree with the \p bronson_avltree::make_traits metafunction
            instead of the \p Traits template argument.

        @note Before including <tt><cds/container/bronson_avltree_map_rcu.h></tt> you should include the appropriate
        RCU header file, see \ref cds_urcu_gc "RCU type" for the list of existing RCU classes and corresponding
        header files.
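        For example, the following sketch declares and uses such a map (the RCU flavor, the \p int key/value
        types, and the \p my_disposer functor here are illustrative assumptions, not requirements):
        \code
        #include <cds/urcu/general_buffered.h>
        #include <cds/container/bronson_avltree_map_rcu.h>

        typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;

        // The map stores int* and frees each value with this disposer
        struct my_disposer {
            void operator()( int * p ) { delete p; }
        };

        struct my_traits: public cds::container::bronson_avltree::traits {
            typedef my_disposer disposer;
        };

        typedef cds::container::BronsonAVLTreeMap< rcu_type, int, int *, my_traits > map_type;

        map_type m;
        m.insert( 42, new int( 100 ));  // the map keeps the pointer; my_disposer frees it on removal
        \endcode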
    */
    template <
        typename RCU,
        typename Key,
        typename T,
# ifdef CDS_DOXYGEN_INVOKED
        typename Traits = bronson_avltree::traits
# else
        typename Traits
# endif
    >
    class BronsonAVLTreeMap< cds::urcu::gc<RCU>, Key, T*, Traits >
    {
    public:
        typedef cds::urcu::gc<RCU>  gc;          ///< RCU Garbage collector
        typedef Key                 key_type;    ///< type of a key stored in the map
        typedef T                   mapped_type; ///< type of the value stored in the map (the map keeps \p mapped_type* pointers)
        typedef Traits              traits;      ///< Traits template parameter
# ifdef CDS_DOXYGEN_INVOKED
        typedef implementation_defined key_comparator;  ///< key compare functor based on \p Traits::compare and \p Traits::less
# else
        typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator;
# endif
        typedef typename traits::item_counter       item_counter;       ///< Item counting policy
        typedef typename traits::memory_model       memory_model;       ///< Memory ordering, see \p cds::opt::memory_model option
        typedef typename traits::allocator          allocator_type;     ///< allocator for maintaining internal nodes
        typedef typename traits::stat               stat;               ///< internal statistics
        typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
        typedef typename traits::back_off           back_off;           ///< Back-off strategy
        typedef typename traits::disposer           disposer;           ///< Value disposer
        typedef typename traits::lock_type          lock_type;          ///< Node lock type
        typedef typename gc::scoped_lock            rcu_lock;           ///< RCU scoped lock
        typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr; ///< pointer to extracted node

        /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion"
        static bool const c_bRelaxedInsert = traits::relaxed_insert;
    protected:
        typedef bronson_avltree::node< key_type, mapped_type, lock_type > node_type;
        typedef typename node_type::version_type version_type;

        typedef typename std::conditional <
            std::is_same< typename traits::node_type, opt::none >::value,
            bronson_avltree::node< key_type, mapped_type, lock_type >,
            typename traits::node_type
        >::type alloc_node_type;

        typedef typename allocator_type::template rebind<alloc_node_type>::other memory_allocator;
        typedef cds::details::Allocator< alloc_node_type, memory_allocator >     cxx_allocator;

        typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy;
        enum class find_result
        {
            found,
            not_found,
            retry
        };

        struct update_flags
        {
            enum {
                allow_insert = 1,
                allow_update = 2,

                failed = 0,     // operation failed
                retry  = 4,     // operation should be retried

                result_insert = allow_insert,
                result_update = allow_update
            };
        };

        enum node_condition
        {
            nothing_required = -3,
            rebalance_required = -2,
            unlink_required = -1
        };
        class node_scoped_lock
        {
            node_type * m_pNode;
        public:
            node_scoped_lock( node_type * pNode )
                : m_pNode( pNode )
            {
                pNode->m_Lock.lock();
            }
            ~node_scoped_lock()
            {
                m_pNode->m_Lock.unlock();
            }
        };
        template <typename K>
        static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight )
        {
            alloc_node_type * pNode = memory_allocator().allocate( 1 );
            return new (static_cast<node_type *>(pNode)) node_type( std::forward<K>( key ), nHeight, version, pParent, pLeft, pRight );
        }

        static void internal_free_node( node_type * pNode )
        {
            // Free the node without calling the value disposer
            cxx_allocator().Delete( static_cast<alloc_node_type *>(pNode) );
        }
        void dispose_node( node_type * pNode )
        {
            mapped_type * pVal = pNode->value( memory_model::memory_order_relaxed );
            pNode->m_pValue.store( nullptr, memory_model::memory_order_relaxed );
            if ( pVal )
                disposer()( pVal );
            //TODO: deallocate pNode in safe manner
        }
        typename node_type::base_class m_Root;
        node_type *     m_pRoot;
        item_counter    m_ItemCounter;
        mutable stat    m_stat;

    public:
        /// Creates empty map
        BronsonAVLTreeMap()
            : m_pRoot( static_cast<node_type *>(&m_Root) )
        {}
        /// Inserts new node with \p key and value \p pVal
        /**
            The \p key_type should be constructible from a value of type \p K.

            RCU \p synchronize() can be called. RCU should not be locked.

            Returns \p true if inserting is successful, \p false otherwise.
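            For example (a sketch, reusing \p map_type from the synopsis above):
            \code
            map_type m;
            int * pVal = new int( 100 );
            if ( !m.insert( 1, pVal ))
                delete pVal;    // the map has not taken ownership on failure
            \endcode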
        */
        template <typename K>
        bool insert( K const& key, mapped_type * pVal )
        {
            return do_update( key, key_comparator(),
                [pVal]( node_type * pNode ) -> mapped_type*
                {
                    assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr );
                    return pVal;
                },
                update_flags::allow_insert
            ) == update_flags::result_insert;
        }
        /// Updates the value for \p key
        /**
            The operation performs inserting or updating the value for \p key in a lock-free manner.
            If \p bInsert is \p false, only updating of an existing node is possible.

            If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true),
            then a new node created from \p key is inserted into the map; note that in this case the \ref key_type
            should be constructible from type \p K.
            Otherwise, the value is changed to \p pVal and the old value is passed to the disposer.

            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if the operation is successful,
            \p second is \p true if a new node has been added or \p false if the node with \p key
            is already in the tree.
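            For example (a sketch over the \p int* map from the synopsis):
            \code
            map_type m;
            std::pair<bool, bool> res = m.update( 5, new int( 1 ));
            assert( res.first && res.second );      // success, new node inserted

            res = m.update( 5, new int( 2 ));       // the old value goes to the disposer
            assert( res.first && !res.second );     // success, existing node updated

            res = m.update( 10, new int( 3 ), false ); // bInsert == false and key 10 is absent
            assert( !res.first );                   // the operation fails; the caller still owns the pointer
            \endcode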
        */
        template <typename K>
        std::pair<bool, bool> update( K const& key, mapped_type * pVal, bool bInsert = true )
        {
            int result = do_update( key, key_comparator(),
                [pVal]( node_type * ) -> mapped_type*
                {
                    return pVal;
                },
                update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0)
            );
            return std::make_pair( result != 0, (result & update_flags::result_insert) != 0 );
        }
        /// Deletes \p key from the map
        /**
            RCU \p synchronize() method can be called. RCU should not be locked.

            Returns \p true if \p key is found and deleted, \p false otherwise.
        */
        template <typename K>
        bool erase( K const& key )
        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&)
            but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
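            A typical \p Less functor might look like this (an illustrative sketch for a map keyed
            by \p std::string and searched with \p char const*):
            \code
            struct string_less {
                bool operator()( std::string const& lhs, char const* rhs ) const
                { return lhs.compare( rhs ) < 0; }
                bool operator()( char const* lhs, std::string const& rhs ) const
                { return rhs.compare( lhs ) > 0; }
            };

            // str_map is keyed by std::string; no std::string is constructed for the search
            bool removed = str_map.erase_with( "eleven", string_less());
            \endcode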
        */
        template <typename K, typename Less>
        bool erase_with( K const& key, Less pred )
        /// Deletes \p key from the map
        /**
            The function searches an item with key \p key, calls \p f functor
            and deletes the item. If \p key is not found, the functor is not called.

            The functor \p Func interface:
            \code
            struct functor {
                void operator()( mapped_type& item ) { ... }
            };
            \endcode

            RCU \p synchronize method can be called. RCU should not be locked.

            Returns \p true if the key is found and deleted, \p false otherwise.
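            For example (a sketch over the \p int* map from the synopsis):
            \code
            int last_value = 0;
            bool deleted = m.erase( 5, [&last_value]( map_type::mapped_type& item ) {
                last_value = item;  // capture the value before the disposer frees it
            });
            \endcode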
        */
        template <typename K, typename Func>
        bool erase( K const& key, Func f )
        /// Deletes the item from the map using \p pred predicate for searching
        /**
            The function is an analog of \p erase(K const&, Func)
            but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less, typename Func>
        bool erase_with( K const& key, Less pred, Func f )
        /// Extracts an item with minimal key from the map
        /**
            Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item.
            If the map is empty, returns empty \p exempt_ptr.

            @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> minimum key.
            It means that the function gets the leftmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key less than the leftmost item's key.
            So, the function returns the item with the minimum key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
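            A draining loop can be built on \p extract_min() (a sketch; \p process() is a hypothetical
            consumer, and each extracted value is handed to the disposer when \p ep is destroyed):
            \code
            while ( true ) {
                map_type::exempt_ptr ep = m.extract_min();
                if ( ep.empty())
                    break;          // the map is empty
                process( *ep );     // no RCU lock is needed here
            }
            \endcode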
        */
        exempt_ptr extract_min()
        /// Extracts an item with maximal key from the map
        /**
            Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item.
            If the map is empty, returns empty \p exempt_ptr.

            @note Due to the concurrent nature of the map, the function extracts the <i>nearly</i> maximal key.
            It means that the function gets the rightmost leaf of the tree and tries to unlink it.
            During unlinking, a concurrent thread may insert an item with a key greater than the rightmost item's key.
            So, the function returns the item with the maximum key at the moment of tree traversal.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not free the item.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() is called.
            @note Before reusing the returned \p exempt_ptr object you should call its \p release() method.
        */
        exempt_ptr extract_max()
        /// Extracts an item from the map
        /**
            The function searches an item with key equal to \p key in the tree,
            unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
            If \p key is not found the function returns an empty \p exempt_ptr.

            RCU \p synchronize method can be called. RCU should NOT be locked.
            The function does not destroy the item found.
            The deallocator will be implicitly invoked when the returned object is destroyed or when
            its \p release() member function is called.
        */
        template <typename Q>
        exempt_ptr extract( Q const& key )
        /// Extracts an item from the map using \p pred for searching
        /**
            The function is an analog of \p extract(Q const&)
            but \p pred is used for key compare.
            \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less
            "predicate requirements".
            \p pred must imply the same element order as the comparator used for building the map.
        */
        template <typename Q, typename Less>
        exempt_ptr extract_with( Q const& val, Less pred )
        /// Finds the key \p key
        /**
            The function searches the item with key equal to \p key and calls the functor \p f for the item found.
            The interface of \p Func functor is:
            \code
            struct functor {
                void operator()( mapped_type& item );
            };
            \endcode
            where \p item is the item found.
            The functor is called under node-level lock.

            The function applies RCU lock internally.

            The function returns \p true if \p key is found, \p false otherwise.
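            For example (a sketch over the \p int* map from the synopsis; keep the functor short,
            since it runs under the node-level lock):
            \code
            int v = 0;
            bool found = m.find( 5, [&v]( map_type::mapped_type& item ) {
                v = item;
            });
            \endcode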
        */
        template <typename K, typename Func>
        bool find( K const& key, Func f )
        {
            return do_find( key, key_comparator(), [&f]( node_type * pNode ) -> bool {
                node_scoped_lock l( pNode );
                mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
                if ( pVal == nullptr )
                    return false;
                f( *pVal );
                return true;
            });
        }
        /// Finds the key \p key using \p pred predicate for searching
        /**
            The function is an analog of \p find(K const&, Func)
            but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less, typename Func>
        bool find_with( K const& key, Less pred, Func f )
        {
            return do_find( key, opt::details::make_comparator_from_less<Less>(), [&f]( node_type * pNode ) -> bool {
                node_scoped_lock l( pNode );
                mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed );
                if ( pVal == nullptr )
                    return false;
                f( *pVal );
                return true;
            });
        }
        /// Finds the key \p key
        /**
            The function searches the item with key equal to \p key
            and returns \p true if it is found, \p false otherwise.

            The function applies RCU lock internally.
        */
        template <typename K>
        bool find( K const& key )
        {
            return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; });
        }
        /// Finds the key \p key using \p pred predicate for searching
        /**
            The function is an analog of \p find(K const&)
            but \p pred is used for key comparing.
            \p Less functor has the interface like \p std::less.
            \p Less must imply the same element order as the comparator used for building the map.
        */
        template <typename K, typename Less>
        bool find_with( K const& key, Less pred )
        {
            return do_find( key, opt::details::make_comparator_from_less<Less>(), []( node_type * ) -> bool { return true; });
        }
        /// Finds \p key and returns the item found
        /**
            The function searches the item with key equal to \p key and returns the pointer to the item found.
            If \p key is not found it returns \p nullptr.

            RCU should be locked before calling the function.
            The returned pointer is valid only while RCU is locked.
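            For example (a sketch; \p rcu_lock is the RCU scoped-lock typedef of this map):
            \code
            {
                map_type::rcu_lock sl;      // lock RCU
                int * p = m.get( 5 );
                if ( p ) {
                    // p is valid only while the RCU lock is held
                    std::cout << *p << "\n";
                }
            }   // RCU unlocked; p must not be dereferenced any more
            \endcode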
        */
        template <typename Q>
        mapped_type * get( Q const& key ) const
        /// Finds \p key with \p pred predicate and returns the item found
        /**
            The function is an analog of \p get(Q const&)
            but \p pred is used for comparing the keys.

            \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type
            and \p Q in any order.
            \p pred must imply the same element order as the comparator used for building the map.
        */
        template <typename Q, typename Less>
        mapped_type * get_with( Q const& key, Less pred ) const
        /// Clears the tree (thread safe, not atomic)
        /**
            The function unlinks all items from the tree.
            The function is thread safe but not atomic: in a multi-threaded environment with parallel insertions
            this sequence
            \code
            set.clear();
            assert( set.empty() );
            \endcode
            could raise the assertion, because another thread may insert an item into the tree after \p clear().

            For each node the \ref disposer will be called after unlinking.

            RCU \p synchronize method can be called. RCU should not be locked.
        */
        void clear()
        {
            for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min() )
                ep.release();
        }
        /// Clears the tree (not thread safe)
        /**
            This function is not thread safe and may be called only when no other thread deals with the tree.
            The function is used in the tree destructor.
        */
        void unsafe_clear()
        /// Checks if the map is empty
        bool empty() const
        {
            return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr;
        }
        /// Returns item count in the map
        /**
            Only leaf nodes containing user data are counted.

            The value returned depends on the item counter type provided by the \p Traits template parameter.
            If it is \p atomicity::empty_item_counter, this function always returns 0.

            The function is not suitable for checking the tree emptiness: use the \p empty()
            member function for this purpose.
        */
        size_t size() const
        {
            return m_ItemCounter;
        }
        /// Returns const reference to internal statistics
        stat const& statistics() const
        {
            return m_stat;
        }
        /// Checks internal consistency (not atomic, not thread-safe)
        /**
            The debugging function to check internal consistency of the tree.
        */
        bool check_consistency() const
    protected:
        template <typename Q, typename Compare, typename Func>
        bool do_find( Q& key, Compare cmp, Func f ) const
        {
            find_result result;
            {
                rcu_lock l;
                result = try_find( key, cmp, f, m_pRoot, 1, 0 );
            }
            assert( result != find_result::retry );
            return result == find_result::found;
        }
        template <typename K, typename Compare, typename Func>
        int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags )
        {
            check_deadlock_policy::check();

            rcu_lock l;
            return try_update_root( key, cmp, nFlags, funcUpdate );
        }
        template <typename Q, typename Compare, typename Func>
        find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const
        {
            assert( gc::is_locked() );
            assert( pNode != nullptr );

            while ( true ) {
                node_type * pChild = pNode->child( nDir ).load( memory_model::memory_order_relaxed );
                if ( pChild == nullptr ) {
                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }

                    m_stat.onFindFailed();
                    return find_result::not_found;
                }

                int nCmp = cmp( key, pChild->m_key );
                if ( nCmp == 0 ) {
                    if ( pChild->is_valued( memory_model::memory_order_relaxed ) ) {
                        // key found
                        if ( f( pChild ) ) {
                            m_stat.onFindSuccess();
                            return find_result::found;
                        }
                    }
                    m_stat.onFindFailed();
                    return find_result::not_found;
                }

                version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                if ( nChildVersion & node_type::shrinking ) {
                    m_stat.onFindWaitShrinking();
                    pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );

                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }
                }
                else if ( nChildVersion != node_type::unlinked ) {
                    if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) {
                        m_stat.onFindRetry();
                        return find_result::retry;
                    }

                    find_result found = try_find( key, cmp, f, pChild, nCmp, nChildVersion );
                    if ( found != find_result::retry )
                        return found;
                }
            }
        }
        template <typename K, typename Compare, typename Func>
        int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate )
        {
            assert( gc::is_locked() );

            int result;
            do {
                node_type * pChild = m_Root.child( 1, memory_model::memory_order_acquire );
                if ( pChild != nullptr ) {
                    version_type nChildVersion = pChild->version( memory_model::memory_order_relaxed );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onUpdateRootWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        result = update_flags::retry;
                    }
                    else if ( pChild == m_Root.child( 1, memory_model::memory_order_acquire ) ) {
                        result = try_update( key, cmp, nFlags, funcUpdate, m_pRoot, pChild, nChildVersion );
                    }
                    else
                        result = update_flags::retry;
                }
                else {
                    // the tree is empty
                    if ( nFlags & update_flags::allow_insert ) {
                        // insert into tree as right child of the root
                        {
                            node_scoped_lock l( m_pRoot );

                            node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr );
                            mapped_type * pVal = funcUpdate( pNew );
                            assert( pVal != nullptr );
                            pNew->m_pValue.store( pVal, memory_model::memory_order_release );

                            m_pRoot->child( pNew, 1, memory_model::memory_order_relaxed );
                            m_pRoot->height( 2, memory_model::memory_order_relaxed );
                        }

                        ++m_ItemCounter;
                        m_stat.onInsertSuccess();
                        return update_flags::result_insert;
                    }

                    return update_flags::failed;
                }
            } while ( result == update_flags::retry );

            return result;
        }
        template <typename K, typename Compare, typename Func>
        int try_update( K const& key, Compare cmp, int nFlags, Func funcUpdate, node_type * pParent, node_type * pNode, version_type nVersion )
        {
            assert( gc::is_locked() );
            assert( nVersion != node_type::unlinked );

            int nCmp = cmp( key, pNode->m_key );
            if ( nCmp == 0 ) {
                if ( nFlags & update_flags::allow_update ) {
                    return try_update_node( funcUpdate, pNode );
                }
                return update_flags::failed;
            }

            int result;
            do {
                node_type * pChild = pNode->child( nCmp ).load( memory_model::memory_order_relaxed );
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion )
                    return update_flags::retry;

                if ( pChild == nullptr ) {
                    // insert new node
                    if ( nFlags & update_flags::allow_insert )
                        result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion );
                    else
                        result = update_flags::failed;
                }
                else {
                    // update the child
                    result = update_flags::retry;
                    version_type nChildVersion = pChild->version( memory_model::memory_order_acquire );
                    if ( nChildVersion & node_type::shrinking ) {
                        m_stat.onUpdateWaitShrinking();
                        pChild->template wait_until_shrink_completed<back_off>( memory_model::memory_order_relaxed );
                        // retry
                    }
                    else if ( pChild == pNode->child( nCmp ).load( memory_model::memory_order_relaxed ) ) {
                        // this second read is important, because it is protected by nChildVersion

                        // validate the read that our caller took to get to pNode
                        if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) {
                            m_stat.onUpdateRetry();
                            return update_flags::retry;
                        }

                        // At this point we know that the traversal our parent took to get to pNode is still valid.
                        // The recursive implementation will validate the traversal from pNode to pChild, so just
                        // prior to the nVersion validation both traversals were definitely okay.
                        // This means that we are no longer vulnerable to node shrinks, and we don't need
                        // to validate nVersion any more.
                        result = try_update( key, cmp, nFlags, funcUpdate, pNode, pChild, nChildVersion );
                    }
                }
            } while ( result == update_flags::retry );

            return result;
        }
        template <typename K, typename Func>
        int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion )
        {
            node_type * pNew = nullptr;

            auto fnCreateNode = [&funcUpdate]( node_type * pNew ) {
                mapped_type * pVal = funcUpdate( pNew );
                assert( pVal != nullptr );
                pNew->m_pValue.store( pVal, memory_model::memory_order_relaxed );
            };

            if ( c_bRelaxedInsert ) {
                // Allocate and fill the new node before locking pNode
                if ( pNode->version( memory_model::memory_order_acquire ) != nVersion
                     || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
                {
                    m_stat.onInsertRetry();
                    return update_flags::retry;
                }

                fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));
            }

            node_type * pDamaged;
            {
                node_scoped_lock l( pNode );

                if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion
                     || pNode->child( nDir ).load( memory_model::memory_order_relaxed ) != nullptr )
                {
                    if ( c_bRelaxedInsert ) {
                        internal_free_node( pNew );
                        m_stat.onRelaxedInsertFailed();
                    }

                    m_stat.onInsertRetry();
                    return update_flags::retry;
                }

                if ( !c_bRelaxedInsert )
                    fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr ));

                pNode->child( pNew, nDir, memory_model::memory_order_relaxed );
                pDamaged = fix_height_locked( pNode );
            }

            ++m_ItemCounter;
            m_stat.onInsertSuccess();

            fix_height_and_rebalance( pDamaged );

            return update_flags::result_insert;
        }
        template <typename Func>
        int try_update_node( Func funcUpdate, node_type * pNode )
        {
            mapped_type * pOld;
            {
                node_scoped_lock l( pNode );

                if ( pNode->version( memory_model::memory_order_relaxed ) == node_type::unlinked ) {
                    m_stat.onUpdateUnlinked();
                    return update_flags::retry;
                }

                pOld = pNode->value( memory_model::memory_order_relaxed );
                mapped_type * pVal = funcUpdate( pNode );
                assert( pVal != nullptr );
                pNode->m_pValue.store( pVal, memory_model::memory_order_relaxed );
            }

            if ( pOld ) {
                disposer()( pOld );
                m_stat.onDisposeValue();
            }

            m_stat.onUpdateSuccess();
            return update_flags::result_update;
        }
        bool try_unlink_locked( node_type * pParent, node_type * pNode )
        {
            // pParent and pNode must be locked
            assert( !pParent->is_unlinked( memory_model::memory_order_relaxed ) );

            node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pParentRight = pParent->m_pRight.load( memory_model::memory_order_relaxed );
            if ( pNode != pParentLeft && pNode != pParentRight ) {
                // pNode is no longer a child of pParent
                return false;
            }

            assert( !pNode->is_unlinked() );
            assert( pParent == pNode->m_pParent.load( memory_model::memory_order_relaxed ) );

            node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );
            if ( pLeft != nullptr && pRight != nullptr ) {
                // splicing is no longer possible
                return false;
            }
            node_type * pSplice = pLeft ? pLeft : pRight;

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pSplice, memory_model::memory_order_relaxed );
            else
                pParent->m_pRight.store( pSplice, memory_model::memory_order_relaxed );

            if ( pSplice )
                pSplice->m_pParent.store( pParent, memory_model::memory_order_release );

            // Mark the node as unlinked
            pNode->version( node_type::unlinked, memory_model::memory_order_release );
            dispose_node( pNode );

            return true;
        }
    private: // rotations
        int estimate_node_condition( node_type * pNode )
        {
            node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );

            if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
                return unlink_required;

            int h = pNode->height( memory_model::memory_order_relaxed );
            int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
            int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;

            int hNew = 1 + std::max( hL, hR );
            int nBalance = hL - hR;

            if ( nBalance < -1 || nBalance > 1 )
                return rebalance_required;

            return h != hNew ? hNew : nothing_required;
        }
        node_type * fix_height( node_type * pNode )
        {
            node_scoped_lock l( pNode );
            return fix_height_locked( pNode );
        }
        node_type * fix_height_locked( node_type * pNode )
        {
            // pNode must be locked!!!
            int h = estimate_node_condition( pNode );
            switch ( h ) {
                case rebalance_required:
                case unlink_required:
                    return pNode;       // pNode is damaged
                case nothing_required:
                    return nullptr;     // nothing to do
                default:
                    pNode->height( h, memory_model::memory_order_relaxed );
                    return pNode->m_pParent.load( memory_model::memory_order_relaxed );
            }
        }
        void fix_height_and_rebalance( node_type * pNode )
        {
            while ( pNode && pNode->m_pParent.load( memory_model::memory_order_relaxed ) ) {
                int nCond = estimate_node_condition( pNode );
                if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_relaxed ) )
                    return;

                if ( nCond != unlink_required && nCond != rebalance_required )
                    pNode = fix_height( pNode );
                else {
                    node_type * pParent = pNode->m_pParent.load( memory_model::memory_order_relaxed );
                    assert( pParent != nullptr );
                    {
                        node_scoped_lock lp( pParent );
                        if ( !pParent->is_unlinked( memory_model::memory_order_relaxed )
                             && pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent )
                        {
                            node_scoped_lock ln( pNode );
                            pNode = rebalance_locked( pParent, pNode );
                        }
                    }
                }
            }
        }
        node_type * rebalance_locked( node_type * pParent, node_type * pNode )
        {
            // pParent and pNode should be locked.
            // Returns a damaged node, or nullptr if no more rebalancing is necessary
            assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
            assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
                 || pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );

            node_type * pLeft = pNode->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pRight = pNode->m_pRight.load( memory_model::memory_order_relaxed );

            if ( (pLeft == nullptr || pRight == nullptr) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
                if ( try_unlink_locked( pParent, pNode ))
                    return fix_height_locked( pParent );
                else {
                    // retry needed for pNode
                    return pNode;
                }
            }

            int h = pNode->height( memory_model::memory_order_relaxed );
            int hL = pLeft ? pLeft->height( memory_model::memory_order_relaxed ) : 0;
            int hR = pRight ? pRight->height( memory_model::memory_order_relaxed ) : 0;
            int hNew = 1 + std::max( hL, hR );
            int balance = hL - hR;

            if ( balance > 1 )
                return rebalance_to_right_locked( pParent, pNode, pLeft, hR );
            else if ( balance < -1 )
                return rebalance_to_left_locked( pParent, pNode, pRight, hL );
            else if ( hNew != h ) {
                pNode->height( hNew, memory_model::memory_order_relaxed );

                // pParent is already locked
                return fix_height_locked( pParent );
            }
            else
                return nullptr;
        }
        node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR )
        {
            assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
            assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
                 || pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );

            // pParent and pNode are already locked.
            // pNode->pLeft is too large, we will rotate-right.
            // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft.

            {
                node_scoped_lock l( pLeft );
                if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft )
                    return pNode; // retry for pNode

                int hL = pLeft->height( memory_model::memory_order_relaxed );
                if ( hL - hR <= 1 )
                    return pNode; // retry

                node_type * pLRight = pLeft->m_pRight.load( memory_model::memory_order_relaxed );
                int hLR = pLRight ? pLRight->height( memory_model::memory_order_relaxed ) : 0;
                node_type * pLLeft = pLeft->m_pLeft.load( memory_model::memory_order_relaxed );
                int hLL = pLLeft ? pLLeft->height( memory_model::memory_order_relaxed ) : 0;

                if ( hLL >= hLR ) {
                    // single rotation is enough
                    return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );
                }
                else {
                    assert( pLRight != nullptr );
                    {
                        node_scoped_lock lr( pLRight );
                        if ( pLeft->m_pRight.load( memory_model::memory_order_relaxed ) != pLRight )
                            return pNode; // retry

                        hLR = pLRight->height( memory_model::memory_order_relaxed );
                        if ( hLL >= hLR )
                            return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR );

                        node_type * pLRLeft = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
                        int hLRL = pLRLeft ? pLRLeft->height( memory_model::memory_order_relaxed ) : 0;
                        int balance = hLL - hLRL;
                        if ( balance >= -1 && balance <= 1 && !((hLL == 0 || hLRL == 0) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr) ) {
                            // pParent.child.left won't be damaged after a double rotation
                            return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL );
                        }
                    }

                    // focus on pLeft, if necessary pNode will be balanced later
                    return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL );
                }
            }
        }
        node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL )
        {
            assert( pNode->m_pParent.load( memory_model::memory_order_relaxed ) == pParent );
            assert( pParent->m_pLeft.load( memory_model::memory_order_relaxed ) == pNode
                 || pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );

            // pParent and pNode are already locked.

            {
                node_scoped_lock l( pRight );
                if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight )
                    return pNode; // retry for pNode

                int hR = pRight->height( memory_model::memory_order_relaxed );
                if ( hL - hR >= -1 )
                    return pNode; // retry

                node_type * pRLeft = pRight->m_pLeft.load( memory_model::memory_order_relaxed );
                int hRL = pRLeft ? pRLeft->height( memory_model::memory_order_relaxed ) : 0;
                node_type * pRRight = pRight->m_pRight.load( memory_model::memory_order_relaxed );
                int hRR = pRRight ? pRRight->height( memory_model::memory_order_relaxed ) : 0;

                if ( hRR >= hRL ) {
                    // single rotation is enough
                    return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );
                }
                else {
                    assert( pRLeft != nullptr );
                    {
                        node_scoped_lock lrl( pRLeft );
                        if ( pRight->m_pLeft.load( memory_model::memory_order_relaxed ) != pRLeft )
                            return pNode; // retry

                        hRL = pRLeft->height( memory_model::memory_order_relaxed );
                        if ( hRR >= hRL )
                            return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR );

                        node_type * pRLRight = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
                        int hRLR = pRLRight ? pRLRight->height( memory_model::memory_order_relaxed ) : 0;
                        int balance = hRR - hRLR;
                        if ( balance >= -1 && balance <= 1 && !((hRR == 0 || hRLR == 0) && pRight->value( memory_model::memory_order_relaxed ) == nullptr))
                            return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR );
                    }

                    return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR );
                }
            }
        }
        static void begin_change( node_type * pNode, version_type version )
        {
            pNode->version( version | node_type::shrinking, memory_model::memory_order_release );
        }
        static void end_change( node_type * pNode, version_type version )
        {
            // Clear shrinking and unlinked flags and increment version
            pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release );
        }
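        // A rotation "shrinks" the subtree rooted at pNode, so begin_change() publishes the
        // shrinking bit before the links are rewritten, and end_change() installs the next
        // version number once the links are consistent again. A reader that sampled the old
        // version either waits for the shrinking bit to clear (wait_until_shrink_completed())
        // or sees a version mismatch and retries; this is the hand-over-hand validation
        // performed by try_find() and try_update().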
        node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
            node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );

            begin_change( pNode, nodeVersion );

            // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
            pNode->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
            if ( pLRight != nullptr )
                pLRight->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pLeft, memory_model::memory_order_relaxed );

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed );
            }
            pLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hLR, hR );
            pNode->height( hNode, memory_model::memory_order_relaxed );
            pLeft->height( 1 + std::max( hLL, hNode ), memory_model::memory_order_relaxed );

            end_change( pNode, nodeVersion );

            // We have damaged pParent, pNode (now parent.child.right), and pLeft (now
            // parent.child). pNode is the deepest. Perform as many fixes as we can
            // with the locks we've got.

            // We've already fixed the height for pNode, but it might still be outside
            // our allowable balance range. In that case a simple fix_height_locked()
            // won't help.
            int nodeBalance = hLR - hR;
            if ( nodeBalance < -1 || nodeBalance > 1 ) {
                // we need another rotation at pNode
                return pNode;
            }

            // we've fixed balance and height damage for pNode, now handle
            // extra-routing node damage
            if ( (pLRight == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
                // we need to remove pNode and then repair
                return pNode;
            }

            // we've already fixed the height at pLeft, do we need a rotation here?
            int leftBalance = hLL - hNode;
            if ( leftBalance < -1 || leftBalance > 1 )
                return pLeft;

            // pLeft might also have routing node damage (if pLeft.left was null)
            if ( hLL == 0 && pLeft->value( memory_model::memory_order_relaxed ) == nullptr )
                return pLeft;

            // try to fix the parent height while we've still got the lock
            return fix_height_locked( pParent );
        }
        node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
            node_type * pParentLeft = pParent->m_pLeft.load( memory_model::memory_order_relaxed );

            begin_change( pNode, nodeVersion );

            // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode
            pNode->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
            if ( pRLeft != nullptr )
                pRLeft->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pRight, memory_model::memory_order_relaxed );

            if ( pParentLeft == pNode )
                pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed );
            }
            pRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hL, hRL );
            pNode->height( hNode, memory_model::memory_order_relaxed );
            pRight->height( 1 + std::max( hNode, hRR ), memory_model::memory_order_relaxed );

            end_change( pNode, nodeVersion );

            int nodeBalance = hRL - hL;
            if ( nodeBalance < -1 || nodeBalance > 1 ) {
                // we need another rotation at pNode
                return pNode;
            }

            if ( (pRLeft == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
                // pNode is an unnecessary routing node: remove it and then repair
                return pNode;
            }

            int rightBalance = hRR - hNode;
            if ( rightBalance < -1 || rightBalance > 1 )
                return pRight;

            if ( hRR == 0 && pRight->value( memory_model::memory_order_relaxed ) == nullptr )
                return pRight;

            return fix_height_locked( pParent );
        }
        node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
            version_type leftVersion = pLeft->version( memory_model::memory_order_relaxed );

            node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pLRL = pLRight->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pLRR = pLRight->m_pRight.load( memory_model::memory_order_relaxed );
            int hLRR = pLRR ? pLRR->height( memory_model::memory_order_relaxed ) : 0;

            begin_change( pNode, nodeVersion );
            begin_change( pLeft, leftVersion );

            // fix up pNode links, careful about the order!
            pNode->m_pLeft.store( pLRR, memory_model::memory_order_relaxed );
            if ( pLRR != nullptr )
                pLRR->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed );
            if ( pLRL != nullptr )
                pLRL->m_pParent.store( pLeft, memory_model::memory_order_relaxed );

            pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed );
            pLeft->m_pParent.store( pLRight, memory_model::memory_order_relaxed );
            pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pLRight, memory_model::memory_order_relaxed );

            if ( pPL == pNode )
                pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed );
            }
            pLRight->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hLRR, hR );
            pNode->height( hNode, memory_model::memory_order_relaxed );
            int hLeft = 1 + std::max( hLL, hLRL );
            pLeft->height( hLeft, memory_model::memory_order_relaxed );
            pLRight->height( 1 + std::max( hLeft, hNode ), memory_model::memory_order_relaxed );

            end_change( pNode, nodeVersion );
            end_change( pLeft, leftVersion );

            // caller should have performed only a single rotation if pLeft was going
            // to end up damaged
            assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 );
            assert( !((hLL == 0 || pLRL == nullptr) && pLeft->value( memory_model::memory_order_relaxed ) == nullptr ));

            // We have damaged pParent, pLRight (now parent.child), and pNode (now
            // parent.child.right). pNode is the deepest. Perform as many fixes as we
            // can with the locks we've got.

            // We've already fixed the height for pNode, but it might still be outside
            // our allowable balance range. In that case a simple fix_height_locked()
            // won't help.
            int nodeBalance = hLRR - hR;
            if ( nodeBalance < -1 || nodeBalance > 1 ) {
                // we need another rotation at pNode
                return pNode;
            }

            // pNode might also be damaged by being an unnecessary routing node
            if ( (pLRR == nullptr || hR == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr ) {
                // repair involves splicing out pNode and maybe more rotations
                return pNode;
            }

            // we've already fixed the height at pLRight, do we need a rotation here?
            int balanceLR = hLeft - hNode;
            if ( balanceLR < -1 || balanceLR > 1 )
                return pLRight;

            // try to fix the parent height while we've still got the lock
            return fix_height_locked( pParent );
        }
        node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR )
        {
            version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed );
            version_type rightVersion = pRight->version( memory_model::memory_order_relaxed );

            node_type * pPL = pParent->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pRLL = pRLeft->m_pLeft.load( memory_model::memory_order_relaxed );
            node_type * pRLR = pRLeft->m_pRight.load( memory_model::memory_order_relaxed );
            int hRLL = pRLL ? pRLL->height( memory_model::memory_order_relaxed ) : 0;

            begin_change( pNode, nodeVersion );
            begin_change( pRight, rightVersion );

            // fix up pNode links, careful about the order!
            pNode->m_pRight.store( pRLL, memory_model::memory_order_relaxed );
            if ( pRLL != nullptr )
                pRLL->m_pParent.store( pNode, memory_model::memory_order_relaxed );

            pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed );
            if ( pRLR != nullptr )
                pRLR->m_pParent.store( pRight, memory_model::memory_order_relaxed );

            pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed );
            pRight->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );
            pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed );
            pNode->m_pParent.store( pRLeft, memory_model::memory_order_relaxed );

            if ( pPL == pNode )
                pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed );
            else {
                assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode );
                pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed );
            }
            pRLeft->m_pParent.store( pParent, memory_model::memory_order_relaxed );

            // fix up heights
            int hNode = 1 + std::max( hL, hRLL );
            pNode->height( hNode, memory_model::memory_order_relaxed );
            int hRight = 1 + std::max( hRLR, hRR );
            pRight->height( hRight, memory_model::memory_order_relaxed );
            pRLeft->height( 1 + std::max( hNode, hRight ), memory_model::memory_order_relaxed );

            end_change( pNode, nodeVersion );
            end_change( pRight, rightVersion );

            assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 );

            int nodeBalance = hRLL - hL;
            if ( nodeBalance < -1 || nodeBalance > 1 )
                return pNode;

            if ( (pRLL == nullptr || hL == 0) && pNode->value( memory_model::memory_order_relaxed ) == nullptr )
                return pNode;

            int balRL = hRight - hNode;
            if ( balRL < -1 || balRL > 1 )
                return pRLeft;

            return fix_height_locked( pParent );
        }
    };
}}  // namespace cds::container

#endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H