3 #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H
4 #define __CDS_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/details/skip_list_base.h>
9 #include <cds/opt/compare.h>
10 #include <cds/urcu/details/check_deadlock.h>
11 #include <cds/details/binary_functor_wrapper.h>
12 #include <cds/urcu/exempt_ptr.h>
15 namespace cds { namespace intrusive {
// NOTE(review): this extract is sampled -- some interior lines (braces, a few
// statements) are absent; only visible tokens are annotated/changed here.
/// Skip-list node specialization for RCU-based garbage collectors.
/// Stores the level-0 next pointer inline (m_pNext) and levels 1..height-1
/// in a separately allocated "tower" array (m_arrNext).
21 template <class RCU, typename Tag>
22 class node< cds::urcu::gc< RCU >, Tag >
24 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
25 typedef Tag tag ; ///< tag
// Two mark bits are packed into the low bits of every next pointer:
28 // bit 0 - the item is logically deleted
29 // bit 1 - the item is extracted (only for level 0)
30 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
31 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
32 typedef atomic_marked_ptr tower_item_type;
35 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
37 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Linked/unlinked flags are plain volatile bools, not atomics -- presumably
// ordered by the surrounding list algorithm / RCU; TODO confirm upstream.
39 bool volatile m_bLinked;
40 bool volatile m_bUnlinked;
43 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
44 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
47 /// Constructs a node of height 1 (a bottom-list node)
50 , m_pDelChain( nullptr )
53 , m_bUnlinked( false )
56 , m_arrNext( nullptr )
// Destructor-time sanity check: a node still linked must have been unlinked.
62 assert( !m_bLinked || m_bUnlinked );
66 /// Constructs a node of height \p nHeight
67 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
69 assert( nHeight > 0 );
70 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
71 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
74 m_arrNext = nextTower;
/// Detaches the tower array from the node; the caller takes ownership.
78 atomic_marked_ptr * release_tower()
80 atomic_marked_ptr * pTower = m_arrNext;
86 atomic_marked_ptr * get_tower() const
// Resets upper-level next pointers (levels 1..m_nHeight-1) to null/unmarked.
93 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
94 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
97 /// Access to element of next pointer array
98 atomic_marked_ptr& next( unsigned int nLevel )
100 assert( nLevel < height() );
101 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
// Level 0 lives inline in m_pNext; higher levels index m_arrNext[nLevel-1].
103 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
106 /// Access to element of next pointer array (const version)
107 atomic_marked_ptr const& next( unsigned int nLevel ) const
109 assert( nLevel < height() );
// FIX: parenthesize the mixed ||/&& expression to match the non-const
// overload above and silence -Wparentheses (behavior is unchanged --
// && already bound tighter than ||).
110 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
112 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
115 /// Access to element of next pointer array (same as \ref next function)
116 atomic_marked_ptr& operator[]( unsigned int nLevel )
118 return next( nLevel );
121 /// Access to element of next pointer array (same as \ref next function)
122 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
124 return next( nLevel );
127 /// Height of the node
128 unsigned int height() const
133 /// Clears internal links
136 assert( m_arrNext == nullptr );
137 m_pNext.store( marked_ptr(), atomics::memory_order_release );
138 m_pDelChain = nullptr;
141 bool is_cleared() const
143 return m_pNext == atomic_marked_ptr()
144 && m_arrNext == nullptr
148 } // namespace skip_list
152 namespace skip_list { namespace details {
/// Forward iterator over the RCU skip-list; thread-safe ONLY while RCU is
/// locked (every constructor asserts gc::is_locked()). Traversal walks the
/// bottom-level (level 0) list and skips logically-deleted nodes.
154 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
155 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
158 typedef cds::urcu::gc< RCU > gc;
159 typedef NodeTraits node_traits;
160 typedef BackOff back_off;
161 typedef typename node_traits::node_type node_type;
162 typedef typename node_traits::value_type value_type;
163 static bool const c_isConst = IsConst;
// value_ref is const-qualified for const_iterator instantiations.
165 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
168 typedef typename node_type::marked_ptr marked_ptr;
169 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
176 // RCU should be locked before iterating!!!
177 assert( gc::is_locked() );
// A set mark bit on the topmost next pointer means the node is logically
// deleted (nodes are marked starting from the highest level).
182 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
183 // Current node is marked as deleted. So, its next pointer can point to anything
184 // In this case we interrupt our iteration and returns end() iterator.
189 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
190 node_type * pp = p.ptr();
192 // p is marked as deleted. Spin waiting for physical removal
196 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
197 // p is marked as deleted. Spin waiting for physical removal
207 public: // for internal use only!!!
// Constructs a begin() iterator positioned on the first live node after
// the head tower.
208 iterator( node_type& refHead )
211 // RCU should be locked before iterating!!!
212 assert( gc::is_locked() );
217 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
223 node_type * pp = p.ptr();
224 // Logically deleted node is marked from highest level
225 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
238 // RCU should be locked before iterating!!!
239 assert( gc::is_locked() );
242 iterator( iterator const& s)
243 : m_pNode( s.m_pNode )
245 // RCU should be locked before iterating!!!
246 assert( gc::is_locked() );
249 value_type * operator ->() const
251 assert( m_pNode != nullptr );
252 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
254 return node_traits::to_value_ptr( m_pNode );
257 value_ref operator *() const
259 assert( m_pNode != nullptr );
260 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
262 return *node_traits::to_value_ptr( m_pNode );
266 iterator& operator ++()
272 iterator& operator = (const iterator& src)
274 m_pNode = src.m_pNode;
// Cross-instantiation comparisons: iterator and const_iterator (and any
// back-off policy) compare by node pointer.
278 template <typename Bkoff, bool C>
279 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
281 return m_pNode == i.m_pNode;
283 template <typename Bkoff, bool C>
284 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
286 return !( *this == i );
289 }} // namespace skip_list::details
292 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
293 /** @ingroup cds_intrusive_map
294 @anchor cds_intrusive_SkipListSet_rcu
296 The implementation of well-known probabilistic data structure called skip-list
297 invented by W.Pugh in his papers:
298 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
299 - [1990] W.Pugh A Skip List Cookbook
301 A skip-list is a probabilistic data structure that provides expected logarithmic
302 time search without the need of rebalance. The skip-list is a collection of sorted
303 linked lists. Nodes are ordered by key. Each node is linked into a subset of the lists.
304 Each list has a level, ranging from 0 to 32. The bottom-level list contains
305 all the nodes, and each higher-level list is a sublist of the lower-level lists.
306 Each node is created with a random top level (with a random height), and belongs
307 to all lists up to that level. The probability that a node has the height 1 is 1/2.
308 The probability that a node has the height N is 1/2 ** N (more precisely,
309 the distribution depends on a random generator provided, but our generators
312 The lock-free variant of skip-list is implemented according to book
313 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
314 chapter 14.4 "A Lock-Free Concurrent Skiplist".
316 <b>Template arguments</b>:
317 - \p RCU - one of \ref cds_urcu_gc "RCU type"
318 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
319 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
320 - \p Traits - set traits, default is \p skip_list::traits
321 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
322 instead of \p Traits template argument.
324 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
325 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
329 The class supports a forward iterator (\ref iterator and \ref const_iterator).
330 The iteration is ordered.
332 You may iterate over skip-list set items only under RCU lock.
333 Only in this case the iterator is thread-safe since
334 while RCU is locked any set's item cannot be reclaimed.
336 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
337 (i.e. inserting, erasing and so on) is not possible.
339 @warning The iterator object cannot be passed between threads.
341 Example how to use skip-list set iterators:
343 // First, you should include the header for RCU type you have chosen
344 #include <cds/urcu/general_buffered.h>
345 #include <cds/intrusive/skip_list_rcu.h>
347 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
353 // Traits for your skip-list.
354 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
355 struct my_traits: public cds::intrusive::skip_list::traits
359 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
361 my_skiplist_set theSet;
367 // Apply RCU locking manually
368 typename rcu_type::scoped_lock sl;
370 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
374 // rcu_type::scoped_lock destructor releases RCU lock implicitly
378 The iterator class supports the following minimalistic interface:
385 iterator( iterator const& s);
387 value_type * operator ->() const;
388 value_type& operator *() const;
391 iterator& operator ++();
394 iterator& operator = (const iterator& src);
396 bool operator ==(iterator const& i ) const;
397 bool operator !=(iterator const& i ) const;
400 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
404 You should incorporate skip_list::node into your struct \p T and provide
405 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
406 define a struct based on \p skip_list::traits.
408 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
410 // First, you should include the header for RCU type you have chosen
411 #include <cds/urcu/general_buffered.h>
413 // Include RCU skip-list specialization
414 #include <cds/intrusive/skip_list_rcu.h>
417 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
419 // Data stored in skip list
420 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
429 // my_data compare functor
431 int operator()( const my_data& d1, const my_data& d2 )
433 return d1.strKey.compare( d2.strKey );
436 int operator()( const my_data& d, const std::string& s )
438 return d.strKey.compare(s);
441 int operator()( const std::string& s, const my_data& d )
443 return s.compare( d.strKey );
448 struct my_traits: public cds::intrusive::skip_list::traits
450 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
451 typedef my_data_cmp compare;
454 // Declare skip-list set type
455 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
458 Equivalent option-based code:
460 #include <cds/urcu/general_buffered.h>
461 #include <cds/intrusive/skip_list_rcu.h>
463 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
472 // Declare option-based skip-list set
473 typedef cds::intrusive::SkipListSet< rcu_type
475 , typename cds::intrusive::skip_list::make_traits<
476 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
477 ,cds::intrusive::opt::compare< my_data_cmp >
486 #ifdef CDS_DOXYGEN_INVOKED
487 ,typename Traits = skip_list::traits
/// RCU specialization of the intrusive skip-list set: type aliases,
/// compile-time height limits, search-position scratch state and data members.
492 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
495 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
496 typedef T value_type; ///< type of value stored in the skip-list
497 typedef Traits traits; ///< Traits template parameter
499 typedef typename traits::hook hook; ///< hook type
500 typedef typename hook::node_type node_type; ///< node type
502 # ifdef CDS_DOXYGEN_INVOKED
503 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
505 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
508 typedef typename traits::disposer disposer; ///< disposer
509 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
511 typedef typename traits::item_counter item_counter; ///< Item counting policy used
512 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
513 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
514 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
515 typedef typename traits::back_off back_off; ///< Back-off strategy
516 typedef typename traits::stat stat; ///< internal statistics type
517 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
518 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
519 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
522 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
524 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
525 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Clamp the generator's upper bound to the library-wide height limit at
// compile time via std::conditional over integral_constant wrappers.
527 static unsigned int const c_nMaxHeight = std::conditional<
528 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
529 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
530 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
534 static unsigned int const c_nMinHeight = 5;
538 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
539 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
543 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Use the traits-provided node builder when given, otherwise the default
// intrusive builder above.
545 typedef typename std::conditional<
546 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
547 ,intrusive_node_builder
548 ,typename traits::internal_node_builder
549 >::type node_builder;
551 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// position: per-operation scratch recording predecessors/successors at
// every level, plus a thread-local chain of nodes awaiting disposal.
554 node_type * pPrev[ c_nMaxHeight ];
555 node_type * pSucc[ c_nMaxHeight ];
556 node_type * pNext[ c_nMaxHeight ];
559 node_type * pDelChain;
562 : pDelChain( nullptr )
567 assert( pDelChain == nullptr );
572 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
576 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
578 item_counter m_ItemCounter; ///< item counter
579 random_level_generator m_RandomLevelGen; ///< random level generator instance
580 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
581 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
582 mutable stat m_Stat; ///< internal statistics
/// Draws a random node height in [1..32] (generator yields [0..31]).
586 unsigned int random_level()
588 // Random generator produces a number from range [0..31]
589 // We need a number from range [1..32]
590 return m_RandomLevelGen() + 1;
/// Builds a node (allocating its tower) for value \p v with a random height.
593 template <typename Q>
594 node_type * build_node( Q v )
596 return node_builder::make_tower( v, m_RandomLevelGen );
/// Frees the node's tower storage; used as the retired-pointer disposer.
599 static void dispose_node( value_type * pVal )
603 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
// Functor adapter over dispose_node for exempt_ptr.
609 void operator()( value_type * pVal )
611 dispose_node( pVal );
617 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
/// True when bit 1 (the "extracted" mark, level 0 only) is set on \p p.
622 bool is_extracted( marked_node_ptr const p ) const
624 return (p.bits() & 2) != 0;
/// Locates the position of \p val, filling \p pos with per-level
/// predecessors/successors; helps unlink logically-deleted nodes it meets.
/// Returns \p true iff a node comparing equal to \p val was found.
/// Must be called under RCU lock (asserted below).
627 template <typename Q, typename Compare >
628 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
630 assert( gc::is_locked() );
633 marked_node_ptr pSucc;
634 marked_node_ptr pCur;
// Start from the head tower and descend from the highest level.
638 pPred = m_Head.head();
640 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
643 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
645 // pCur.bits() means that pPred is logically deleted
649 if ( pCur.ptr() == nullptr ) {
650 // end of the list at level nLevel - goto next level
654 // pSucc contains deletion mark for pCur
655 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Re-check that pPred still points at pCur; otherwise restart the level.
657 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
660 if ( pSucc.bits() ) {
661 // pCur is marked, i.e. logically deleted.
662 marked_node_ptr p( pCur.ptr() );
// Help physically unlink the marked node by CAS-ing pPred past it.
663 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
664 memory_model::memory_order_release, atomics::memory_order_relaxed ))
668 pCur->m_bUnlinked = true;
671 if ( !is_extracted( pSucc )) {
672 // We cannot free the node at this moment since RCU is locked
673 // Link deleted nodes to a chain to free later
674 link_for_remove( pos, pCur.ptr() );
675 m_Stat.onEraseWhileFind();
678 m_Stat.onExtractWhileFind();
685 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
688 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove position at this level before descending.
696 pos.pPrev[ nLevel ] = pPred;
697 pos.pSucc[ nLevel ] = pCur.ptr();
704 pos.pCur = pCur.ptr();
705 return pCur.ptr() && nCmp == 0;
/// Finds the leftmost (minimum) live node, filling \p pos; helps unlink
/// logically-deleted nodes on the way. RCU must be locked.
708 bool find_min_position( position& pos )
710 assert( gc::is_locked() );
713 marked_node_ptr pSucc;
714 marked_node_ptr pCur;
717 pPred = m_Head.head();
719 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
721 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
722 // pCur.bits() means that pPred is logically deleted
723 // head cannot be deleted
724 assert( pCur.bits() == 0 );
728 // pSucc contains deletion mark for pCur
729 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Verify pPred still links to pCur; otherwise retry this level.
731 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
734 if ( pSucc.bits() ) {
735 // pCur is marked, i.e. logically deleted.
736 marked_node_ptr p( pCur.ptr() );
737 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
738 memory_model::memory_order_release, atomics::memory_order_relaxed ))
742 pCur->m_bUnlinked = true;
745 if ( !is_extracted( pSucc )) {
746 // We cannot free the node at this moment since RCU is locked
747 // Link deleted nodes to a chain to free later
748 link_for_remove( pos, pCur.ptr() );
749 m_Stat.onEraseWhileFind();
752 m_Stat.onExtractWhileFind();
761 pos.pPrev[ nLevel ] = pPred;
762 pos.pSucc[ nLevel ] = pCur.ptr();
// pos.pCur is the minimum candidate; nullptr means the set is empty.
764 return (pos.pCur = pCur.ptr()) != nullptr;
/// Finds the rightmost (maximum) live node, filling \p pos; helps unlink
/// logically-deleted nodes on the way. RCU must be locked.
767 bool find_max_position( position& pos )
769 assert( gc::is_locked() );
772 marked_node_ptr pSucc;
773 marked_node_ptr pCur;
776 pPred = m_Head.head();
778 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
781 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
783 // pCur.bits() means that pPred is logically deleted
787 if ( pCur.ptr() == nullptr ) {
788 // end of the list at level nLevel - goto next level
792 // pSucc contains deletion mark for pCur
793 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
795 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
798 if ( pSucc.bits() ) {
799 // pCur is marked, i.e. logically deleted.
800 marked_node_ptr p( pCur.ptr() );
801 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
802 memory_model::memory_order_release, atomics::memory_order_relaxed ))
806 pCur->m_bUnlinked = true;
809 if ( !is_extracted( pSucc )) {
810 // We cannot free the node at this moment since RCU is locked
811 // Link deleted nodes to a chain to free later
812 link_for_remove( pos, pCur.ptr() );
813 m_Stat.onEraseWhileFind();
816 m_Stat.onExtractWhileFind();
831 pos.pPrev[ nLevel ] = pPred;
832 pos.pSucc[ nLevel ] = pCur.ptr();
835 return (pos.pCur = pCur.ptr()) != nullptr;
/// Links \p pNode into the list at the position recorded in \p pos:
/// first CAS-links level 0 (the linearization point for insert), then the
/// upper levels, renewing the position on CAS failure. RCU must be locked.
838 template <typename Func>
839 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
841 assert( gc::is_locked() );
843 unsigned int nHeight = pNode->height();
844 pNode->clear_tower();
// Link the bottom level: publish pNode->next(0), then CAS pPrev[0] to pNode.
847 marked_node_ptr p( pos.pSucc[0] );
848 pNode->next( 0 ).store( p, memory_model::memory_order_release );
849 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
853 pNode->m_bLinked = true;
// Link the remaining levels 1..nHeight-1.
858 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
861 marked_node_ptr q( pos.pSucc[ nLevel ]);
862 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
863 // pNode has been marked as removed while we are inserting it
866 m_Stat.onLogicDeleteWhileInsert();
870 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
873 // Renew insert position
874 m_Stat.onRenewInsertPosition();
875 if ( !find_position( val, pos, key_comparator(), false )) {
876 // The node has been deleted while we are inserting it
877 m_Stat.onNotFoundWhileInsert();
/// Prepends \p pDel to the position's thread-local deleted-node chain
/// (freed later, once RCU is unlocked).
885 static void link_for_remove( position& pos, node_type * pDel )
887 assert( pDel->m_pDelChain == nullptr );
889 pDel->m_pDelChain = pos.pDelChain;
890 pos.pDelChain = pDel;
/// Two-phase removal of \p pDel: (1) logical deletion -- set mark bit(s)
/// on every level top-down, level 0 last (the linearization point); (2)
/// physical unlink via CAS on each predecessor, falling back to
/// find_position() to finish unlinking. \p bExtract additionally sets the
/// "extracted" bit so the node is handed to the caller, not retired here.
893 template <typename Func>
894 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
896 assert( pDel != nullptr );
897 assert( gc::is_locked() );
899 marked_node_ptr pSucc;
901 // logical deletion (marking)
902 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
903 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
906 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
913 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
// Level 0 mark: bit 0 = deleted; bit 1 = extracted (only when bExtract).
918 int const nMask = bExtract ? 3 : 1;
919 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
// Only the thread that wins the level-0 CAS runs the functor.
921 f( *node_traits::to_value_ptr( pDel ));
// Physical removal: unlink pDel from every level via its predecessors.
926 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
927 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
928 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
929 memory_model::memory_order_release, atomics::memory_order_relaxed) )
// Slow path: let find_position() finish the physical unlink.
932 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
934 m_Stat.onSlowExtract();
936 m_Stat.onSlowErase();
938 assert( pDel->m_bUnlinked );
945 pDel->m_bUnlinked = true;
948 // We cannot free the node at this moment since RCU is locked
949 // Link deleted nodes to a chain to free later
950 link_for_remove( pos, pDel );
951 m_Stat.onFastErase();
954 m_Stat.onFastExtract();
/// Outcome of the read-only fast-path search.
/// (Renamed from the typo'd `finsd_fastpath_result`; the type name is
/// private and referenced only by find_fastpath() below.)
961 enum find_fastpath_result {
963 find_fastpath_not_found,
/// Wait-free read-only search: descends the tower without helping to
/// unlink deleted nodes; aborts to the slow path if it keeps hitting
/// marked nodes. Calls \p f on the matching item when found.
966 template <typename Q, typename Compare, typename Func>
967 find_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
970 marked_node_ptr pCur;
971 marked_node_ptr pSucc;
972 marked_node_ptr pNull;
976 pPred = m_Head.head();
// Start from the current estimated height, not c_nMaxHeight.
977 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
978 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
982 while ( pCur != pNull ) {
984 // Wait until pCur is removed
985 unsigned int nAttempt = 0;
// Bounded spin (16 attempts) on a marked node before giving up.
986 while ( pCur.bits() && nAttempt++ < 16 ) {
988 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
993 // Maybe, we are on deleted node sequence
994 // Abort searching, try slow-path
995 return find_fastpath_abort;
1000 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1003 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1005 else if ( nCmp == 0 ) {
1007 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1008 return find_fastpath_found;
1010 else // pCur > val - go down
1016 return find_fastpath_not_found;
/// Slow-path search via find_position() (helps unlink deleted nodes);
/// calls \p f on the matching item when found.
1019 template <typename Q, typename Compare, typename Func>
1020 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1022 if ( find_position( val, pos, cmp, true )) {
1023 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1025 f( *node_traits::to_value_ptr( pos.pCur ), val );
/// Find driver: try the fast path first, fall back to the slow path on abort.
1032 template <typename Q, typename Compare, typename Func>
1033 bool do_find_with( Q& val, Compare cmp, Func f )
1040 switch ( find_fastpath( val, cmp, f )) {
1041 case find_fastpath_found:
1042 m_Stat.onFindFastSuccess();
1044 case find_fastpath_not_found:
1045 m_Stat.onFindFastFailed();
1051 if ( find_slowpath( val, cmp, f, pos )) {
1052 m_Stat.onFindSlowSuccess();
1056 m_Stat.onFindSlowFailed();
/// Erase driver: find the item, remove it via try_remove_at() (erase mode),
/// then dispose the collected chain after RCU is released.
1065 template <typename Q, typename Compare, typename Func>
1066 bool do_erase( Q const& val, Compare cmp, Func f )
1068 check_deadlock_policy::check();
1076 if ( !find_position( val, pos, cmp, false ) ) {
1077 m_Stat.onEraseFailed();
1081 node_type * pDel = pos.pCur;
1082 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1084 unsigned int nHeight = pDel->height();
1085 if ( try_remove_at( pDel, pos, f, false )) {
1087 m_Stat.onRemoveNode( nHeight );
1088 m_Stat.onEraseSuccess();
1092 m_Stat.onEraseFailed();
// Free the nodes collected during the search/removal (outside RCU lock).
1098 dispose_chain( pos );
/// Extracts the item equal to \p key: removed from the set but NOT freed --
/// ownership passes to the caller (exempt_ptr). Caller must hold RCU lock.
1102 template <typename Q, typename Compare>
1103 value_type * do_extract_key( Q const& key, Compare cmp )
1105 // RCU should be locked!!!
1106 assert( gc::is_locked() );
1111 if ( !find_position( key, pos, cmp, false ) ) {
1112 m_Stat.onExtractFailed();
1117 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1119 unsigned int const nHeight = pDel->height();
// bExtract == true: the node is handed out, not linked for disposal.
1121 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1123 m_Stat.onRemoveNode( nHeight );
1124 m_Stat.onExtractSuccess();
1127 m_Stat.onExtractFailed();
1133 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
/// Extract with the default key comparator; manages RCU lock scope itself.
1136 template <typename Q>
1137 value_type * do_extract( Q const& key )
1139 check_deadlock_policy::check();
1140 value_type * pDel = nullptr;
1143 pDel = do_extract_key( key, key_comparator() );
/// Extract using a Less predicate wrapped into a comparator.
1150 template <typename Q, typename Less>
1151 value_type * do_extract_with( Q const& key, Less pred )
1154 check_deadlock_policy::check();
1155 value_type * pDel = nullptr;
1159 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>() );
/// Extracts the minimum item; must be called WITHOUT the RCU lock held
/// (asserted) -- the lock is presumably taken internally. Returns nullptr
/// if the set is empty.
1166 value_type * do_extract_min()
1168 assert( !gc::is_locked() );
1176 if ( !find_min_position( pos ) ) {
1177 m_Stat.onExtractMinFailed();
1182 unsigned int const nHeight = pDel->height();
1184 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1186 m_Stat.onRemoveNode( nHeight );
1187 m_Stat.onExtractMinSuccess();
1190 m_Stat.onExtractMinFailed();
1199 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
/// Extracts the maximum item; same contract as do_extract_min().
1202 value_type * do_extract_max()
1204 assert( !gc::is_locked() );
1212 if ( !find_max_position( pos ) ) {
1213 m_Stat.onExtractMaxFailed();
1218 unsigned int const nHeight = pDel->height();
1220 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1222 m_Stat.onRemoveNode( nHeight );
1223 m_Stat.onExtractMaxSuccess();
1226 m_Stat.onExtractMaxFailed();
1235 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
/// Raises the estimated list height to \p nHeight (single CAS, monotonic;
/// a lost race is benign since a concurrent writer set a value >= ours
/// or our retry condition no longer holds).
1238 void increase_height( unsigned int nHeight )
1240 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1241 if ( nCur < nHeight )
1242 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
/// Input-iterator over a m_pDelChain-linked node chain, yielding
/// cds::urcu::retired_ptr entries for gc::batch_retire().
1245 class deferred_list_iterator
1249 explicit deferred_list_iterator( node_type * p )
1252 deferred_list_iterator()
1256 cds::urcu::retired_ptr operator *() const
1258 return cds::urcu::retired_ptr( node_traits::to_value_ptr(pCur), dispose_node );
1263 pCur = pCur->m_pDelChain;
1266 bool operator ==( deferred_list_iterator const& i ) const
1268 return pCur == i.pCur;
1270 bool operator !=( deferred_list_iterator const& i ) const
1272 return !operator ==( i );
/// Retires every node of the chain headed by \p pHead in one batch.
1276 void dispose_chain( node_type * pHead )
1278 // RCU should NOT be locked
1279 check_deadlock_policy::check();
1281 gc::batch_retire( deferred_list_iterator( pHead ), deferred_list_iterator() );
/// Retires the position's local chain, then the deferred global chain.
1284 void dispose_chain( position& pos )
1286 // RCU should NOT be locked
1287 check_deadlock_policy::check();
1289 // Delete local chain
1290 if ( pos.pDelChain ) {
1291 dispose_chain( pos.pDelChain );
1292 pos.pDelChain = nullptr;
1295 // Delete deferred chain
/// Atomically claims and retires the global deferred chain.
1299 void dispose_deferred()
1301 dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) );
/// Moves the position's local chain onto the global deferred chain
/// (lock-free CAS push of the whole chain segment).
1304 void defer_chain( position& pos )
1306 if ( pos.pDelChain ) {
1307 node_type * pHead = pos.pDelChain;
1308 node_type * pTail = pHead;
// Find the tail of the local chain so the whole segment can be spliced.
1309 while ( pTail->m_pDelChain )
1310 pTail = pTail->m_pDelChain;
1312 node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
1314 pTail->m_pDelChain = pDeferList;
1315 } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
1317 pos.pDelChain = nullptr;
1324 /// Default constructor
1326 : m_Head( c_nMaxHeight )
1327 , m_nHeight( c_nMinHeight )
1328 , m_pDeferredDelChain( nullptr )
// The node hook's GC must match the container's GC, checked at compile time.
1330 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1332 // Barrier for head node
1333 atomics::atomic_thread_fence( memory_model::memory_order_release );
1336 /// Clears and destructs the skip-list
// Iterators are only valid (and thread-safe) while RCU is locked; see the
// iterator class documentation above.
1344 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1346 /// Const iterator type
1347 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1349 /// Returns a forward iterator addressing the first element in a set
1352 return iterator( *m_Head.head() );
1355 /// Returns a forward const iterator addressing the first element in a set
1356 const_iterator begin() const
1358 return const_iterator( *m_Head.head() );
1361 /// Returns a forward const iterator addressing the first element in a set
1362 const_iterator cbegin() const
1364 return const_iterator( *m_Head.head() );
1367 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
// end()/cend() are default-constructed (null-node) iterators.
1373 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1374 const_iterator end() const
1376 return const_iterator();
1379 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1380 const_iterator cend() const
1382 return const_iterator();
1386 /// Inserts new node
1388 The function inserts \p val in the set if it does not contain
1389 an item with key equal to \p val.
1391 The function applies RCU lock internally.
1393 Returns \p true if \p val is placed into the set, \p false otherwise.
// Convenience overload: insert without a post-insert initializer functor.
1395 bool insert( value_type& val )
1397 return insert( val, []( value_type& ) {} );
1400 /// Inserts new node
1402 This function is intended for derived non-intrusive containers.
1404 The function allows to split creating of new item into two part:
1405 - create item with key only
1406 - insert new item into the set
1407 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1409 The functor signature is:
1411 void func( value_type& val );
1413 where \p val is the item inserted. User-defined functor \p f should guarantee that while it is changing
1414 \p val no other changes can be made to this set's item by concurrent threads.
1415 The user-defined functor is called only if the inserting is success.
1417 RCU \p synchronize method can be called. RCU should not be locked.
1419 template <typename Func>
1420 bool insert( value_type& val, Func f )
1422 check_deadlock_policy::check();
1428 node_type * pNode = node_traits::to_node_ptr( val );
1429 scoped_node_ptr scp( pNode );
1430 unsigned int nHeight = pNode->height();
1431 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1432 bool bTowerMade = false;
1438 bool bFound = find_position( val, pos, key_comparator(), true );
1440 // scoped_node_ptr deletes the node tower if we create it
1444 m_Stat.onInsertFailed();
1450 build_node( pNode );
1451 nHeight = pNode->height();
1456 if ( !insert_at_position( val, pNode, pos, f )) {
1457 m_Stat.onInsertRetry();
1461 increase_height( nHeight );
1463 m_Stat.onAddNode( nHeight );
1464 m_Stat.onInsertSuccess();
1471 dispose_chain( pos );
1476 /// Ensures that the \p val exists in the set
1478 The operation performs inserting or changing data with lock-free manner.
1480 If the item \p val is not found in the set, then \p val is inserted into the set.
1481 Otherwise, the functor \p func is called with item found.
1482 The functor signature is:
1484 void func( bool bNew, value_type& item, value_type& val );
1487 - \p bNew - \p true if the item has been inserted, \p false otherwise
1488 - \p item - item of the set
1489 - \p val - argument \p val passed into the \p %ensure() function
1490 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1491 refer to the same thing.
1493 The functor can change non-key fields of the \p item; however, \p func must guarantee
1494 that during changing no any other modifications could be made on this item by concurrent threads.
1496 RCU \p synchronize method can be called. RCU should not be locked.
1498 Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
1499 \p second is \p true if new item has been added or \p false if the item with \p key
1500 already is in the set.
1502 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1504 template <typename Func>
1505 std::pair<bool, bool> ensure( value_type& val, Func func )
// Refuse to run under the RCU read-side lock; RCU synchronize may be called below.
1507 check_deadlock_policy::check();
// Default result: operation succeeded, item was not newly inserted.
1510 std::pair<bool, bool> bRet( true, false );
// Map the value onto its intrusive node; scp disposes of the tower on failure.
1513 node_type * pNode = node_traits::to_node_ptr( val );
1514 scoped_node_ptr scp( pNode );
1515 unsigned int nHeight = pNode->height();
1516 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1517 bool bTowerMade = false;
// Retry loop body: locate the key (or the insert position) at every level.
1522 bool bFound = find_position( val, pos, key_comparator(), true );
1524 // scoped_node_ptr deletes the node tower if we create it before
// Item already present: report it to the user functor with bNew == false.
1528 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1529 m_Stat.onEnsureExist();
// Not found: lazily build the node's tower before linking.
1534 build_node( pNode );
1535 nHeight = pNode->height();
// On a new insert the functor sees bNew == true and item == val (same object).
1540 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1541 m_Stat.onInsertRetry();
1545 increase_height( nHeight );
1548 m_Stat.onAddNode( nHeight );
1549 m_Stat.onEnsureNew();
// Hand nodes collected during traversal over to the RCU reclamation cycle.
1555 dispose_chain( pos );
1560 /// Unlinks the item \p val from the set
1562 The function searches the item \p val in the set and unlink it from the set
1563 if it is found and is equal to \p val.
1565 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1566 and deletes the item found. \p %unlink() searches an item by key and deletes it
1567 only if \p val is an item of that set, i.e. the pointer to item found
1568 is equal to <tt> &val </tt>.
1570 RCU \p synchronize method can be called. RCU should not be locked.
1572 The \ref disposer specified in \p Traits class template parameter is called
1573 by garbage collector \p GC asynchronously.
1575 The function returns \p true if success and \p false otherwise.
1577 bool unlink( value_type& val )
// Must not run while this thread holds the RCU read-side lock.
1579 check_deadlock_policy::check();
// Key not present at all - nothing to unlink.
1587 if ( !find_position( val, pos, key_comparator(), false ) ) {
1588 m_Stat.onUnlinkFailed();
// A node with an equal key was found; it must compare equal to val.
1592 node_type * pDel = pos.pCur;
1593 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1595 unsigned int nHeight = pDel->height();
// Unlink only if the found item is exactly &val (identity check), and the
// multi-level logical-removal protocol succeeds.
1597 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1599 m_Stat.onRemoveNode( nHeight );
1600 m_Stat.onUnlinkSuccess();
// Equal key but different object, or concurrent removal won the race.
1604 m_Stat.onUnlinkFailed();
// Hand nodes collected during traversal over to the RCU reclamation cycle.
1610 dispose_chain( pos );
1615 /// Extracts the item from the set with specified \p key
1616 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1617 The function searches an item with key equal to \p key in the set,
1618 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1619 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1621 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1623 RCU \p synchronize method can be called. RCU should NOT be locked.
1624 The function does not call the disposer for the item found.
1625 The disposer will be implicitly invoked when the returned object is destroyed or when
1626 its \p release() member function is called.
1629 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1633 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1638 // Dispose returned item.
1643 template <typename Q>
1644 exempt_ptr extract( Q const& key )
1646 return exempt_ptr( do_extract( key ));
1649 /// Extracts the item from the set with comparing functor \p pred
1651 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
1652 but \p pred predicate is used for key comparing.
1653 \p Less has the interface like \p std::less.
1654 \p pred must imply the same element order as the comparator used for building the set.
1656 template <typename Q, typename Less>
1657 exempt_ptr extract_with( Q const& key, Less pred )
1659 return exempt_ptr( do_extract_with( key, pred ));
1662 /// Extracts an item with minimal key from the list
1664 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1665 If the skip-list is empty the function returns an empty \p exempt_ptr.
1667 RCU \p synchronize method can be called. RCU should NOT be locked.
1668 The function does not call the disposer for the item found.
1669 The disposer will be implicitly invoked when the returned object is destroyed or when
1670 its \p release() member function is manually called.
1673 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1677 typename skip_list::exempt_ptr ep(theList.extract_min());
1682 // Dispose returned item.
1687 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1688 It means that the function gets leftmost item and tries to unlink it.
1689 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1690 So, the function returns the item with minimum key at the moment of list traversing.
1692 exempt_ptr extract_min()
1694 return exempt_ptr( do_extract_min());
1697 /// Extracts an item with maximal key from the list
1699 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1700 If the skip-list is empty the function returns an empty \p exempt_ptr.
1702 RCU \p synchronize method can be called. RCU should NOT be locked.
1703 The function does not call the disposer for the item found.
1704 The disposer will be implicitly invoked when the returned object is destroyed or when
1705 its \p release() member function is manually called.
1708 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1712 typename skip_list::exempt_ptr ep( theList.extract_max() );
1716 // Dispose returned item.
1721 @note Due the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1722 It means that the function gets rightmost item and tries to unlink it.
1723 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1724 So, the function returns the item with maximum key at the moment of list traversing.
1726 exempt_ptr extract_max()
1728 return exempt_ptr( do_extract_max());
1731 /// Deletes the item from the set
1732 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1733 The function searches an item with key equal to \p key in the set,
1734 unlinks it from the set, and returns \p true.
1735 If the item with key equal to \p key is not found the function return \p false.
1737 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1739 RCU \p synchronize method can be called. RCU should not be locked.
1741 template <typename Q>
1742 bool erase( const Q& key )
1744 return do_erase( key, key_comparator(), [](value_type const&) {} );
1747 /// Delete the item from the set with comparing functor \p pred
1749 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1750 but \p pred predicate is used for key comparing.
1751 \p Less has the interface like \p std::less.
1752 \p pred must imply the same element order as the comparator used for building the set.
1754 template <typename Q, typename Less>
1755 bool erase_with( const Q& key, Less pred )
1758 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1761 /// Deletes the item from the set
1762 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1763 The function searches an item with key equal to \p key in the set,
1764 call \p f functor with item found, unlinks it from the set, and returns \p true.
1765 The \ref disposer specified in \p Traits class template parameter is called
1766 by garbage collector \p GC asynchronously.
1768 The \p Func interface is
1771 void operator()( value_type const& item );
1774 If the item with key equal to \p key is not found the function return \p false.
1776 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1778 RCU \p synchronize method can be called. RCU should not be locked.
1780 template <typename Q, typename Func>
1781 bool erase( Q const& key, Func f )
1783 return do_erase( key, key_comparator(), f );
1786 /// Delete the item from the set with comparing functor \p pred
1788 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1789 but \p pred predicate is used for key comparing.
1790 \p Less has the interface like \p std::less.
1791 \p pred must imply the same element order as the comparator used for building the set.
1793 template <typename Q, typename Less, typename Func>
1794 bool erase_with( Q const& key, Less pred, Func f )
1797 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1801 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1802 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1803 The interface of \p Func functor is:
1806 void operator()( value_type& item, Q& key );
1809 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1811 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1812 that \p item cannot be disposed during functor is executing.
1813 The functor does not serialize simultaneous access to the set \p item. If such access is
1814 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1816 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1817 can modify both arguments.
1819 The function applies RCU lock internally.
1821 The function returns \p true if \p key is found, \p false otherwise.
1823 template <typename Q, typename Func>
1824 bool find( Q& key, Func f )
1826 return do_find_with( key, key_comparator(), f );
1829 template <typename Q, typename Func>
1830 bool find( Q const& key, Func f )
1832 return do_find_with( key, key_comparator(), f );
1836 /// Finds the key \p key with comparing functor \p pred
1838 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1839 but \p cmp is used for key comparison.
1840 \p Less functor has the interface like \p std::less.
1841 \p cmp must imply the same element order as the comparator used for building the set.
1843 template <typename Q, typename Less, typename Func>
1844 bool find_with( Q& key, Less pred, Func f )
1847 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1850 template <typename Q, typename Less, typename Func>
1851 bool find_with( Q const& key, Less pred, Func f )
1854 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1859 /** @anchor cds_intrusive_SkipListSet_rcu_find_val
1860 The function searches the item with key equal to \p key
1861 and returns \p true if it is found, and \p false otherwise.
1863 The function applies RCU lock internally.
1865 template <typename Q>
1866 bool find( Q const& key )
1868 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1871 /// Finds \p key with comparing functor \p pred
1873 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
1874 but \p pred is used for key compare.
1875 \p Less functor has the interface like \p std::less.
1876 \p pred must imply the same element order as the comparator used for building the set.
1878 template <typename Q, typename Less>
1879 bool find_with( Q const& key, Less pred )
1882 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1885 /// Finds \p key and return the item found
1886 /** \anchor cds_intrusive_SkipListSet_rcu_get
1887 The function searches the item with key equal to \p key and returns the pointer to item found.
1888 If \p key is not found it returns \p nullptr.
1890 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1892 RCU should be locked before call of this function.
1893 Returned item is valid only while RCU is locked:
1895 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1900 skip_list::rcu_lock lock;
1902 foo * pVal = theList.get( 5 );
1908 // Unlock RCU by rcu_lock destructor
1909 // pVal can be retired by disposer at any time after RCU has been unlocked
1912 After RCU unlocking the \p %force_dispose member function can be called manually,
1913 see \ref force_dispose for explanation.
1915 template <typename Q>
1916 value_type * get( Q const& key )
1918 assert( gc::is_locked());
1920 value_type * pFound;
1921 return do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } )
1925 /// Finds \p key and return the item found
1927 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1928 but \p pred is used for comparing the keys.
1930 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1932 \p pred must imply the same element order as the comparator used for building the set.
1934 template <typename Q, typename Less>
1935 value_type * get_with( Q const& key, Less pred )
1938 assert( gc::is_locked());
1940 value_type * pFound;
1941 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1942 [&pFound](value_type& found, Q const& ) { pFound = &found; } )
1946 /// Returns item count in the set
1948 The value returned depends on item counter type provided by \p Traits template parameter.
1949 For \p atomicity::empty_item_counter the function always returns 0.
1950 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1951 member function for this purpose.
1955 return m_ItemCounter;
1958 /// Checks if the set is empty
// Empty <=> the head tower's level-0 successor is null (bottom list has no items).
1961 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1964 /// Clears the set (not atomic)
1966 The function unlink all items from the set.
1967 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1971 assert( set.empty() );
1973 the assertion could be raised.
1975 For each item the \p disposer will be called automatically after unlinking.
// Repeatedly extract the leftmost item until none remain; each extracted
// exempt_ptr passes its item to the disposer when it is destroyed.
1980 while ( (ep = extract_min()) );
1983 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1984 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1986 return c_nMaxHeight;
1989 /// Returns const reference to internal statistics
// NOTE(review): the reference presumably stays valid for the container's lifetime;
// the body (outside this view) appears to return m_Stat - confirm.
1990 stat const& statistics() const
1995 /// Clears internal list of ready-to-remove items passing it to RCU reclamation cycle
1996 /** @anchor cds_intrusive_SkipListSet_rcu_force_dispose
1997 Skip list has complex multi-step algorithm for removing an item. In fact, when you
1998 remove the item it is just marked as removed that is enough for the success of your operation.
1999 Actual removing can take place in the future, in another call or even in another thread.
2000 Inside RCU lock the removed item cannot be passed to RCU reclamation cycle
2001 since it can lead to deadlock. To solve this problem, the current skip list implementation
2002 has internal list of items which is ready to remove but is not yet passed to RCU reclamation.
2003 Usually, this list will be passed to RCU reclamation in the next suitable call of skip list member function.
2004 In some cases we want to pass it to RCU reclamation immediately after RCU unlocking.
2005 This function provides such opportunity: it checks whether the RCU is not locked and if it is true
2006 the function passes the internal ready-to-remove list to RCU reclamation cycle.
2008 The RCU \p synchronize can be called.
2010 void force_dispose()
// Proceed only outside an RCU read-side critical section; the statement that
// hands the deferred chain to RCU reclamation follows (outside this view).
2012 if ( !gc::is_locked() )
2017 }} // namespace cds::intrusive
2020 #endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H