3 #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H
4 #define __CDS_INTRUSIVE_SKIP_LIST_RCU_H
8 #include <cds/intrusive/details/skip_list_base.h>
9 #include <cds/opt/compare.h>
10 #include <cds/urcu/details/check_deadlock.h>
11 #include <cds/details/binary_functor_wrapper.h>
12 #include <cds/urcu/exempt_ptr.h>
15 namespace cds { namespace intrusive {
// Specialization of skip_list::node for the RCU-based garbage collector.
// A node owns its level-0 next pointer inline (m_pNext) and, for towers of
// height > 1, an external array m_arrNext for levels 1..m_nHeight-1.
// The two low-order bits of every marked next pointer encode node state
// (bit 0 = logically deleted, bit 1 = extracted) — see the comments below.
// NOTE(review): this extract is line-sampled (each line begins with its
// original line number; intermediate lines are missing), so the fragment is
// documented in place rather than restructured.
20 template <class RCU, typename Tag>
21 class node< cds::urcu::gc< RCU >, Tag >
24 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
25 typedef Tag tag ; ///< tag
28 // bit 0 - the item is logically deleted
29 // bit 1 - the item is extracted (only for level 0)
30 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
31 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
32 typedef atomic_marked_ptr tower_item_type;
35 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
// Chain link used to defer physical disposal of removed nodes while the
// RCU lock is held (see link_for_remove / dispose_chain in SkipListSet).
37 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Link-state flags: m_bLinked is set once the node is linked at level 0,
// m_bUnlinked once it is physically unlinked; checked by the assert below.
39 bool volatile m_bLinked;
40 bool volatile m_bUnlinked;
43 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
44 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
47 /// Constructs a node of height 1 (a bottom-list node)
50 , m_pDelChain( nullptr )
53 , m_bUnlinked( false )
56 , m_arrNext( nullptr )
// Sanity check: a node that was linked must have been unlinked before
// destruction (a still-linked node would dangle in the list).
62 assert( !m_bLinked || m_bUnlinked );
66 /// Constructs a node of height \p nHeight
// Attaches an externally allocated tower (next-pointer array) to the node.
// nextTower must be nullptr exactly when nHeight == 1.
67 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
69 assert( nHeight > 0 );
70 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
71 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
74 m_arrNext = nextTower;
// Detaches and returns the tower array so the caller can free it.
78 atomic_marked_ptr * release_tower()
80 atomic_marked_ptr * pTower = m_arrNext;
86 atomic_marked_ptr * get_tower() const
// Resets the upper-level next pointers (levels 1..m_nHeight-1) to null
// marked pointers; part of clear_tower() whose header line is missing
// from this extract — TODO confirm against the full file.
93 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
94 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
97 /// Access to element of next pointer array
98 atomic_marked_ptr& next( unsigned int nLevel )
100 assert( nLevel < height() );
101 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
// Level 0 lives inline in m_pNext; levels >= 1 index into the tower.
103 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
106 /// Access to element of next pointer array (const version)
107 atomic_marked_ptr const& next( unsigned int nLevel ) const
109 assert( nLevel < height() );
// NOTE(review): unlike the non-const overload (line 101), this assert
// omits parentheses; '&&' binds tighter than '||' so the meaning is the
// same, but it draws a -Wparentheses warning — consider aligning them.
110 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
112 return nLevel ? m_arrNext[nLevel - 1] : m_pNext;
115 /// Access to element of next pointer array (same as \ref next function)
116 atomic_marked_ptr& operator[]( unsigned int nLevel )
118 return next( nLevel );
121 /// Access to element of next pointer array (same as \ref next function)
122 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
124 return next( nLevel );
127 /// Height of the node
128 unsigned int height() const
133 /// Clears internal links
// Precondition: the tower has already been released (see release_tower()).
136 assert( m_arrNext == nullptr );
137 m_pNext.store( marked_ptr(), atomics::memory_order_release );
138 m_pDelChain = nullptr;
// Returns true iff the node holds no links (used as a reuse precondition).
141 bool is_cleared() const
143 return m_pNext == atomic_marked_ptr()
144 && m_arrNext == nullptr
148 } // namespace skip_list
// Forward iterator over the RCU skip list; IsConst selects the const or
// mutable value reference type. Valid ONLY while the caller holds the RCU
// lock — every constructor/advance path asserts gc::is_locked().
// Iteration walks the bottom (level 0) list and skips nodes whose topmost
// next pointer carries a deletion mark.
// NOTE(review): this extract is line-sampled (embedded original line numbers,
// missing intermediate lines), so the code is documented in place only.
152 namespace skip_list { namespace details {
154 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
155 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
158 typedef cds::urcu::gc< RCU > gc;
159 typedef NodeTraits node_traits;
160 typedef BackOff back_off;
161 typedef typename node_traits::node_type node_type;
162 typedef typename node_traits::value_type value_type;
163 static bool const c_isConst = IsConst;
// value_ref is 'value_type const&' for const_iterator, 'value_type&' otherwise.
165 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
168 typedef typename node_type::marked_ptr marked_ptr;
169 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
176 // RCU should be locked before iterating!!!
177 assert( gc::is_locked() );
// A mark on the node's top-level next pointer means the node is logically
// deleted, so its links may point anywhere — stop iterating (become end()).
182 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
183 // Current node is marked as deleted. So, its next pointer can point to anything
184 // In this case we interrupt our iteration and returns end() iterator.
189 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
190 node_type * pp = p.ptr();
192 // p is marked as deleted. Spin waiting for physical removal
196 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
197 // p is marked as deleted. Spin waiting for physical removal
207 public: // for internal use only!!!
// Constructs an iterator positioned at the first live node after refHead.
208 iterator( node_type& refHead )
211 // RCU should be locked before iterating!!!
212 assert( gc::is_locked() );
217 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
223 node_type * pp = p.ptr();
224 // Logically deleted node is marked from highest level
225 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
238 // RCU should be locked before iterating!!!
239 assert( gc::is_locked() );
// Copy constructor — cheap pointer copy, still requires the RCU lock.
242 iterator( iterator const& s)
243 : m_pNode( s.m_pNode )
245 // RCU should be locked before iterating!!!
246 assert( gc::is_locked() );
249 value_type * operator ->() const
251 assert( m_pNode != nullptr );
252 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
254 return node_traits::to_value_ptr( m_pNode );
257 value_ref operator *() const
259 assert( m_pNode != nullptr );
260 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
262 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment: advances to the next live node (body partly elided here).
266 iterator& operator ++()
272 iterator& operator = (const iterator& src)
274 m_pNode = src.m_pNode;
// Equality compares node pointers; comparable across back-off/constness.
278 template <typename Bkoff, bool C>
279 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
281 return m_pNode == i.m_pNode;
283 template <typename Bkoff, bool C>
284 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
286 return !( *this == i );
289 }} // namespace skip_list::details
292 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
293 /** @ingroup cds_intrusive_map
294 @anchor cds_intrusive_SkipListSet_rcu
296 The implementation of well-known probabilistic data structure called skip-list
297 invented by W.Pugh in his papers:
298 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
299 - [1990] W.Pugh A Skip List Cookbook
301 A skip-list is a probabilistic data structure that provides expected logarithmic
302 time search without the need of rebalance. The skip-list is a collection of sorted
303 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
304 Each list has a level, ranging from 0 to 32. The bottom-level list contains
305 all the nodes, and each higher-level list is a sublist of the lower-level lists.
306 Each node is created with a random top level (with a random height), and belongs
307 to all lists up to that level. The probability that a node has the height 1 is 1/2.
308 The probability that a node has the height N is 1/2 ** N (more precisely,
309 the distribution depends on a random generator provided, but our generators
312 The lock-free variant of skip-list is implemented according to book
313 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
314 chapter 14.4 "A Lock-Free Concurrent Skiplist".
316 <b>Template arguments</b>:
317 - \p RCU - one of \ref cds_urcu_gc "RCU type"
318 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
319 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
320 - \p Traits - set traits, default is \p skip_list::type_traits
321 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
322 instead of \p Traits template argument.
324 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
325 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
329 The class supports a forward iterator (\ref iterator and \ref const_iterator).
330 The iteration is ordered.
332 You may iterate over skip-list set items only under RCU lock.
333 Only in this case the iterator is thread-safe since
334 while RCU is locked any set's item cannot be reclaimed.
336 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
337 (i.e. inserting, erasing and so on) is not possible.
339 @warning The iterator object cannot be passed between threads.
341 Example how to use skip-list set iterators:
343 // First, you should include the header for RCU type you have chosen
344 #include <cds/urcu/general_buffered.h>
345 #include <cds/intrusive/skip_list_rcu.h>
347 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
353 // Traits for your skip-list.
354 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
355 struct my_traits: public cds::intrusive::skip_list::traits
359 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
361 my_skiplist_set theSet;
367 // Apply RCU locking manually
368 typename rcu_type::scoped_lock sl;
370 for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
374 // rcu_type::scoped_lock destructor releases RCU lock implicitly
378 The iterator class supports the following minimalistic interface:
385 iterator( iterator const& s);
387 value_type * operator ->() const;
388 value_type& operator *() const;
391 iterator& operator ++();
394 iterator& operator = (const iterator& src);
396 bool operator ==(iterator const& i ) const;
397 bool operator !=(iterator const& i ) const;
400 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
404 You should incorporate skip_list::node into your struct \p T and provide
405 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
406 define a struct based on \p skip_list::traits.
408 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
410 // First, you should include the header for RCU type you have chosen
411 #include <cds/urcu/general_buffered.h>
413 // Include RCU skip-list specialization
414 #include <cds/intrusive/skip_list_rcu.h>
417 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
419 // Data stored in skip list
420 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
429 // my_data compare functor
431 int operator()( const my_data& d1, const my_data& d2 )
433 return d1.strKey.compare( d2.strKey );
436 int operator()( const my_data& d, const std::string& s )
438 return d.strKey.compare(s);
441 int operator()( const std::string& s, const my_data& d )
443 return s.compare( d.strKey );
448 struct my_traits: public cds::intrusive::skip_list::traits
450 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
451 typedef my_data_cmp compare;
454 // Declare skip-list set type
455 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
458 Equivalent option-based code:
460 #include <cds/urcu/general_buffered.h>
461 #include <cds/intrusive/skip_list_rcu.h>
463 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
472 // Declare option-based skip-list set
473 typedef cds::intrusive::SkipListSet< rcu_type
475 , typename cds::intrusive::skip_list::make_traits<
476 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
477 ,cds::intrusive::opt::compare< my_data_cmp >
// SkipListSet specialization for cds::urcu::gc<RCU>: trait-derived type
// aliases, compile-time height bounds, the per-search 'position' context,
// and the container's data members.
// NOTE(review): this extract is line-sampled (embedded original line numbers,
// missing intermediate lines); code is kept verbatim and only annotated.
486 #ifdef CDS_DOXYGEN_INVOKED
487 ,typename Traits = skip_list::traits
492 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
495 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
496 typedef T value_type; ///< type of value stored in the skip-list
497 typedef Traits traits; ///< Traits template parameter
499 typedef typename traits::hook hook; ///< hook type
500 typedef typename hook::node_type node_type; ///< node type
502 # ifdef CDS_DOXYGEN_INVOKED
503 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
505 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
508 typedef typename traits::disposer disposer; ///< disposer
509 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
511 typedef typename traits::item_counter item_counter; ///< Item counting policy used
512 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
513 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
514 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
515 typedef typename traits::back_off back_off; ///< Back-off strategy
516 typedef typename traits::stat stat; ///< internal statistics type
517 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
518 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
519 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
522 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
524 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
525 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Compile-time clamp: min(random_level_generator::c_nUpperBound, c_nHeightLimit).
527 static unsigned int const c_nMaxHeight = std::conditional<
528 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
529 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
530 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
534 static unsigned int const c_nMinHeight = 5;
538 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
539 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
543 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Uses the trait-supplied node builder when given, otherwise the default
// intrusive builder above.
545 typedef typename std::conditional<
546 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
547 ,intrusive_node_builder
548 ,typename traits::internal_node_builder
549 >::type node_builder;
551 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Search context filled by find_position and friends: per-level predecessor
// and successor arrays, plus the chain of nodes whose physical disposal is
// deferred until the RCU lock is released.
554 node_type * pPrev[ c_nMaxHeight ];
555 node_type * pSucc[ c_nMaxHeight ];
556 node_type * pNext[ c_nMaxHeight ];
559 node_type * pDelChain;
562 : pDelChain( nullptr )
// The chain must have been handed off (dispose_chain/defer_chain) before
// the position is destroyed.
567 assert( pDelChain == nullptr );
572 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
576 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
578 item_counter m_ItemCounter; ///< item counter
579 random_level_generator m_RandomLevelGen; ///< random level generator instance
580 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
581 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
582 mutable stat m_Stat; ///< internal statistics
// Node-building helpers plus the core search routine find_position().
// NOTE(review): line-sampled extract (embedded original line numbers, missing
// intermediate lines); code kept verbatim, comments only.
// Returns a height in [1 .. c_nMaxHeight] for a new node.
586 unsigned int random_level()
588 // Random generator produces a number from range [0..31]
589 // We need a number from range [1..32]
590 return m_RandomLevelGen() + 1;
593 template <typename Q>
594 node_type * build_node( Q v )
596 return node_builder::make_tower( v, m_RandomLevelGen );
// Frees a node's tower/value via the builder's disposer; used as the
// retired-pointer callback when RCU reclaims nodes.
599 static void dispose_node( value_type * pVal )
603 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
609 void operator()( value_type * pVal )
611 dispose_node( pVal );
617 typedef cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void > exempt_ptr; ///< pointer to extracted node
// Bit 1 of a marked pointer flags a node removed via extract (set with
// mask 3 in try_remove_at) as opposed to plain erase (mask 1).
622 bool is_extracted( marked_node_ptr const p ) const
624 return (p.bits() & 2) != 0;
// Descends from the top level looking for 'val'. Helps physically unlink
// nodes already marked as deleted, recording erased (non-extracted) ones on
// pos.pDelChain for disposal after RCU unlock. Fills pos.pPrev/pSucc per
// level and pos.pCur; returns true iff a key-equal node was found.
// Must be called under the RCU lock.
627 template <typename Q, typename Compare >
628 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
630 assert( gc::is_locked() );
633 marked_node_ptr pSucc;
634 marked_node_ptr pCur;
638 pPred = m_Head.head();
640 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
643 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
645 // pCur.bits() means that pPred is logically deleted
649 if ( pCur.ptr() == nullptr ) {
650 // end of the list at level nLevel - goto next level
654 // pSucc contains deletion mark for pCur
655 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Re-read to detect a concurrent change of pPred's link; restart if so.
657 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
660 if ( pSucc.bits() ) {
661 // pCur is marked, i.e. logically deleted.
// Help remove it: swing pPred's link past pCur at this level.
662 marked_node_ptr p( pCur.ptr() );
663 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
664 memory_model::memory_order_release, atomics::memory_order_relaxed ))
668 pCur->m_bUnlinked = true;
671 if ( !is_extracted( pSucc )) {
672 // We cannot free the node at this moment since RCU is locked
673 // Link deleted nodes to a chain to free later
674 link_for_remove( pos, pCur.ptr() );
675 m_Stat.onEraseWhileFind();
678 m_Stat.onExtractWhileFind();
685 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
688 else if ( nCmp == 0 && bStopIfFound )
696 pos.pPrev[ nLevel ] = pPred;
697 pos.pSucc[ nLevel ] = pCur.ptr();
704 pos.pCur = pCur.ptr();
705 return pCur.ptr() && nCmp == 0;
// find_min_position / find_max_position: variants of find_position that
// locate the leftmost / rightmost element, helping to unlink marked nodes
// and chaining erased ones onto pos.pDelChain along the way. Both require
// the RCU lock and return true iff a live node was found (pos.pCur set).
// NOTE(review): line-sampled extract; code kept verbatim, comments only.
708 bool find_min_position( position& pos )
710 assert( gc::is_locked() );
713 marked_node_ptr pSucc;
714 marked_node_ptr pCur;
717 pPred = m_Head.head();
719 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
721 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
722 // pCur.bits() means that pPred is logically deleted
723 // head cannot be deleted
724 assert( pCur.bits() == 0 );
728 // pSucc contains deletion mark for pCur
729 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
// Restart the level if pPred's link changed under us.
731 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
734 if ( pSucc.bits() ) {
735 // pCur is marked, i.e. logically deleted.
// Help remove it, then record it for deferred disposal unless it was
// taken by an extract operation (bit 1 set).
736 marked_node_ptr p( pCur.ptr() );
737 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
738 memory_model::memory_order_release, atomics::memory_order_relaxed ))
742 pCur->m_bUnlinked = true;
745 if ( !is_extracted( pSucc )) {
746 // We cannot free the node at this moment since RCU is locked
747 // Link deleted nodes to a chain to free later
748 link_for_remove( pos, pCur.ptr() );
749 m_Stat.onEraseWhileFind();
752 m_Stat.onExtractWhileFind();
761 pos.pPrev[ nLevel ] = pPred;
762 pos.pSucc[ nLevel ] = pCur.ptr();
764 return (pos.pCur = pCur.ptr()) != nullptr;
// Symmetric to find_min_position but walks each level to its end to find
// the rightmost (maximal) element.
767 bool find_max_position( position& pos )
769 assert( gc::is_locked() );
772 marked_node_ptr pSucc;
773 marked_node_ptr pCur;
776 pPred = m_Head.head();
778 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
781 pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed );
783 // pCur.bits() means that pPred is logically deleted
787 if ( pCur.ptr() == nullptr ) {
788 // end of the list at level nLevel - goto next level
792 // pSucc contains deletion mark for pCur
793 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed );
795 if ( pPred->next( nLevel ).load( memory_model::memory_order_relaxed ).all() != pCur.ptr() )
798 if ( pSucc.bits() ) {
799 // pCur is marked, i.e. logically deleted.
800 marked_node_ptr p( pCur.ptr() );
801 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
802 memory_model::memory_order_release, atomics::memory_order_relaxed ))
806 pCur->m_bUnlinked = true;
809 if ( !is_extracted( pSucc )) {
810 // We cannot free the node at this moment since RCU is locked
811 // Link deleted nodes to a chain to free later
812 link_for_remove( pos, pCur.ptr() );
813 m_Stat.onEraseWhileFind();
816 m_Stat.onExtractWhileFind();
831 pos.pPrev[ nLevel ] = pPred;
832 pos.pSucc[ nLevel ] = pCur.ptr();
835 return (pos.pCur = pCur.ptr()) != nullptr;
// Linking and unlinking primitives. Both must run under the RCU lock.
// NOTE(review): line-sampled extract; code kept verbatim, comments only.
// insert_at_position: links pNode at the position recorded in 'pos'.
// Level 0 is linked first (this is the linearization point); upper levels
// are then linked one by one, re-running find_position when a CAS loses.
// Returns false if the node was concurrently deleted while being linked.
838 template <typename Func>
839 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
841 assert( gc::is_locked() );
843 unsigned int nHeight = pNode->height();
844 pNode->clear_tower();
// Link at level 0: publish pNode->next, then CAS it into pPrev[0].
847 marked_node_ptr p( pos.pSucc[0] );
848 pNode->next( 0 ).store( p, memory_model::memory_order_release );
849 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
853 pNode->m_bLinked = true;
// Link the remaining levels 1..nHeight-1.
858 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
861 marked_node_ptr q( pos.pSucc[ nLevel ]);
// CAS on pNode's own link fails only if someone marked the node deleted.
862 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
863 // pNode has been marked as removed while we are inserting it
866 m_Stat.onLogicDeleteWhileInsert();
870 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
873 // Renew insert position
874 m_Stat.onRenewInsertPosition();
875 if ( !find_position( val, pos, key_comparator(), false )) {
876 // The node has been deleted while we are inserting it
877 m_Stat.onNotFoundWhileInsert();
// Pushes pDel onto the position's deferred-disposal chain; the node is
// actually freed by dispose_chain() after the RCU lock is released.
885 static void link_for_remove( position& pos, node_type * pDel )
887 assert( pDel->m_pDelChain == nullptr );
889 pDel->m_pDelChain = pos.pDelChain;
890 pos.pDelChain = pDel;
// try_remove_at: two-phase removal of pDel. Phase 1 (logical): mark the
// next pointers from the top level down to 1, then CAS level 0 with
// mask 3 when extracting (bit 1 = extracted) or 1 for a plain erase; the
// level-0 CAS is the linearization point and the only place 'f' is called.
// Phase 2 (physical): unlink from every level via pos.pPrev, falling back
// to find_position when a CAS loses. Returns true iff this call removed it.
893 template <typename Func>
894 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
896 assert( pDel != nullptr );
897 assert( gc::is_locked() );
899 marked_node_ptr pSucc;
901 // logical deletion (marking)
902 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
903 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
906 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
913 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
918 int const nMask = bExtract ? 3 : 1;
919 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
921 f( *node_traits::to_value_ptr( pDel ));
// Physical unlink from top to bottom; on contention, delegate the rest
// of the unlinking to find_position (which helps remove marked nodes).
926 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
927 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
928 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
929 memory_model::memory_order_release, atomics::memory_order_relaxed) )
932 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
934 m_Stat.onSlowExtract();
936 m_Stat.onSlowErase();
938 assert( pDel->m_bUnlinked );
945 pDel->m_bUnlinked = true;
948 // We cannot free the node at this moment since RCU is locked
949 // Link deleted nodes to a chain to free later
950 link_for_remove( pos, pDel );
951 m_Stat.onFastErase();
954 m_Stat.onFastExtract();
// Lookup machinery: a wait-light fast path that never modifies the list,
// a slow path built on find_position, and the erase/extract-by-key cores.
// NOTE(review): line-sampled extract; code kept verbatim, comments only.
// NOTE(review): 'finsd_fastpath_result' is a typo for 'find_...'; renaming
// would touch all uses, so it is left as-is in this annotated extract.
961 enum finsd_fastpath_result {
963 find_fastpath_not_found,
// find_fastpath: read-only search from the current estimated height.
// Spins briefly (up to 16 attempts) on a marked node; if the mark persists
// it aborts so the caller can retry via the slow path.
966 template <typename Q, typename Compare, typename Func>
967 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
970 marked_node_ptr pCur;
971 marked_node_ptr pSucc;
972 marked_node_ptr pNull;
976 pPred = m_Head.head();
977 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
978 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
982 while ( pCur != pNull ) {
984 // Wait until pCur is removed
985 unsigned int nAttempt = 0;
986 while ( pCur.bits() && nAttempt++ < 16 ) {
988 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
993 // Maybe, we are on deleted node sequence
994 // Abort searching, try slow-path
995 return find_fastpath_abort;
1000 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1003 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1005 else if ( nCmp == 0 ) {
1007 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1008 return find_fastpath_found;
1010 else // pCur > val - go down
1016 return find_fastpath_not_found;
// find_slowpath: full find_position search; applies 'f' to the found item.
1019 template <typename Q, typename Compare, typename Func>
1020 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1022 if ( find_position( val, pos, cmp, true )) {
1023 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1025 f( *node_traits::to_value_ptr( pos.pCur ), val );
// do_find_with: try the fast path first; fall back to the slow path only
// when the fast path aborts on a run of marked nodes.
1032 template <typename Q, typename Compare, typename Func>
1033 bool do_find_with( Q& val, Compare cmp, Func f )
1040 switch ( find_fastpath( val, cmp, f )) {
1041 case find_fastpath_found:
1042 m_Stat.onFindFastSuccess();
1044 case find_fastpath_not_found:
1045 m_Stat.onFindFastFailed();
1051 if ( find_slowpath( val, cmp, f, pos )) {
1052 m_Stat.onFindSlowSuccess();
1056 m_Stat.onFindSlowFailed();
// do_erase: locate and remove the key-equal node (bExtract = false), then
// dispose the deferred chain once outside the RCU critical section.
1065 template <typename Q, typename Compare, typename Func>
1066 bool do_erase( Q const& val, Compare cmp, Func f )
1068 check_deadlock_policy::check();
1076 if ( !find_position( val, pos, cmp, false ) ) {
1077 m_Stat.onEraseFailed();
1081 node_type * pDel = pos.pCur;
1082 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1084 unsigned int nHeight = pDel->height();
1085 if ( try_remove_at( pDel, pos, f, false )) {
1087 m_Stat.onRemoveNode( nHeight );
1088 m_Stat.onEraseSuccess();
1092 m_Stat.onEraseFailed();
1098 dispose_chain( pos );
// do_extract_key: like do_erase but removes with bExtract = true so the
// node is handed to the caller (exempt_ptr) instead of being disposed.
// Caller must hold the RCU lock.
1102 template <typename Q, typename Compare>
1103 value_type * do_extract_key( Q const& key, Compare cmp )
1105 // RCU should be locked!!!
1106 assert( gc::is_locked() );
1111 if ( !find_position( key, pos, cmp, false ) ) {
1112 m_Stat.onExtractFailed();
1117 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1119 unsigned int const nHeight = pDel->height();
1121 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1123 m_Stat.onRemoveNode( nHeight );
1124 m_Stat.onExtractSuccess();
1127 m_Stat.onExtractFailed();
1133 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract wrappers (by key, with predicate, min, max) and the deferred
// disposal machinery that frees removed nodes after the RCU lock is gone.
// NOTE(review): line-sampled extract; code kept verbatim, comments only.
// do_extract: deadlock-checked wrapper around do_extract_key; the removed
// value is handed to 'result' (an exempt_ptr).
1136 template <typename ExemptPtr, typename Q>
1137 bool do_extract( ExemptPtr& result, Q const& key )
1139 check_deadlock_policy::check();
1144 value_type * pDel = do_extract_key( key, key_comparator() );
1145 bReturn = pDel != nullptr;
// Same as do_extract but orders by a user 'Less' predicate wrapped into
// a three-way comparator.
1154 template <typename ExemptPtr, typename Q, typename Less>
1155 bool do_extract_with( ExemptPtr& result, Q const& key, Less pred )
1157 check_deadlock_policy::check();
1162 value_type * pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>() );
1163 bReturn = pDel != nullptr;
// Removes and returns the leftmost node (nullptr if empty). RCU must be
// locked; the node is removed in extract mode (bExtract = true).
1172 node_type * do_extract_min()
1174 assert( gc::is_locked() );
1179 if ( !find_min_position( pos ) ) {
1180 m_Stat.onExtractMinFailed();
1185 unsigned int const nHeight = pDel->height();
1187 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1189 m_Stat.onRemoveNode( nHeight );
1190 m_Stat.onExtractMinSuccess();
1193 m_Stat.onExtractMinFailed();
1202 template <typename ExemptPtr>
1203 bool do_extract_min( ExemptPtr& result )
1205 check_deadlock_policy::check();
1210 node_type * pDel = do_extract_min();
1211 bReturn = pDel != nullptr;
1213 result = node_traits::to_value_ptr(pDel);
// Rightmost-node counterpart of do_extract_min().
1220 node_type * do_extract_max()
1222 assert( gc::is_locked() );
1227 if ( !find_max_position( pos ) ) {
1228 m_Stat.onExtractMaxFailed();
1233 unsigned int const nHeight = pDel->height();
1235 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1237 m_Stat.onRemoveNode( nHeight );
1238 m_Stat.onExtractMaxSuccess();
1241 m_Stat.onExtractMaxFailed();
1250 template <typename ExemptPtr>
1251 bool do_extract_max( ExemptPtr& result )
1253 check_deadlock_policy::check();
1258 node_type * pDel = do_extract_max();
1259 bReturn = pDel != nullptr;
1261 result = node_traits::to_value_ptr(pDel);
// Raises the estimated list height (m_nHeight) monotonically via CAS.
1268 void increase_height( unsigned int nHeight )
1270 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1271 if ( nCur < nHeight )
1272 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
// Input iterator over an m_pDelChain list, yielding retired_ptr entries
// for gc::batch_retire.
1275 class deferred_list_iterator
1279 explicit deferred_list_iterator( node_type * p )
1282 deferred_list_iterator()
1286 cds::urcu::retired_ptr operator *() const
1288 return cds::urcu::retired_ptr( node_traits::to_value_ptr(pCur), dispose_node );
1293 pCur = pCur->m_pDelChain;
1296 bool operator ==( deferred_list_iterator const& i ) const
1298 return pCur == i.pCur;
1300 bool operator !=( deferred_list_iterator const& i ) const
1302 return !operator ==( i );
// Retires every node on the chain headed by pHead. Must NOT be called
// under the RCU lock (batch_retire may synchronize).
1306 void dispose_chain( node_type * pHead )
1308 // RCU should NOT be locked
1309 check_deadlock_policy::check();
1311 gc::batch_retire( deferred_list_iterator( pHead ), deferred_list_iterator() );
// Disposes the position's local chain, then the shared deferred chain.
1314 void dispose_chain( position& pos )
1316 // RCU should NOT be locked
1317 check_deadlock_policy::check();
1319 // Delete local chain
1320 if ( pos.pDelChain ) {
1321 dispose_chain( pos.pDelChain );
1322 pos.pDelChain = nullptr;
1325 // Delete deferred chain
// Atomically takes ownership of the shared deferred chain and retires it.
1329 void dispose_deferred()
1331 dispose_chain( m_pDeferredDelChain.exchange( nullptr, memory_model::memory_order_acq_rel ) );
// Splices the position's local chain onto the shared deferred chain with
// a CAS push (used when disposal cannot happen right now).
1334 void defer_chain( position& pos )
1336 if ( pos.pDelChain ) {
1337 node_type * pHead = pos.pDelChain;
1338 node_type * pTail = pHead;
1339 while ( pTail->m_pDelChain )
1340 pTail = pTail->m_pDelChain;
1342 node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
1344 pTail->m_pDelChain = pDeferList;
1345 } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
1347 pos.pDelChain = nullptr;
// Public interface: construction, iterator accessors, and insert().
// NOTE(review): line-sampled extract; code kept verbatim, comments only.
1354 /// Default constructor
1356 : m_Head( c_nMaxHeight )
1357 , m_nHeight( c_nMinHeight )
1358 , m_pDeferredDelChain( nullptr )
// The hook's node type must use the same RCU garbage collector as the set.
1360 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1362 // Barrier for head node
1363 atomics::atomic_thread_fence( memory_model::memory_order_release );
1366 /// Clears and destructs the skip-list
1374 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1376 /// Const iterator type
1377 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1379 /// Returns a forward iterator addressing the first element in a set
// NOTE: per the class documentation, iterators are valid only under the
// RCU lock (the iterator constructor asserts gc::is_locked()).
1382 return iterator( *m_Head.head() );
1385 /// Returns a forward const iterator addressing the first element in a set
1386 const_iterator begin() const
1388 return const_iterator( *m_Head.head() );
1391 /// Returns a forward const iterator addressing the first element in a set
1392 const_iterator cbegin() const
1394 return const_iterator( *m_Head.head() );
1397 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
1403 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1404 const_iterator end() const
1406 return const_iterator();
1409 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1410 const_iterator cend() const
1412 return const_iterator();
1416 /// Inserts new node
1418 The function inserts \p val in the set if it does not contain
1419 an item with key equal to \p val.
1421 The function applies RCU lock internally.
1423 Returns \p true if \p val is placed into the set, \p false otherwise.
1425 bool insert( value_type& val )
1427 return insert( val, []( value_type& ) {} );
1430 /// Inserts new node
1432 This function is intended for derived non-intrusive containers.
1434 The function allows to split creating of new item into two part:
1435 - create item with key only
1436 - insert new item into the set
1437 - if inserting is success, calls \p f functor to initialize value-field of \p val.
1439 The functor signature is:
1441 void func( value_type& val );
1443 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1444 \p val no any other changes could be made on this set's item by concurrent threads.
1445 The user-defined functor is called only if the inserting is success.
1447 RCU \p synchronize method can be called. RCU should not be locked.
1449 template <typename Func>
1450 bool insert( value_type& val, Func f )
1452 check_deadlock_policy::check();
// The node's tower is built lazily: only after the key is known to be
// absent do we spend the allocation (scoped_node_ptr frees it on failure).
1458 node_type * pNode = node_traits::to_node_ptr( val );
1459 scoped_node_ptr scp( pNode );
1460 unsigned int nHeight = pNode->height();
1461 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1462 bool bTowerMade = false;
1468 bool bFound = find_position( val, pos, key_comparator(), true );
1470 // scoped_node_ptr deletes the node tower if we create it
1474 m_Stat.onInsertFailed();
1480 build_node( pNode );
1481 nHeight = pNode->height();
// Retry from find_position when insert_at_position loses a race.
1486 if ( !insert_at_position( val, pNode, pos, f )) {
1487 m_Stat.onInsertRetry();
1491 increase_height( nHeight );
1493 m_Stat.onAddNode( nHeight );
1494 m_Stat.onInsertSuccess();
// Deferred nodes collected during the search are freed outside the lock.
1501 dispose_chain( pos );
1506 /// Ensures that the \p val exists in the set
1508 The operation performs inserting or changing data with lock-free manner.
1510 If the item \p val is not found in the set, then \p val is inserted into the set.
1511 Otherwise, the functor \p func is called with item found.
1512 The functor signature is:
1514 void func( bool bNew, value_type& item, value_type& val );
1517 - \p bNew - \p true if the item has been inserted, \p false otherwise
1518 - \p item - item of the set
1519 - \p val - argument \p val passed into the \p %ensure() function
1520 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1521 refer to the same thing.
1523 The functor can change non-key fields of the \p item; however, \p func must guarantee
1524 that during changing no other modifications can be made on this item by concurrent threads.
1526 RCU \p synchronize method can be called. RCU should not be locked.
1528 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
1529 \p second is \p true if new item has been added or \p false if the item with \p key
1530 already is in the set.
1532 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1534 template <typename Func>
1535 std::pair<bool, bool> ensure( value_type& val, Func func )
1537 check_deadlock_policy::check();
// Result defaults to { success, not-inserted }.
1540 std::pair<bool, bool> bRet( true, false );
// scp owns the node (and any tower built here) until insertion succeeds.
1543 node_type * pNode = node_traits::to_node_ptr( val );
1544 scoped_node_ptr scp( pNode );
1545 unsigned int nHeight = pNode->height();
1546 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1547 bool bTowerMade = false;
1552 bool bFound = find_position( val, pos, key_comparator(), true );
1554 // scoped_node_ptr deletes the node tower if we create it before
// Key already present: call the functor with bNew == false on the existing item.
1558 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1559 m_Stat.onEnsureExist();
1564 build_node( pNode );
1565 nHeight = pNode->height();
// New item: the insert path reports bNew == true; item and val are the same object.
1570 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1571 m_Stat.onInsertRetry();
1575 increase_height( nHeight );
1578 m_Stat.onAddNode( nHeight );
1579 m_Stat.onEnsureNew();
// Hand any ready-to-remove nodes collected during the search to RCU reclamation.
1585 dispose_chain( pos );
1590 /// Unlinks the item \p val from the set
1592 The function searches the item \p val in the set and unlinks it from the set
1593 if it is found and is equal to \p val.
1595 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1596 and deletes the item found. \p %unlink() searches an item by key and deletes it
1597 only if \p val is an item of that set, i.e. the pointer to item found
1598 is equal to <tt> &val </tt>.
1600 RCU \p synchronize method can be called. RCU should not be locked.
1602 The \ref disposer specified in \p Traits class template parameter is called
1603 by garbage collector \p GC asynchronously.
1605 The function returns \p true on success and \p false otherwise.
1607 bool unlink( value_type& val )
1609 check_deadlock_policy::check();
1617 if ( !find_position( val, pos, key_comparator(), false ) ) {
1618 m_Stat.onUnlinkFailed();
1622 node_type * pDel = pos.pCur;
// Sanity check: the node found must compare equal to val.
1623 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1625 unsigned int nHeight = pDel->height();
// Identity check: remove only if the node found is exactly &val, then try the physical removal.
1627 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1629 m_Stat.onRemoveNode( nHeight );
1630 m_Stat.onUnlinkSuccess();
1634 m_Stat.onUnlinkFailed();
// Hand any ready-to-remove nodes collected during the search to RCU reclamation.
1640 dispose_chain( pos );
1645 /// Extracts the item from the set with specified \p key
1646 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1647 The function searches an item with key equal to \p key in the set,
1648 unlinks it from the set, places it to \p result parameter, and returns \p true.
1649 If the item with key equal to \p key is not found the function returns \p false.
1651 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1653 RCU \p synchronize method can be called. RCU should NOT be locked.
1654 The function does not call the disposer for the item found.
1655 The disposer will be implicitly invoked when \p result object is destroyed or when
1656 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1657 @note Before reusing \p result object you should call its \p release() method.
1660 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1664 typename skip_list::exempt_ptr ep;
1665 if ( theList.extract( ep, 5 ) ) {
1669 // Dispose returned item.
1674 template <typename Q>
1675 bool extract( exempt_ptr& result, Q const& key )
// Delegates to do_extract(); disposal is deferred to the exempt_ptr (see doc above).
1677 return do_extract( result, key );
1680 /// Extracts the item from the set with comparing functor \p pred
1682 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
1683 but \p pred predicate is used for key comparing.
1684 \p Less has the interface like \p std::less.
1685 \p pred must imply the same element order as the comparator used for building the set.
1687 template <typename Q, typename Less>
1688 bool extract_with( exempt_ptr& result, Q const& key, Less pred )
// Delegates to do_extract_with(), forwarding the user-supplied predicate.
1690 return do_extract_with( result, key, pred );
1693 /// Extracts an item with minimal key from the list
1695 The function searches an item with minimal key, unlinks it, and returns the item found in \p result parameter.
1696 If the skip-list is empty the function returns \p false.
1698 RCU \p synchronize method can be called. RCU should NOT be locked.
1699 The function does not call the disposer for the item found.
1700 The disposer will be implicitly invoked when \p result object is destroyed or when
1701 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1702 @note Before reusing \p result object you should call its \p release() method.
1705 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1709 typename skip_list::exempt_ptr ep;
1710 if ( theList.extract_min(ep)) {
1714 // Dispose returned item.
1719 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1720 It means that the function gets leftmost item and tries to unlink it.
1721 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1722 So, the function returns the item with minimum key at the moment of list traversing.
1724 bool extract_min( exempt_ptr& result )
1726 return do_extract_min( result );
1729 /// Extracts an item with maximal key from the list
1731 The function searches an item with maximal key, unlinks it, and returns the item found in \p result parameter.
1732 If the skip-list is empty the function returns \p false.
1734 RCU \p synchronize method can be called. RCU should NOT be locked.
1735 The function does not call the disposer for the item found.
1736 The disposer will be implicitly invoked when \p result object is destroyed or when
1737 <tt>result.release()</tt> is called, see cds::urcu::exempt_ptr for explanation.
1738 @note Before reusing \p result object you should call its \p release() method.
1741 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1745 typename skip_list::exempt_ptr ep;
1746 if ( theList.extract_max(ep) ) {
1749 // Dispose returned item.
1754 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1755 It means that the function gets rightmost item and tries to unlink it.
1756 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1757 So, the function returns the item with maximum key at the moment of list traversing.
1759 bool extract_max( exempt_ptr& result )
1761 return do_extract_max( result );
1764 /// Deletes the item from the set
1765 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1766 The function searches an item with key equal to \p key in the set,
1767 unlinks it from the set, and returns \p true.
1768 If the item with key equal to \p key is not found the function returns \p false.
1770 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1772 RCU \p synchronize method can be called. RCU should not be locked.
1774 template <typename Q>
1775 bool erase( const Q& key )
// Delegates to do_erase() with the default comparator and a no-op item functor.
1777 return do_erase( key, key_comparator(), [](value_type const&) {} );
1780 /// Delete the item from the set with comparing functor \p pred
1782 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1783 but \p pred predicate is used for key comparing.
1784 \p Less has the interface like \p std::less.
1785 \p pred must imply the same element order as the comparator used for building the set.
1787 template <typename Q, typename Less>
1788 bool erase_with( const Q& key, Less pred )
// make_comparator_from_less adapts the Less predicate to a three-way comparator.
1790 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1793 /// Deletes the item from the set
1794 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1795 The function searches an item with key equal to \p key in the set,
1796 call \p f functor with item found, unlinks it from the set, and returns \p true.
1797 The \ref disposer specified in \p Traits class template parameter is called
1798 by garbage collector \p GC asynchronously.
1800 The \p Func interface is
1803 void operator()( value_type const& item );
1806 If the item with key equal to \p key is not found the function returns \p false.
1808 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1810 RCU \p synchronize method can be called. RCU should not be locked.
1812 template <typename Q, typename Func>
1813 bool erase( Q const& key, Func f )
// Same as erase(key) but passes the user functor to be invoked on the item found.
1815 return do_erase( key, key_comparator(), f );
1818 /// Delete the item from the set with comparing functor \p pred
1820 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1821 but \p pred predicate is used for key comparing.
1822 \p Less has the interface like \p std::less.
1823 \p pred must imply the same element order as the comparator used for building the set.
1825 template <typename Q, typename Less, typename Func>
1826 bool erase_with( Q const& key, Less pred, Func f )
// make_comparator_from_less adapts the Less predicate to a three-way comparator.
1828 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1832 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1833 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1834 The interface of \p Func functor is:
1837 void operator()( value_type& item, Q& key );
1840 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1842 The functor can change non-key fields of \p item. Note that it is only guaranteed
1843 that \p item cannot be disposed while the functor is executing.
1844 The functor does not serialize simultaneous access to the set \p item. If such access is
1845 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1847 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1848 can modify both arguments.
1850 The function applies RCU lock internally.
1852 The function returns \p true if \p key is found, \p false otherwise.
1854 template <typename Q, typename Func>
1855 bool find( Q& key, Func f )
1857 return do_find_with( key, key_comparator(), f );
1860 /// Finds the key \p key with comparing functor \p pred
1862 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1863 but \p pred is used for key comparison.
1864 \p Less functor has the interface like \p std::less.
1865 \p pred must imply the same element order as the comparator used for building the set.
1867 template <typename Q, typename Less, typename Func>
1868 bool find_with( Q& key, Less pred, Func f )
1870 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1874 /** @anchor cds_intrusive_SkipListSet_rcu_find_val
1875 The function searches the item with key equal to \p key
1876 and returns \p true if it is found, and \p false otherwise.
1878 The function applies RCU lock internally.
1880 template <typename Q>
1881 bool find( Q const& key )
// Membership test only: the no-op functor discards the item found.
1883 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1886 /// Finds \p key with comparing functor \p pred
1888 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_val "find(Q const&)"
1889 but \p pred is used for key compare.
1890 \p Less functor has the interface like \p std::less.
1891 \p pred must imply the same element order as the comparator used for building the set.
1893 template <typename Q, typename Less>
1894 bool find_with( Q const& key, Less pred )
// Membership test only: the no-op functor discards the item found.
1896 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1899 /// Finds \p key and return the item found
1900 /** \anchor cds_intrusive_SkipListSet_rcu_get
1901 The function searches the item with key equal to \p key and returns the pointer to item found.
1902 If \p key is not found it returns \p nullptr.
1904 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1906 RCU should be locked before call of this function.
1907 Returned item is valid only while RCU is locked:
1909 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1914 skip_list::rcu_lock lock;
1916 foo * pVal = theList.get( 5 );
1922 // Unlock RCU by rcu_lock destructor
1923 // pVal can be retired by disposer at any time after RCU has been unlocked
1926 After RCU unlocking the \p %force_dispose member function can be called manually,
1927 see \ref force_dispose for explanation.
1929 template <typename Q>
1930 value_type * get( Q const& key )
// Precondition: caller must hold the RCU read lock (see doc above).
1932 assert( gc::is_locked());
// pFound is assigned only inside the functor, i.e. only when the key is found;
// NOTE(review): the (elided) conditional tail presumably yields nullptr on miss — confirm against full source.
1934 value_type * pFound;
1935 return do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; } )
1939 /// Finds \p key and return the item found
1941 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1942 but \p pred is used for comparing the keys.
1944 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1946 \p pred must imply the same element order as the comparator used for building the set.
1948 template <typename Q, typename Less>
1949 value_type * get_with( Q const& key, Less pred )
// Precondition: caller must hold the RCU read lock.
1951 assert( gc::is_locked());
// pFound is assigned only inside the functor, i.e. only when the key is found;
// NOTE(review): the (elided) conditional tail presumably yields nullptr on miss — confirm against full source.
1953 value_type * pFound;
1954 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1955 [&pFound](value_type& found, Q const& ) { pFound = &found; } )
1959 /// Returns item count in the set
1961 The value returned depends on item counter type provided by \p Traits template parameter.
1962 For \p atomicity::empty_item_counter the function always returns 0.
1963 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1964 member function for this purpose.
1968 return m_ItemCounter;
1971 /// Checks if the set is empty
// Empty iff the head node's bottom-level (level 0) successor is null.
// Relaxed load is sufficient: an emptiness snapshot is inherently approximate in a concurrent set.
1974 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1977 /// Clears the set (not atomic)
1979 The function unlinks all items from the set.
1980 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1984 assert( set.empty() );
1986 the assertion could be raised.
1988 For each item the \p disposer will be called automatically after unlinking.
// Repeatedly extracts the leftmost item until the skip-list is drained.
1993 while ( extract_min(ep) )
1997 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1998 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
2000 return c_nMaxHeight;
2003 /// Returns const reference to internal statistics
2004 stat const& statistics() const
2009 /// Clears internal list of ready-to-remove items passing it to RCU reclamation cycle
2010 /** @anchor cds_intrusive_SkipListSet_rcu_force_dispose
2011 Skip list has complex multi-step algorithm for removing an item. In fact, when you
2012 remove the item it is just marked as removed that is enough for the success of your operation.
2013 Actual removing can take place in the future, in another call or even in another thread.
2014 Inside RCU lock the removed item cannot be passed to RCU reclamation cycle
2015 since it can lead to deadlock. To solve this problem, the current skip list implementation
2016 has internal list of items which are ready to remove but are not yet passed to RCU reclamation.
2017 Usually, this list will be passed to RCU reclamation in the next suitable call of skip list member function.
2018 In some cases we want to pass it to RCU reclamation immediately after RCU unlocking.
2019 This function provides such opportunity: it checks whether the RCU is not locked and if it is true
2020 the function passes the internal ready-to-remove list to RCU reclamation cycle.
2022 The RCU \p synchronize can be called.
2024 void force_dispose()
// No-op while RCU is locked: reclaiming under the read lock could deadlock (see note above).
2026 if ( !gc::is_locked() )
2031 }} // namespace cds::intrusive
2034 #endif // #ifndef __CDS_INTRUSIVE_SKIP_LIST_RCU_H