2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
// Skip-list node specialization for RCU-based garbage collectors.
// NOTE(review): this source is garbled — the original file's line numbers are
// fused into each line and many lines (braces, parts of bodies) are missing.
// Comments below describe only what the visible code shows.
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
// marked_ptr reserves the two low-order bits of the pointer for node state:
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
// Lifecycle flags: set when the node has been linked into / unlinked from the list
// (used by the assertion below; presumably debug/consistency aids — confirm upstream).
68 bool volatile m_bLinked;
69 bool volatile m_bUnlinked;
72 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
73 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
76 /// Constructs a node of height 1 (a bottom-list node)
// NOTE(review): constructor initializer list only partially visible here.
79 , m_pDelChain( nullptr )
82 , m_bUnlinked( false )
85 , m_arrNext( nullptr )
// Sanity check: a node that was linked must have been unlinked by now.
91 assert( !m_bLinked || m_bUnlinked );
95 /// Constructs a node of height \p nHeight
// Attaches an externally-allocated tower array; nextTower must be nullptr iff nHeight == 1.
96 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
98 assert( nHeight > 0 );
99 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
100 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
103 m_arrNext = nextTower;
// Detaches the tower array and returns it (caller takes responsibility for it).
107 atomic_marked_ptr * release_tower()
109 atomic_marked_ptr * pTower = m_arrNext;
// Read-only access to the tower array.
115 atomic_marked_ptr * get_tower() const
// Resets next pointers for levels 1 .. m_nHeight-1 (level 0 is m_pNext, handled separately).
122 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
123 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
126 /// Access to element of next pointer array
127 atomic_marked_ptr& next( unsigned int nLevel )
129 assert( nLevel < height() );
130 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr) );
132 # ifdef CDS_THREAD_SANITIZER_ENABLED
133 // TSan false positive: m_arrNext is read-only array
134 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
135 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
136 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
// Level 0 lives in m_pNext; level N > 0 lives in m_arrNext[N - 1].
139 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
143 /// Access to element of next pointer array (const version)
144 atomic_marked_ptr const& next( unsigned int nLevel ) const
146 assert( nLevel < height() );
147 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
149 # ifdef CDS_THREAD_SANITIZER_ENABLED
150 // TSan false positive: m_arrNext is read-only array
151 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
152 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
153 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
156 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
160 /// Access to element of next pointer array (same as \ref next function)
161 atomic_marked_ptr& operator[]( unsigned int nLevel )
163 return next( nLevel );
166 /// Access to element of next pointer array (same as \ref next function)
167 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
169 return next( nLevel );
172 /// Height of the node
173 unsigned int height() const
178 /// Clears internal links
// Precondition: the tower has already been released (m_arrNext == nullptr).
181 assert( m_arrNext == nullptr );
182 m_pNext.store( marked_ptr(), atomics::memory_order_release );
183 m_pDelChain = nullptr;
// True when the node carries no links; used as a "ready for reuse" check.
186 bool is_cleared() const
188 return m_pNext == atomic_marked_ptr()
189 && m_arrNext == nullptr
193 } // namespace skip_list
197 namespace skip_list { namespace details {
// Forward iterator specialization for the RCU-based skip-list.
// Safe to use only while RCU is locked by the caller; it walks the bottom-level
// (level 0) list and skips nodes whose top-level pointer carries a deletion mark.
// NOTE(review): several lines are missing from this extraction; comments describe
// only the visible statements.
199 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
200 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
203 typedef cds::urcu::gc< RCU > gc;
204 typedef NodeTraits node_traits;
205 typedef BackOff back_off;
206 typedef typename node_traits::node_type node_type;
207 typedef typename node_traits::value_type value_type;
208 static bool const c_isConst = IsConst;
// value_ref is const for const_iterator, mutable otherwise.
210 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
213 typedef typename node_type::marked_ptr marked_ptr;
214 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
// A node is logically deleted when its highest-level next pointer is marked.
224 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
225 // Current node is marked as deleted. So, its next pointer can point to anything
226 // In this case we interrupt our iteration and return the end() iterator.
231 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
232 node_type * pp = p.ptr();
234 // p is marked as deleted. Spin waiting for physical removal
238 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
239 // p is marked as deleted. Spin waiting for physical removal
// Constructs an iterator positioned at the first live node after the head tower.
249 public: // for internal use only!!!
250 iterator( node_type& refHead )
256 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
262 node_type * pp = p.ptr();
263 // Logically deleted node is marked from highest level
264 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// Copy constructor: shallow copy of the current node pointer.
278 iterator( iterator const& s)
279 : m_pNode( s.m_pNode )
// Dereference operators: must not be called on an end() iterator (m_pNode == nullptr).
282 value_type * operator ->() const
284 assert( m_pNode != nullptr );
285 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
287 return node_traits::to_value_ptr( m_pNode );
290 value_ref operator *() const
292 assert( m_pNode != nullptr );
293 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
295 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment only; there is deliberately no post-increment operator.
299 iterator& operator ++()
305 iterator& operator = (const iterator& src)
307 m_pNode = src.m_pNode;
// Equality compares the wrapped node pointer; mixed const/non-const comparison allowed.
311 template <typename Bkoff, bool C>
312 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
314 return m_pNode == i.m_pNode;
316 template <typename Bkoff, bool C>
317 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
319 return !( *this == i );
322 }} // namespace skip_list::details
325 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
326 /** @ingroup cds_intrusive_map
327 @anchor cds_intrusive_SkipListSet_rcu
329 The implementation of well-known probabilistic data structure called skip-list
330 invented by W.Pugh in his papers:
331 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
332 - [1990] W.Pugh A Skip List Cookbook
334 A skip-list is a probabilistic data structure that provides expected logarithmic
335 time search without the need of rebalance. The skip-list is a collection of sorted
336 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
337 Each list has a level, ranging from 0 to 32. The bottom-level list contains
338 all the nodes, and each higher-level list is a sublist of the lower-level lists.
339 Each node is created with a random top level (with a random height), and belongs
340 to all lists up to that level. The probability that a node has the height 1 is 1/2.
341 The probability that a node has the height N is 1/2 ** N (more precisely,
342 the distribution depends on an random generator provided, but our generators
345 The lock-free variant of skip-list is implemented according to book
346 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
347 chapter 14.4 "A Lock-Free Concurrent Skiplist".
349 <b>Template arguments</b>:
350 - \p RCU - one of \ref cds_urcu_gc "RCU type"
351 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
352 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
353 - \p Traits - set traits, default is \p skip_list::traits
354 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
355 instead of \p Traits template argument.
357 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
358 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
362 The class supports a forward iterator (\ref iterator and \ref const_iterator).
363 The iteration is ordered.
365 You may iterate over skip-list set items only under RCU lock.
366 Only in this case the iterator is thread-safe since
367 while RCU is locked any set's item cannot be reclaimed.
369 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
370 (i.e. inserting, erasing and so on) is not possible.
372 @warning The iterator object cannot be passed between threads.
374 Example how to use skip-list set iterators:
376 // First, you should include the header for RCU type you have chosen
377 #include <cds/urcu/general_buffered.h>
378 #include <cds/intrusive/skip_list_rcu.h>
380 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
386 // Traits for your skip-list.
387 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
388 struct my_traits: public cds::intrusive::skip_list::traits
392 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
394 my_skiplist_set theSet;
400 // Apply RCU locking manually
401 typename rcu_type::scoped_lock sl;
403 for ( auto it = theSet.begin(); it != theSet.end(); ++it ) {
407 // rcu_type::scoped_lock destructor releases RCU lock implicitly
411 The iterator class supports the following minimalistic interface:
418 iterator( iterator const& s);
420 value_type * operator ->() const;
421 value_type& operator *() const;
424 iterator& operator ++();
427 iterator& operator = (const iterator& src);
429 bool operator ==(iterator const& i ) const;
430 bool operator !=(iterator const& i ) const;
433 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
437 You should incorporate skip_list::node into your struct \p T and provide
438 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
439 define a struct based on \p skip_list::traits.
441 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
443 // First, you should include the header for RCU type you have chosen
444 #include <cds/urcu/general_buffered.h>
446 // Include RCU skip-list specialization
447 #include <cds/intrusive/skip_list_rcu.h>
450 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
452 // Data stored in skip list
453 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
462 // my_data compare functor
464 int operator()( const my_data& d1, const my_data& d2 )
466 return d1.strKey.compare( d2.strKey );
469 int operator()( const my_data& d, const std::string& s )
471 return d.strKey.compare(s);
474 int operator()( const std::string& s, const my_data& d )
476 return s.compare( d.strKey );
481 struct my_traits: public cds::intrusive::skip_list::traits
483 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
484 typedef my_data_cmp compare;
487 // Declare skip-list set type
488 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
491 Equivalent option-based code:
493 #include <cds/urcu/general_buffered.h>
494 #include <cds/intrusive/skip_list_rcu.h>
496 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
505 // Declare option-based skip-list set
506 typedef cds::intrusive::SkipListSet< rcu_type
508 , typename cds::intrusive::skip_list::make_traits<
509 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
510 ,cds::intrusive::opt::compare< my_data_cmp >
519 #ifdef CDS_DOXYGEN_INVOKED
520 ,typename Traits = skip_list::traits
// SkipListSet specialization for RCU garbage collectors.
// NOTE(review): lines are missing throughout this extraction (original line
// numbers are fused into the text); comments annotate only what is visible.
525 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
528 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
529 typedef T value_type; ///< type of value stored in the skip-list
530 typedef Traits traits; ///< Traits template parameter
532 typedef typename traits::hook hook; ///< hook type
533 typedef typename hook::node_type node_type; ///< node type
535 # ifdef CDS_DOXYGEN_INVOKED
536 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
538 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
541 typedef typename traits::disposer disposer; ///< disposer
542 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
544 typedef typename traits::item_counter item_counter; ///< Item counting policy used
545 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
546 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
547 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
548 typedef typename traits::back_off back_off; ///< Back-off strategy
549 typedef typename traits::stat stat; ///< internal statistics type
550 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
551 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
552 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
555 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
557 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
558 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Compile-time clamp: min(random_level_generator::c_nUpperBound, skip_list::c_nHeightLimit).
560 static unsigned int const c_nMaxHeight = std::conditional<
561 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
562 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
563 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
// Initial estimate used for m_nHeight (see the constructor below).
567 static unsigned int const c_nMinHeight = 5;
571 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
572 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
// Node builder: the default intrusive builder unless traits override it.
576 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
578 typedef typename std::conditional<
579 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
580 ,intrusive_node_builder
581 ,typename traits::internal_node_builder
582 >::type node_builder;
// RAII owner for a freshly-built node; disposes the tower if insertion fails.
584 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Disposes a single value: runs the node disposer on the node extracted from pVal.
586 static void dispose_node( value_type * pVal )
590 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal) );
// Functor wrapper around dispose_node, used as the retired-pointer disposer.
596 void operator()( value_type * pVal )
598 dispose_node( pVal );
// Retires a whole chain of deleted nodes (linked via m_pDelChain) in one batch.
// Must be called outside any RCU read-side critical section.
602 static void dispose_chain( node_type * pChain )
605 assert( !gc::is_locked() );
// Generator lambda: yields one retired_ptr per call, walking the chain;
// a null retired_ptr (below) signals the end of the batch.
607 auto f = [&pChain]() -> cds::urcu::retired_ptr {
608 node_type * p = pChain;
610 pChain = p->m_pDelChain;
611 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
613 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
615 gc::batch_retire(std::ref(f));
// position: per-operation scratch recording, for every level, the predecessor
// and successor of the search point, plus a chain of nodes to dispose later.
620 node_type * pPrev[ c_nMaxHeight ];
621 node_type * pSucc[ c_nMaxHeight ];
622 node_type * pNext[ c_nMaxHeight ];
625 node_type * pDelChain;
628 : pDelChain( nullptr )
// Destructor (presumably) flushes the accumulated deletion chain.
633 dispose_chain( pDelChain );
// Queues a logically-removed node onto the local deletion chain;
// actual freeing is deferred until RCU permits it.
636 void dispose( node_type * p )
638 assert( p != nullptr );
639 assert( p->m_pDelChain == nullptr );
641 p->m_pDelChain = pDelChain;
646 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
650 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
652 item_counter m_ItemCounter; ///< item counter
653 random_level_generator m_RandomLevelGen; ///< random level generator instance
654 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
655 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
656 mutable stat m_Stat; ///< internal statistics
// Draws a random node height in [1 .. 32].
660 unsigned int random_level()
662 // Random generator produces a number from range [0..31]
663 // We need a number from range [1..32]
664 return m_RandomLevelGen() + 1;
// Builds a node tower of random height for value v.
667 template <typename Q>
668 node_type * build_node( Q v )
670 return node_builder::make_tower( v, m_RandomLevelGen );
675 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
// Disposer used by raw_ptr: frees a whole deleted-node chain at once.
679 struct chain_disposer {
680 void operator()( node_type * pChain ) const
682 dispose_chain( pChain );
685 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
689 /// Result of \p get(), \p get_with() functions - pointer to the node found
690 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
// True if the "extracted" mark (bit 1) is set on the marked pointer.
695 bool is_extracted( marked_node_ptr const p ) const
697 return (p.bits() & 2) != 0;
// Searches for val, filling pos.pPrev/pos.pSucc at every level. While traversing
// it helps unlink logically-deleted nodes. Returns true iff an exact match was
// found (pos.pCur set). Must be called inside an RCU read-side critical section.
// If bStopIfFound, the search may stop early at the first exact match.
700 template <typename Q, typename Compare >
701 bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
703 assert( gc::is_locked() );
706 marked_node_ptr pSucc;
707 marked_node_ptr pCur;
// Start from the head tower and walk top level down to level 0.
711 pPred = m_Head.head();
713 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
716 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
718 // pCur.bits() means that pPred is logically deleted
722 if ( pCur.ptr() == nullptr ) {
723 // end of the list at level nLevel - goto next level
727 // pSucc contains deletion mark for pCur
728 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-check that pPred still points at pCur; otherwise the list changed under us.
730 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
733 if ( pSucc.bits() ) {
734 // pCur is marked, i.e. logically deleted.
735 marked_node_ptr p( pCur.ptr() );
738 pCur->m_bUnlinked = true;
// Help physical removal: swing pPred's next past the deleted node.
740 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
741 memory_model::memory_order_release, atomics::memory_order_relaxed ))
// Erased (not extracted) nodes are queued for deferred disposal; extracted
// nodes are owned by the extracting thread and must not be disposed here.
744 if ( !is_extracted( pSucc )) {
745 // We cannot free the node at this moment since RCU is locked
746 // Link deleted nodes to a chain to free later
747 pos.dispose( pCur.ptr() );
748 m_Stat.onEraseWhileFind();
751 m_Stat.onExtractWhileFind();
758 nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
761 else if ( nCmp == 0 && bStopIfFound )
// Record the insert/remove position at this level before descending.
769 pos.pPrev[ nLevel ] = pPred;
770 pos.pSucc[ nLevel ] = pCur.ptr();
777 pos.pCur = pCur.ptr();
778 return pCur.ptr() && nCmp == 0;
// Locates the leftmost (minimum) item, filling pos at every level.
// Helps unlink logically-deleted nodes along the way, like find_position.
// Returns true iff the list is non-empty (pos.pCur set). RCU must be locked.
781 bool find_min_position( position& pos )
783 assert( gc::is_locked() );
786 marked_node_ptr pSucc;
787 marked_node_ptr pCur;
790 pPred = m_Head.head();
792 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
794 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
795 // pCur.bits() means that pPred is logically deleted
796 // head cannot be deleted
797 assert( pCur.bits() == 0 );
801 // pSucc contains deletion mark for pCur
802 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Validate that pPred still links to pCur before acting on pSucc.
804 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
807 if ( pSucc.bits() ) {
808 // pCur is marked, i.e. logically deleted.
811 pCur->m_bUnlinked = true;
813 marked_node_ptr p( pCur.ptr() );
// Help physical removal of the marked node at this level.
814 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
815 memory_model::memory_order_release, atomics::memory_order_relaxed ))
818 if ( !is_extracted( pSucc )) {
819 // We cannot free the node at this moment since RCU is locked
820 // Link deleted nodes to a chain to free later
821 pos.dispose( pCur.ptr() );
822 m_Stat.onEraseWhileFind();
825 m_Stat.onExtractWhileFind();
834 pos.pPrev[ nLevel ] = pPred;
835 pos.pSucc[ nLevel ] = pCur.ptr();
837 return (pos.pCur = pCur.ptr()) != nullptr;
// Locates the rightmost (maximum) item, filling pos at every level.
// Mirrors find_min_position but walks to the end of each level's list.
840 bool find_max_position( position& pos )
842 assert( gc::is_locked() );
845 marked_node_ptr pSucc;
846 marked_node_ptr pCur;
849 pPred = m_Head.head();
851 for ( int nLevel = static_cast<int>(c_nMaxHeight - 1); nLevel >= 0; --nLevel ) {
854 pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
856 // pCur.bits() means that pPred is logically deleted
860 if ( pCur.ptr() == nullptr ) {
861 // end of the list at level nLevel - goto next level
865 // pSucc contains deletion mark for pCur
866 pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
868 if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr() )
871 if ( pSucc.bits() ) {
872 // pCur is marked, i.e. logically deleted.
875 pCur->m_bUnlinked = true;
877 marked_node_ptr p( pCur.ptr() );
878 if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
879 memory_model::memory_order_release, atomics::memory_order_relaxed ))
882 if ( !is_extracted( pSucc )) {
883 // We cannot free the node at this moment since RCU is locked
884 // Link deleted nodes to a chain to free later
885 pos.dispose( pCur.ptr() );
886 m_Stat.onEraseWhileFind();
889 m_Stat.onExtractWhileFind();
904 pos.pPrev[ nLevel ] = pPred;
905 pos.pSucc[ nLevel ] = pCur.ptr();
908 return (pos.pCur = pCur.ptr()) != nullptr;
// Links pNode into the list at the position recorded in pos.
// Level 0 is linked first via CAS (this is the linearization point of insert);
// higher levels are linked afterwards, re-searching the position on contention.
// Calls f on the value once level 0 is linked. RCU must be locked.
911 template <typename Func>
912 bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
914 assert( gc::is_locked() );
916 unsigned int nHeight = pNode->height();
917 pNode->clear_tower();
// Link at the bottom level: publish pNode->next(0), then CAS it into pPrev[0].
920 marked_node_ptr p( pos.pSucc[0] );
921 pNode->next( 0 ).store( p, memory_model::memory_order_release );
922 if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
926 pNode->m_bLinked = true;
// Link the remaining levels 1 .. nHeight-1.
931 for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
934 marked_node_ptr q( pos.pSucc[ nLevel ]);
// If pNode's own next pointer got marked, a concurrent erase removed it mid-insert.
935 if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
936 // pNode has been marked as removed while we are inserting it
939 m_Stat.onLogicDeleteWhileInsert();
943 if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
946 // Renew insert position
947 m_Stat.onRenewInsertPosition();
948 if ( !find_position( val, pos, key_comparator(), false )) {
949 // The node has been deleted while we are inserting it
950 m_Stat.onNotFoundWhileInsert();
// Logically deletes pDel found at pos, then attempts physical unlinking.
// Marks from the top level down; level 0 marking is the linearization point.
// bExtract == true additionally sets the "extracted" bit (mask 3 vs 1) so helpers
// will not dispose the node. Calls f on the value when marking succeeds.
958 template <typename Func>
959 bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
961 assert( pDel != nullptr );
962 assert( gc::is_locked() );
964 marked_node_ptr pSucc;
966 // logical deletion (marking)
967 for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
968 pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
971 || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
978 pSucc = pDel->next(0).load( memory_model::memory_order_relaxed );
// bit 0 = deleted; bit 1 = extracted (ownership transferred to the caller).
983 int const nMask = bExtract ? 3 : 1;
984 if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
986 f( *node_traits::to_value_ptr( pDel ));
// Physical removal: unlink pDel from every level, top-down.
991 for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
992 if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
993 marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
994 memory_model::memory_order_release, atomics::memory_order_relaxed) )
// Slow path: contention — re-search, letting find_position help the unlinking.
997 find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
999 m_Stat.onSlowExtract();
1001 m_Stat.onSlowErase();
1003 assert( pDel->m_bUnlinked );
1010 pDel->m_bUnlinked = true;
1013 // We cannot free the node at this moment since RCU is locked
1014 // Link deleted nodes to a chain to free later
1015 pos.dispose( pDel );
1016 m_Stat.onFastErase();
1019 m_Stat.onFastExtract();
1022 m_Stat.onEraseRetry();
// Outcome of the fast-path search.
// NOTE(review): "finsd" is a long-standing typo for "find" — renaming would
// touch every use site, so it is only flagged here.
1026 enum finsd_fastpath_result {
1027 find_fastpath_found,
1028 find_fastpath_not_found,
// Fast-path search: traverses without helping removal. If it keeps hitting
// marked nodes it aborts (after a bounded number of attempts) so the caller
// can fall back to the helping slow path. Calls f on an exact match.
1031 template <typename Q, typename Compare, typename Func>
1032 finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
1035 marked_node_ptr pCur;
1036 marked_node_ptr pSucc;
1037 marked_node_ptr pNull;
// Start from the current estimated height, not c_nMaxHeight, to skip empty levels.
1041 pPred = m_Head.head();
1042 for ( int nLevel = static_cast<int>(m_nHeight.load(memory_model::memory_order_relaxed) - 1); nLevel >= 0; --nLevel ) {
1043 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1044 if ( pCur == pNull )
1047 while ( pCur != pNull ) {
1048 if ( pCur.bits() ) {
1049 // Wait until pCur is removed
1050 unsigned int nAttempt = 0;
// Bounded spin (16 attempts) waiting for a concurrent remover to unlink pCur.
1051 while ( pCur.bits() && nAttempt++ < 16 ) {
1053 pCur = pPred->next(nLevel).load( memory_model::memory_order_acquire );
1057 if ( pCur.bits() ) {
1058 // Maybe, we are on deleted node sequence
1059 // Abort searching, try slow-path
1060 return find_fastpath_abort;
1065 int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr() ), val );
1068 pCur = pCur->next(nLevel).load( memory_model::memory_order_acquire );
1070 else if ( nCmp == 0 ) {
1072 f( *node_traits::to_value_ptr( pCur.ptr() ), val );
1073 return find_fastpath_found;
1075 else // pCur > val - go down
1081 return find_fastpath_not_found;
// Slow-path search: delegates to find_position (which helps physical removal)
// and calls f on the found value.
1084 template <typename Q, typename Compare, typename Func>
1085 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1087 if ( find_position( val, pos, cmp, true )) {
1088 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1090 f( *node_traits::to_value_ptr( pos.pCur ), val );
// Convenience overload creating a local position scratch.
1097 template <typename Q, typename Compare, typename Func>
1098 bool do_find_with( Q& val, Compare cmp, Func f )
1101 return do_find_with( val, cmp, f, pos );
// Two-tier find: try the fast path first, fall back to the slow path on abort.
1104 template <typename Q, typename Compare, typename Func>
1105 bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
1112 switch ( find_fastpath( val, cmp, f )) {
1113 case find_fastpath_found:
1114 m_Stat.onFindFastSuccess();
1116 case find_fastpath_not_found:
1117 m_Stat.onFindFastFailed();
1123 if ( find_slowpath( val, cmp, f, pos )) {
1124 m_Stat.onFindSlowSuccess();
1128 m_Stat.onFindSlowFailed();
// Erases the item equal to val: find, then try_remove_at with bExtract=false
// (the node is queued for deferred disposal). Calls f on the removed value.
// Checks the RCU deadlock policy — must not be called under an RCU lock this
// policy forbids.
1135 template <typename Q, typename Compare, typename Func>
1136 bool do_erase( Q const& val, Compare cmp, Func f )
1138 check_deadlock_policy::check();
1146 if ( !find_position( val, pos, cmp, false ) ) {
1147 m_Stat.onEraseFailed();
1151 node_type * pDel = pos.pCur;
1152 assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
// Capture height before removal for the statistics hook.
1154 unsigned int nHeight = pDel->height();
1155 if ( try_remove_at( pDel, pos, f, false )) {
1157 m_Stat.onRemoveNode( nHeight );
1158 m_Stat.onEraseSuccess();
1162 m_Stat.onEraseFailed();
// Extracts (unlinks without disposing) the item equal to key; bExtract=true so
// ownership of the node passes to the caller. Returns the value or nullptr.
1171 template <typename Q, typename Compare>
1172 value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
1174 // RCU should be locked!!!
1175 assert( gc::is_locked() );
1179 if ( !find_position( key, pos, cmp, false ) ) {
1180 m_Stat.onExtractFailed();
1185 assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
1187 unsigned int const nHeight = pDel->height();
1189 if ( try_remove_at( pDel, pos, [](value_type const&) {}, true )) {
1191 m_Stat.onRemoveNode( nHeight );
1192 m_Stat.onExtractSuccess();
1195 m_Stat.onExtractFailed();
1200 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extract using the default key comparator.
1203 template <typename Q>
1204 value_type * do_extract( Q const& key )
1206 check_deadlock_policy::check();
1207 value_type * pDel = nullptr;
1211 pDel = do_extract_key( key, key_comparator(), pos );
// Extract using a user-supplied Less predicate wrapped as a comparator.
1217 template <typename Q, typename Less>
1218 value_type * do_extract_with( Q const& key, Less pred )
1221 check_deadlock_policy::check();
1222 value_type * pDel = nullptr;
1226 pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the minimum item; must be called outside an RCU read-side section.
1232 value_type * do_extract_min()
1234 assert( !gc::is_locked() );
1242 if ( !find_min_position( pos ) ) {
1243 m_Stat.onExtractMinFailed();
1248 unsigned int const nHeight = pDel->height();
1250 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1252 m_Stat.onRemoveNode( nHeight );
1253 m_Stat.onExtractMinSuccess();
1256 m_Stat.onExtractMinFailed();
1262 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the maximum item; mirrors do_extract_min.
1265 value_type * do_extract_max()
1267 assert( !gc::is_locked() );
1275 if ( !find_max_position( pos ) ) {
1276 m_Stat.onExtractMaxFailed();
1281 unsigned int const nHeight = pDel->height();
1283 if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true ) ) {
1285 m_Stat.onRemoveNode( nHeight );
1286 m_Stat.onExtractMaxSuccess();
1289 m_Stat.onExtractMaxFailed();
1295 return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
1298 void increase_height( unsigned int nHeight )
1300 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
1301 if ( nCur < nHeight )
1302 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
1307 /// Default constructor
1309 : m_Head( c_nMaxHeight )
1310 , m_nHeight( c_nMinHeight )
1311 , m_pDeferredDelChain( nullptr )
1313 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
1315 // Barrier for head node
1316 atomics::atomic_thread_fence( memory_model::memory_order_release );
1319 /// Clears and destructs the skip-list
1326 ///@name Forward iterators (thread-safe under RCU lock)
1328 /// Forward iterator
1330 The forward iterator has some features:
1331 - it has no post-increment operator
1332 - it depends on iterator of underlying \p OrderedList
1334 You may safely use iterators in multi-threaded environment only under RCU lock.
1335 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
1337 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
1339 /// Const iterator type
1340 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
1342 /// Returns a forward iterator addressing the first element in a set
// All begin() variants start traversal from the head tower; iteration
// proceeds along level 0 of the skip-list.
1345 return iterator( *m_Head.head() );
1348 /// Returns a forward const iterator addressing the first element in a set
1349 const_iterator begin() const
1351 return const_iterator( *m_Head.head() );
1354 /// Returns a forward const iterator addressing the first element in a set
1355 const_iterator cbegin() const
1357 return const_iterator( *m_Head.head() );
1360 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
// end()/cend() are default-constructed (null) iterators.
1366 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1367 const_iterator end() const
1369 return const_iterator();
1372 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
1373 const_iterator cend() const
1375 return const_iterator();
1380 /// Inserts new node
1382 The function inserts \p val in the set if it does not contain
1383 an item with key equal to \p val.
1385 The function applies RCU lock internally.
1387 Returns \p true if \p val is placed into the set, \p false otherwise.
1389 bool insert( value_type& val )
// Delegates to the functor overload with a no-op initializer.
1391 return insert( val, []( value_type& ) {} );
1394 /// Inserts new node
1396 This function is intended for derived non-intrusive containers.
1398 The function allows splitting the creation of a new item into two parts:
1399 - create item with key only
1400 - insert new item into the set
1401 - if the insertion succeeds, call \p f functor to initialize value-field of \p val.
1403 The functor signature is:
1405 void func( value_type& val );
1407 where \p val is the item inserted. User-defined functor \p f should guarantee that during changing
1408 \p val no other changes could be made on this set's item by concurrent threads.
1409 The user-defined functor is called only if the insertion succeeds.
1411 RCU \p synchronize method can be called. RCU should not be locked.
1413 template <typename Func>
1414 bool insert( value_type& val, Func f )
// Caller must not hold the RCU read lock: removal of a conflicting node
// may need to synchronize, which would deadlock under a held read lock.
1416 check_deadlock_policy::check();
1422 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the node's tower until the insert commits; on early exit it
// disposes a tower built by this call.
1423 scoped_node_ptr scp( pNode );
1424 unsigned int nHeight = pNode->height();
// The tower may already exist from a previous failed attempt.
1425 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1426 bool bTowerMade = false;
1432 bool bFound = find_position( val, pos, key_comparator(), true );
1434 // scoped_node_ptr deletes the node tower if we create it
1438 m_Stat.onInsertFailed();
// Build the tower lazily, only once we know the key is absent.
1444 build_node( pNode );
1445 nHeight = pNode->height();
1450 if ( !insert_at_position( val, pNode, pos, f )) {
// Lost a race at the insertion point: retry the find/insert loop.
1451 m_Stat.onInsertRetry();
1455 increase_height( nHeight );
1457 m_Stat.onAddNode( nHeight );
1458 m_Stat.onInsertSuccess();
1468 /// Updates the node
1470 The operation performs inserting or changing data with lock-free manner.
1472 If the item \p val is not found in the set, then \p val is inserted into the set
1473 iff \p bInsert is \p true.
1474 Otherwise, the functor \p func is called with item found.
1475 The functor signature is:
1477 void func( bool bNew, value_type& item, value_type& val );
1480 - \p bNew - \p true if the item has been inserted, \p false otherwise
1481 - \p item - item of the set
1482 - \p val - argument \p val passed into the \p %update() function
1483 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
1484 refer to the same thing.
1486 The functor can change non-key fields of the \p item; however, \p func must guarantee
1487 that during changing no other modifications could be made on this item by concurrent threads.
1489 RCU \p synchronize method can be called. RCU should not be locked.
1491 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
1492 i.e. the node has been inserted or updated,
1493 \p second is \p true if new item has been added or \p false if the item with \p key
1496 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
1498 template <typename Func>
1499 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
// Must be called without the RCU read lock held (may synchronize).
1501 check_deadlock_policy::check();
// bRet.first = success, bRet.second = "a new item was inserted".
1504 std::pair<bool, bool> bRet( true, false );
1507 node_type * pNode = node_traits::to_node_ptr( val );
// Owns the tower built by this call until the insert commits.
1508 scoped_node_ptr scp( pNode );
1509 unsigned int nHeight = pNode->height();
1510 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
1511 bool bTowerMade = false;
1516 bool bFound = find_position( val, pos, key_comparator(), true );
1518 // scoped_node_ptr deletes the node tower if we create it before
// Existing item found: run the functor in "update" mode (bNew = false).
1522 func( false, *node_traits::to_value_ptr(pos.pCur), val );
1523 m_Stat.onUpdateExist();
// Not found and insertion allowed: build the tower lazily, then insert.
1534 build_node( pNode );
1535 nHeight = pNode->height();
// On a fresh insert, item and val are the same object: func(true, item, item).
1540 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
1541 m_Stat.onInsertRetry();
1545 increase_height( nHeight );
1548 m_Stat.onAddNode( nHeight );
1549 m_Stat.onUpdateNew();
1558 template <typename Func>
1559 CDS_DEPRECATED("ensure() is deprecated, use update()")
// Backward-compatibility wrapper: identical to update( val, func, true ).
1560 std::pair<bool, bool> ensure( value_type& val, Func func )
1562 return update( val, func, true );
1566 /// Unlinks the item \p val from the set
1568 The function searches the item \p val in the set and unlink it from the set
1569 if it is found and is equal to \p val.
1571 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
1572 and deletes the item found. \p %unlink() searches an item by key and deletes it
1573 only if \p val is an item of that set, i.e. the pointer to item found
1574 is equal to <tt> &val </tt>.
1576 RCU \p synchronize method can be called. RCU should not be locked.
1578 The \ref disposer specified in \p Traits class template parameter is called
1579 by garbage collector \p GC asynchronously.
1581 The function returns \p true if success and \p false otherwise.
1583 bool unlink( value_type& val )
// Must be called without the RCU read lock held (may synchronize).
1585 check_deadlock_policy::check();
1593 if ( !find_position( val, pos, key_comparator(), false ) ) {
1594 m_Stat.onUnlinkFailed();
1598 node_type * pDel = pos.pCur;
// find_position() succeeded, so the node found must compare equal to val.
1599 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
1601 unsigned int nHeight = pDel->height();
// Unlink only when the node found is exactly &val (pointer identity, not
// just key equality). Last argument 'false' — presumably "non-extract"
// mode of try_remove_at(); TODO confirm against its declaration.
1603 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
1605 m_Stat.onRemoveNode( nHeight );
1606 m_Stat.onUnlinkSuccess();
1610 m_Stat.onUnlinkFailed();
1619 /// Extracts the item from the set with specified \p key
1620 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1621 The function searches an item with key equal to \p key in the set,
1622 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1623 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1625 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1627 RCU \p synchronize method can be called. RCU should NOT be locked.
1628 The function does not call the disposer for the item found.
1629 The disposer will be implicitly invoked when the returned object is destroyed or when
1630 its \p release() member function is called.
1633 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1637 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1642 // Dispose returned item.
1647 template <typename Q>
1648 exempt_ptr extract( Q const& key )
// Ownership of the unlinked node passes to the returned exempt_ptr.
1650 return exempt_ptr( do_extract( key ));
1653 /// Extracts the item from the set with comparing functor \p pred
1655 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1656 \p Less has the interface like \p std::less.
1657 \p pred must imply the same element order as the comparator used for building the set.
1659 template <typename Q, typename Less>
1660 exempt_ptr extract_with( Q const& key, Less pred )
// Ownership of the unlinked node passes to the returned exempt_ptr.
1662 return exempt_ptr( do_extract_with( key, pred ));
1665 /// Extracts an item with minimal key from the list
1667 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1668 If the skip-list is empty the function returns an empty \p exempt_ptr.
1670 RCU \p synchronize method can be called. RCU should NOT be locked.
1671 The function does not call the disposer for the item found.
1672 The disposer will be implicitly invoked when the returned object is destroyed or when
1673 its \p release() member function is manually called.
1676 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1680 typename skip_list::exempt_ptr ep(theList.extract_min());
1685 // Dispose returned item.
1690 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1691 It means that the function gets leftmost item and tries to unlink it.
1692 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1693 So, the function returns the item with minimum key at the moment of list traversing.
1695 exempt_ptr extract_min()
1697 return exempt_ptr( do_extract_min());
1700 /// Extracts an item with maximal key from the list
1702 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1703 If the skip-list is empty the function returns an empty \p exempt_ptr.
1705 RCU \p synchronize method can be called. RCU should NOT be locked.
1706 The function does not call the disposer for the item found.
1707 The disposer will be implicitly invoked when the returned object is destroyed or when
1708 its \p release() member function is manually called.
1711 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1715 typename skip_list::exempt_ptr ep( theList.extract_max() );
1719 // Dispose returned item.
1724 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1725 It means that the function gets rightmost item and tries to unlink it.
1726 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1727 So, the function returns the item with maximum key at the moment of list traversing.
1729 exempt_ptr extract_max()
1731 return exempt_ptr( do_extract_max());
1734 /// Deletes the item from the set
1735 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1736 The function searches an item with key equal to \p key in the set,
1737 unlinks it from the set, and returns \p true.
1738 If the item with key equal to \p key is not found the function returns \p false.
1740 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1742 RCU \p synchronize method can be called. RCU should not be locked.
1744 template <typename Q>
1745 bool erase( const Q& key )
// No per-item callback is needed here; pass a no-op functor.
1747 return do_erase( key, key_comparator(), [](value_type const&) {} );
1750 /// Delete the item from the set with comparing functor \p pred
1752 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1753 but \p pred predicate is used for key comparing.
1754 \p Less has the interface like \p std::less.
1755 \p pred must imply the same element order as the comparator used for building the set.
1757 template <typename Q, typename Less>
1758 bool erase_with( const Q& key, Less pred )
// NOTE(review): pred itself is not used — a comparator is default-
// constructed from Less, so Less is assumed to be stateless.
1761 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1764 /// Deletes the item from the set
1765 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1766 The function searches an item with key equal to \p key in the set,
1767 calls \p f functor with item found, unlinks it from the set, and returns \p true.
1768 The \ref disposer specified in \p Traits class template parameter is called
1769 by garbage collector \p GC asynchronously.
1771 The \p Func interface is
1774 void operator()( value_type const& item );
1777 If the item with key equal to \p key is not found the function returns \p false.
1779 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1781 RCU \p synchronize method can be called. RCU should not be locked.
1783 template <typename Q, typename Func>
1784 bool erase( Q const& key, Func f )
1786 return do_erase( key, key_comparator(), f );
1789 /// Delete the item from the set with comparing functor \p pred
1791 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1792 but \p pred predicate is used for key comparing.
1793 \p Less has the interface like \p std::less.
1794 \p pred must imply the same element order as the comparator used for building the set.
1796 template <typename Q, typename Less, typename Func>
1797 bool erase_with( Q const& key, Less pred, Func f )
// NOTE(review): pred itself is not used — a comparator is default-
// constructed from Less, so Less is assumed to be stateless.
1800 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1804 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1805 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1806 The interface of \p Func functor is:
1809 void operator()( value_type& item, Q& key );
1812 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1814 The functor can change non-key fields of \p item. Note that it is only guaranteed
1815 that \p item cannot be disposed while the functor is executing.
1816 The functor does not serialize simultaneous access to the set \p item. If such access is
1817 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1819 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1820 can modify both arguments.
1822 The function applies RCU lock internally.
1824 The function returns \p true if \p key is found, \p false otherwise.
1826 template <typename Q, typename Func>
1827 bool find( Q& key, Func f )
1829 return do_find_with( key, key_comparator(), f );
// Overload for const keys: the functor receives Q const&.
1832 template <typename Q, typename Func>
1833 bool find( Q const& key, Func f )
1835 return do_find_with( key, key_comparator(), f );
1839 /// Finds the key \p key with comparing functor \p pred
1841 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1842 but \p pred is used for key comparison.
1843 \p Less functor has the interface like \p std::less.
1844 \p pred must imply the same element order as the comparator used for building the set.
1846 template <typename Q, typename Less, typename Func>
1847 bool find_with( Q& key, Less pred, Func f )
// NOTE(review): pred itself is not used — a comparator is default-
// constructed from Less, so Less is assumed to be stateless.
1850 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
// Overload for const keys: the functor receives Q const&.
1853 template <typename Q, typename Less, typename Func>
1854 bool find_with( Q const& key, Less pred, Func f )
1857 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1861 /// Checks whether the set contains \p key
1863 The function searches the item with key equal to \p key
1864 and returns \p true if it is found, and \p false otherwise.
1866 The function applies RCU lock internally.
1868 template <typename Q>
1869 bool contains( Q const& key )
// No-op functor: only existence of the key matters.
1871 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
// Deprecated alias kept for backward compatibility.
1874 template <typename Q>
1875 CDS_DEPRECATED("deprecated, use contains()")
1876 bool find( Q const& key )
1878 return contains( key );
1882 /// Checks whether the set contains \p key using \p pred predicate for searching
1884 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1885 \p Less functor has the interface like \p std::less.
1886 \p Less must imply the same element order as the comparator used for building the set.
1888 template <typename Q, typename Less>
1889 bool contains( Q const& key, Less pred )
// NOTE(review): pred itself is not used — a comparator is default-
// constructed from Less, so Less is assumed to be stateless.
1892 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
// Deprecated alias kept for backward compatibility.
1895 template <typename Q, typename Less>
1896 CDS_DEPRECATED("deprecated, use contains()")
1897 bool find_with( Q const& key, Less pred )
1899 return contains( key, pred );
1903 /// Finds \p key and returns the item found
1904 /** \anchor cds_intrusive_SkipListSet_rcu_get
1905 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1906 If \p key is not found it returns empty \p raw_ptr.
1908 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1910 RCU should be locked before call of this function.
1911 Returned item is valid only while RCU is locked:
1913 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1916 typename skip_list::raw_ptr pVal;
1919 skip_list::rcu_lock lock;
1921 pVal = theList.get( 5 );
1927 // You can manually release pVal after RCU-locked section
1931 template <typename Q>
1932 raw_ptr get( Q const& key )
// The caller must hold the RCU read lock for the whole lifetime of the
// returned raw_ptr.
1934 assert( gc::is_locked());
// NOTE(review): pFound is assigned only inside the functor, i.e. only on
// the 'found' path below; consider initializing it to nullptr for
// consistency with get_with().
1937 value_type * pFound;
1938 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1939 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: the empty raw_ptr still carries the disposer for pos.
1940 return raw_ptr( raw_ptr_disposer( pos ));
1943 /// Finds \p key and returns the item found
1945 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1946 but \p pred is used for comparing the keys.
1948 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1950 \p pred must imply the same element order as the comparator used for building the set.
1952 template <typename Q, typename Less>
1953 raw_ptr get_with( Q const& key, Less pred )
// The caller must hold the RCU read lock for the whole lifetime of the
// returned raw_ptr.
1956 assert( gc::is_locked());
1958 value_type * pFound = nullptr;
// NOTE(review): pred itself is not used — a comparator is default-
// constructed from Less, so Less is assumed to be stateless.
1960 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1961 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1963 return raw_ptr( pFound, raw_ptr_disposer( pos ));
// Not found: the empty raw_ptr still carries the disposer for pos.
1965 return raw_ptr( raw_ptr_disposer( pos ));
1968 /// Returns item count in the set
1970 The value returned depends on item counter type provided by \p Traits template parameter.
1971 For \p atomicity::empty_item_counter the function always returns 0.
1972 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1973 member function for this purpose.
// Delegates to the item counter (see the empty_item_counter note above).
1977 return m_ItemCounter;
1980 /// Checks if the set is empty
// Empty iff the head tower's level-0 successor is null; a relaxed load
// is sufficient for this approximate, point-in-time check.
1983 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1986 /// Clears the set (not atomic)
1988 The function unlinks all items from the set.
1989 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1993 assert( set.empty() );
1995 the assertion could be raised.
1997 For each item the \p disposer will be called automatically after unlinking.
// Drain the list by repeatedly extracting the leftmost item; each
// extracted exempt_ptr invokes the disposer when it is destroyed.
2002 while ( (ep = extract_min()) );
2005 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
2006 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
// c_nMaxHeight is a class-scope constant; the "does not exceed 32" limit
// above is enforced where the constant is defined — TODO confirm.
2008 return c_nMaxHeight;
2011 /// Returns const reference to internal statistics
// Read-only accessor; presumably returns an empty stat object when
// statistics are disabled in Traits — TODO confirm.
2012 stat const& statistics() const
2018 }} // namespace cds::intrusive
2021 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H