2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
32 #define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H
34 #include <type_traits>
36 #include <cds/intrusive/details/skip_list_base.h>
37 #include <cds/opt/compare.h>
38 #include <cds/urcu/details/check_deadlock.h>
39 #include <cds/details/binary_functor_wrapper.h>
40 #include <cds/urcu/exempt_ptr.h>
41 #include <cds/urcu/raw_ptr.h>
42 #include <cds/intrusive/details/raw_ptr_disposer.h>
44 namespace cds { namespace intrusive {
49 template <class RCU, typename Tag>
50 class node< cds::urcu::gc< RCU >, Tag >
// Skip-list tower node specialized for cds::urcu::gc<RCU> garbage collectors.
53 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
54 typedef Tag tag ; ///< tag
// The low two bits of each marked pointer carry node state:
57 // bit 0 - the item is logically deleted
58 // bit 1 - the item is extracted (only for level 0)
59 typedef cds::details::marked_ptr<node, 3> marked_ptr; ///< marked pointer
60 typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer
61 typedef atomic_marked_ptr tower_item_type;
64 atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0)
66 node * m_pDelChain; ///< Deleted node chain (local for a thread)
68 unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1.
69 atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. For node at level 0 \p m_arrNext is \p nullptr
70 atomics::atomic<unsigned> m_nUnlink; ///< Unlink helper
73 /// Constructs a node of height 1 (a bottom-list node)
76 , m_pDelChain( nullptr )
78 , m_arrNext( nullptr )
82 /// Constructs a node of height \p nHeight
// Attaches a preallocated array of (nHeight - 1) upper-level links to this node.
83 void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower )
85 assert( nHeight > 0 );
86 assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node
87 || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0
90 m_arrNext = nextTower;
// m_nUnlink counts levels not yet unlinked; it starts at the full height.
92 m_nUnlink.store( nHeight, atomics::memory_order_release );
// Detaches and returns the upper-level link array; ownership passes to the caller.
95 atomic_marked_ptr * release_tower()
97 atomic_marked_ptr * pTower = m_arrNext;
// Read-only access to the upper-level link array (may be nullptr for height-1 nodes).
103 atomic_marked_ptr * get_tower() const
// Resets the links of levels 1 .. m_nHeight-1.
110 for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
111 next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
114 /// Access to element of next pointer array
115 atomic_marked_ptr& next( unsigned int nLevel )
117 assert( nLevel < height());
118 assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr));
120 # ifdef CDS_THREAD_SANITIZER_ENABLED
121 // TSan false positive: m_arrNext is read-only array
122 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
123 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
124 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
// Level 0 lives in m_pNext; level N > 0 is stored at m_arrNext[N-1].
127 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
131 /// Access to element of next pointer array (const version)
132 atomic_marked_ptr const& next( unsigned int nLevel ) const
134 assert( nLevel < height());
// NOTE(review): unlike the non-const overload above, this assert lacks parentheses
// around (nLevel > 0 && ...). Same meaning by operator precedence, but inconsistent.
135 assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr );
137 # ifdef CDS_THREAD_SANITIZER_ENABLED
138 // TSan false positive: m_arrNext is read-only array
139 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
140 atomic_marked_ptr& r = nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
141 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
144 return nLevel ? m_arrNext[ nLevel - 1] : m_pNext;
148 /// Access to element of next pointer array (same as \ref next function)
149 atomic_marked_ptr& operator[]( unsigned int nLevel )
151 return next( nLevel );
154 /// Access to element of next pointer array (same as \ref next function)
155 atomic_marked_ptr const& operator[]( unsigned int nLevel ) const
157 return next( nLevel );
160 /// Height of the node
161 unsigned int height() const
166 /// Clears internal links
169 assert( m_arrNext == nullptr );
170 m_pNext.store( marked_ptr(), atomics::memory_order_release );
171 m_pDelChain = nullptr;
// Debug helper: true when the node carries no links and no deleted chain.
174 bool is_cleared() const
176 return m_pNext == atomic_marked_ptr()
177 && m_arrNext == nullptr
// Decrements the per-level unlink counter; returns true when the last level is gone.
// NOTE(review): uses std::memory_order_relaxed while the rest of this class uses the
// atomics:: namespace for memory orders — confirm both aliases are the same type here.
181 bool level_unlinked( unsigned nCount = 1 )
183 return m_nUnlink.fetch_sub( nCount, std::memory_order_relaxed ) == 1;
// True iff \p nLevel is the highest level that has not yet been unlinked.
186 bool is_upper_level( unsigned nLevel ) const
188 return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1;
191 } // namespace skip_list
195 namespace skip_list { namespace details {
// Forward iterator over the bottom-level list; only valid under an RCU lock.
197 template <class RCU, typename NodeTraits, typename BackOff, bool IsConst>
198 class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst >
201 typedef cds::urcu::gc< RCU > gc;
202 typedef NodeTraits node_traits;
203 typedef BackOff back_off;
204 typedef typename node_traits::node_type node_type;
205 typedef typename node_traits::value_type value_type;
206 static bool const c_isConst = IsConst;
// Reference type: const for const_iterator, mutable otherwise.
208 typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref;
211 typedef typename node_type::marked_ptr marked_ptr;
212 typedef typename node_type::atomic_marked_ptr atomic_marked_ptr;
// A set mark bit on the topmost-level link means the node is logically deleted.
222 if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
223 // Current node is marked as deleted. So, its next pointer can point to anything.
224 // In this case we interrupt our iteration and return the end() iterator.
229 marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
230 node_type * pp = p.ptr();
232 // p is marked as deleted. Spin waiting for physical removal
236 else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) {
237 // p is marked as deleted. Spin waiting for physical removal
247 public: // for internal use only!!!
// Constructs an iterator positioned at the first live node after the head tower.
248 iterator( node_type& refHead )
254 marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
260 node_type * pp = p.ptr();
261 // Logically deleted node is marked from highest level
262 if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) {
// Copy constructor: iterators are cheap, pointer-sized copies.
276 iterator( iterator const& s)
277 : m_pNode( s.m_pNode )
// Dereference helpers; both require a non-end (non-null) iterator.
280 value_type * operator ->() const
282 assert( m_pNode != nullptr );
283 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
285 return node_traits::to_value_ptr( m_pNode );
288 value_ref operator *() const
290 assert( m_pNode != nullptr );
291 assert( node_traits::to_value_ptr( m_pNode ) != nullptr );
293 return *node_traits::to_value_ptr( m_pNode );
// Pre-increment only; the iterator deliberately has no post-increment operator.
297 iterator& operator ++()
303 iterator& operator = (const iterator& src)
305 m_pNode = src.m_pNode;
// Comparison is by node pointer; mixed const/non-const comparisons are allowed.
309 template <typename Bkoff, bool C>
310 bool operator ==(iterator<gc, node_traits, Bkoff, C> const& i ) const
312 return m_pNode == i.m_pNode;
314 template <typename Bkoff, bool C>
315 bool operator !=(iterator<gc, node_traits, Bkoff, C> const& i ) const
317 return !( *this == i );
320 }} // namespace skip_list::details
323 /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU")
324 /** @ingroup cds_intrusive_map
325 @anchor cds_intrusive_SkipListSet_rcu
327 The implementation of the well-known probabilistic data structure called skip-list
328 invented by W.Pugh in his papers:
329 - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees
330 - [1990] W.Pugh A Skip List Cookbook
332 A skip-list is a probabilistic data structure that provides expected logarithmic
333 time search without the need of rebalance. The skip-list is a collection of sorted
334 linked list. Nodes are ordered by key. Each node is linked into a subset of the lists.
335 Each list has a level, ranging from 0 to 32. The bottom-level list contains
336 all the nodes, and each higher-level list is a sublist of the lower-level lists.
337 Each node is created with a random top level (with a random height), and belongs
338 to all lists up to that level. The probability that a node has the height 1 is 1/2.
339 The probability that a node has the height N is 1/2 ** N (more precisely,
340 the distribution depends on the random generator provided, but our generators
343 The lock-free variant of skip-list is implemented according to book
344 - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming",
345 chapter 14.4 "A Lock-Free Concurrent Skiplist".
347 <b>Template arguments</b>:
348 - \p RCU - one of \ref cds_urcu_gc "RCU type"
349 - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook)
350 or it must have a member of type \p skip_list::node (for \p skip_list::member_hook).
351 - \p Traits - set traits, default is \p skip_list::traits
352 It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction
353 instead of \p Traits template argument.
355 @note Before including <tt><cds/intrusive/skip_list_rcu.h></tt> you should include appropriate RCU header file,
356 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
360 The class supports a forward iterator (\ref iterator and \ref const_iterator).
361 The iteration is ordered.
363 You may iterate over skip-list set items only under RCU lock.
364 Only in this case the iterator is thread-safe since
365 while RCU is locked any set's item cannot be reclaimed.
367 @note The requirement of RCU lock during iterating means that any type of modification of the skip list
368 (i.e. inserting, erasing and so on) is not possible.
370 @warning The iterator object cannot be passed between threads.
372 Example how to use skip-list set iterators:
374 // First, you should include the header for RCU type you have chosen
375 #include <cds/urcu/general_buffered.h>
376 #include <cds/intrusive/skip_list_rcu.h>
378 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
384 // Traits for your skip-list.
385 // At least, you should define cds::opt::less or cds::opt::compare for Foo struct
386 struct my_traits: public cds::intrusive::skip_list::traits
390 typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set;
392 my_skiplist_set theSet;
398 // Apply RCU locking manually
399 typename rcu_type::scoped_lock sl;
401 for ( auto it = theList.begin(); it != theList.end(); ++it ) {
405 // rcu_type::scoped_lock destructor releases RCU lock implicitly
409 The iterator class supports the following minimalistic interface:
416 iterator( iterator const& s);
418 value_type * operator ->() const;
419 value_type& operator *() const;
422 iterator& operator ++();
425 iterator& operator = (const iterator& src);
427 bool operator ==(iterator const& i ) const;
428 bool operator !=(iterator const& i ) const;
431 Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced.
435 You should incorporate skip_list::node into your struct \p T and provide
436 appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you
437 define a struct based on \p skip_list::traits.
439 Example for <tt>cds::urcu::general_buffered<></tt> RCU and base hook:
441 // First, you should include the header for RCU type you have chosen
442 #include <cds/urcu/general_buffered.h>
444 // Include RCU skip-list specialization
445 #include <cds/intrusive/skip_list_rcu.h>
448 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
450 // Data stored in skip list
451 struct my_data: public cds::intrusive::skip_list::node< rcu_type >
460 // my_data compare functor
462 int operator()( const my_data& d1, const my_data& d2 )
464 return d1.strKey.compare( d2.strKey );
467 int operator()( const my_data& d, const std::string& s )
469 return d.strKey.compare(s);
472 int operator()( const std::string& s, const my_data& d )
474 return s.compare( d.strKey );
479 struct my_traits: public cds::intrusive::skip_list::traits
481 typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook;
482 typedef my_data_cmp compare;
485 // Declare skip-list set type
486 typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set;
489 Equivalent option-based code:
491 #include <cds/urcu/general_buffered.h>
492 #include <cds/intrusive/skip_list_rcu.h>
494 typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type;
503 // Declare option-based skip-list set
504 typedef cds::intrusive::SkipListSet< rcu_type
506 , typename cds::intrusive::skip_list::make_traits<
507 cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > >
508 ,cds::intrusive::opt::compare< my_data_cmp >
517 #ifdef CDS_DOXYGEN_INVOKED
518 ,typename Traits = skip_list::traits
523 class SkipListSet< cds::urcu::gc< RCU >, T, Traits >
// Public type aliases of the RCU specialization of the intrusive skip-list set.
526 typedef cds::urcu::gc< RCU > gc; ///< Garbage collector
527 typedef T value_type; ///< type of value stored in the skip-list
528 typedef Traits traits; ///< Traits template parameter
530 typedef typename traits::hook hook; ///< hook type
531 typedef typename hook::node_type node_type; ///< node type
533 # ifdef CDS_DOXYGEN_INVOKED
534 typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less
536 typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator;
539 typedef typename traits::disposer disposer; ///< disposer
540 typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits
542 typedef typename traits::item_counter item_counter; ///< Item counting policy used
543 typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option
544 typedef typename traits::random_level_generator random_level_generator; ///< random level generator
545 typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node
546 typedef typename traits::back_off back_off; ///< Back-off strategy
547 typedef typename traits::stat stat; ///< internal statistics type
548 typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy
549 typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock
550 static CDS_CONSTEXPR const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking
553 /// Max node height. The actual node height should be in range <tt>[0 .. c_nMaxHeight)</tt>
555 The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound
556 but it should be no more than 32 (\ref skip_list::c_nHeightLimit).
// Clamp the generator's upper bound to the library-wide height limit at compile time.
558 static unsigned int const c_nMaxHeight = std::conditional<
559 (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit),
560 std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >,
561 std::integral_constant< unsigned int, skip_list::c_nHeightLimit >
565 static unsigned int const c_nMinHeight = 5;
569 typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer
570 typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer
574 typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder;
// Use the trait-supplied internal node builder if one is given; otherwise the intrusive one.
576 typedef typename std::conditional<
577 std::is_same< typename traits::internal_node_builder, cds::opt::none >::value
578 ,intrusive_node_builder
579 ,typename traits::internal_node_builder
580 >::type node_builder;
// RAII owner that disposes a node tower that was not (or not yet) linked into the list.
582 typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr;
// Disposes the node tower of \p pVal via the node builder's disposer.
584 static void dispose_node( value_type * pVal )
588 typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal));
// Functor adapter over dispose_node(), usable as an RCU retired-pointer disposer.
594 void operator()( value_type * pVal )
596 dispose_node( pVal );
// Retires the whole chain of removed nodes in one batch.
// Must be called outside an RCU read-side critical section (see the assert below).
600 static void dispose_chain( node_type * pChain )
603 assert( !gc::is_locked());
// Generator lambda: each call pops the chain head and yields it as a retired_ptr;
// a null retired_ptr signals the end of the batch to batch_retire().
605 auto f = [&pChain]() -> cds::urcu::retired_ptr {
606 node_type * p = pChain;
608 pChain = p->m_pDelChain;
609 return cds::urcu::make_retired_ptr<node_disposer>( node_traits::to_value_ptr( p ));
611 return cds::urcu::make_retired_ptr<node_disposer>( static_cast<value_type *>(nullptr));
613 gc::batch_retire(std::ref(f));
// Search position: per-level predecessors/successors plus a local deleted-node chain.
618 node_type * pPrev[ c_nMaxHeight ];
619 node_type * pSucc[ c_nMaxHeight ];
620 node_type * pNext[ c_nMaxHeight ];
623 node_type * pDelChain;
626 : pDelChain( nullptr )
// Destructor flushes any nodes collected during the search.
631 dispose_chain( pDelChain );
// Pushes a removed node onto the local deletion chain for later batch retirement.
634 void dispose( node_type * p )
636 assert( p != nullptr );
637 assert( p->m_pDelChain == nullptr );
639 p->m_pDelChain = pDelChain;
644 typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy;
// Data members
648 skip_list::details::head_node< node_type > m_Head; ///< head tower (max height)
650 item_counter m_ItemCounter; ///< item counter
651 random_level_generator m_RandomLevelGen; ///< random level generator instance
652 atomics::atomic<unsigned int> m_nHeight; ///< estimated high level
653 atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
654 mutable stat m_Stat; ///< internal statistics
// Maps the generator's output range [0..31] to a node height in [1..32].
658 unsigned int random_level()
660 // Random generator produces a number from range [0..31]
661 // We need a number from range [1..32]
662 return m_RandomLevelGen() + 1;
// Builds a node tower of random height for value \p v.
665 template <typename Q>
666 node_type * build_node( Q v )
668 return node_builder::make_tower( v, m_RandomLevelGen );
673 using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node
// Disposer used by raw_ptr: retires an entire deleted-node chain at once.
677 struct chain_disposer {
678 void operator()( node_type * pChain ) const
680 dispose_chain( pChain );
683 typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer;
687 /// Result of \p get(), \p get_with() functions - pointer to the node found
688 typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr;
691 /// Default constructor
693 : m_Head( c_nMaxHeight )
694 , m_nHeight( c_nMinHeight )
695 , m_pDeferredDelChain( nullptr )
697 static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
699 // Barrier for head node
700 atomics::atomic_thread_fence( memory_model::memory_order_release );
703 /// Clears and destructs the skip-list
710 ///@name Forward iterators (thread-safe under RCU lock)
714 The forward iterator has some features:
715 - it has no post-increment operator
716 - it traverses the bottom-level list, so the iteration is ordered by key
718 You may safely use iterators in multi-threaded environment only under RCU lock.
719 Otherwise, a crash is possible if another thread deletes the element the iterator points to.
721 typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator;
723 /// Const iterator type
724 typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator;
726 /// Returns a forward iterator addressing the first element in a set
729 return iterator( *m_Head.head());
732 /// Returns a forward const iterator addressing the first element in a set
733 const_iterator begin() const
735 return const_iterator( *m_Head.head());
738 /// Returns a forward const iterator addressing the first element in a set
739 const_iterator cbegin() const
741 return const_iterator( *m_Head.head());
744 /// Returns a forward iterator that addresses the location succeeding the last element in a set.
// end()/cend() are default-constructed (null) iterators; do not dereference them.
750 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
751 const_iterator end() const
753 return const_iterator();
756 /// Returns a forward const iterator that addresses the location succeeding the last element in a set.
757 const_iterator cend() const
759 return const_iterator();
766 The function inserts \p val in the set if it does not contain
767 an item with key equal to \p val.
769 The function applies RCU lock internally.
771 Returns \p true if \p val is placed into the set, \p false otherwise.
773 bool insert( value_type& val )
// Delegates to the functor overload with a no-op initializer.
775 return insert( val, []( value_type& ) {} );
780 This function is intended for derived non-intrusive containers.
782 The function allows splitting the creation of a new item into two parts:
783 - create item with key only
784 - insert new item into the set
785 - if the insertion succeeds, the \p f functor is called to initialize the value fields of \p val.
787 The functor signature is:
789 void func( value_type& val );
791 where \p val is the item inserted. The user-defined functor \p f should guarantee that while
792 it changes \p val, no other changes can be made to this set's item by concurrent threads.
793 The user-defined functor is called only if the insertion succeeds.
795 RCU \p synchronize method can be called. RCU should not be locked.
797 template <typename Func>
798 bool insert( value_type& val, Func f )
// Deadlock check: insertion may call RCU synchronize, so RCU must not be locked here.
800 check_deadlock_policy::check();
806 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the (not yet linked) node; it disposes the node unless insertion succeeds.
807 scoped_node_ptr scp( pNode );
808 unsigned int nHeight = pNode->height();
809 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
810 bool bTowerMade = false;
// Locate per-level predecessors/successors; true means an equal key already exists.
816 bool bFound = find_position( val, pos, key_comparator(), true );
818 // scoped_node_ptr deletes the node tower if we create it
822 m_Stat.onInsertFailed();
829 nHeight = pNode->height();
// Retry the whole search/insert cycle if linking at the found position fails.
834 if ( !insert_at_position( val, pNode, pos, f )) {
835 m_Stat.onInsertRetry();
839 increase_height( nHeight );
841 m_Stat.onAddNode( nHeight );
842 m_Stat.onInsertSuccess();
854 The operation performs inserting or changing data in a lock-free manner.
856 If the item \p val is not found in the set, then \p val is inserted into the set
857 iff \p bInsert is \p true.
858 Otherwise, the functor \p func is called with item found.
859 The functor signature is:
861 void func( bool bNew, value_type& item, value_type& val );
864 - \p bNew - \p true if the item has been inserted, \p false otherwise
865 - \p item - item of the set
866 - \p val - argument \p val passed into the \p %update() function
867 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
868 refer to the same thing.
870 The functor can change non-key fields of the \p item; however, \p func must guarantee
871 that while it changes the item, no other modifications can be made on this item by concurrent threads.
873 RCU \p synchronize method can be called. RCU should not be locked.
875 Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
876 i.e. the node has been inserted or updated,
877 \p second is \p true if new item has been added or \p false if the item with \p key
880 @warning See \ref cds_intrusive_item_creating "insert item troubleshooting"
882 template <typename Func>
883 std::pair<bool, bool> update( value_type& val, Func func, bool bInsert = true )
// Deadlock check: update may call RCU synchronize, so RCU must not be locked here.
885 check_deadlock_policy::check();
// bRet.first = success flag, bRet.second = "new item inserted" flag.
888 std::pair<bool, bool> bRet( true, false );
891 node_type * pNode = node_traits::to_node_ptr( val );
// scp owns the (not yet linked) node; it disposes the node unless it gets linked.
892 scoped_node_ptr scp( pNode );
893 unsigned int nHeight = pNode->height();
894 bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr;
895 bool bTowerMade = false;
900 bool bFound = find_position( val, pos, key_comparator(), true );
902 // scoped_node_ptr deletes the node tower if we create it before
// Item with equal key exists: invoke the functor with bNew == false.
906 func( false, *node_traits::to_value_ptr(pos.pCur), val );
907 m_Stat.onUpdateExist();
919 nHeight = pNode->height();
// Insert path: the functor sees bNew == true and item == val (same object).
924 if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) {
925 m_Stat.onInsertRetry();
929 increase_height( nHeight );
932 m_Stat.onAddNode( nHeight );
933 m_Stat.onUpdateNew();
// Deprecated alias of update( val, func, true ).
942 template <typename Func>
943 CDS_DEPRECATED("ensure() is deprecated, use update()")
944 std::pair<bool, bool> ensure( value_type& val, Func func )
946 return update( val, func, true );
950 /// Unlinks the item \p val from the set
952 The function searches the item \p val in the set and unlinks it from the set
953 if it is found and is equal to \p val.
955 Difference between \p erase() and \p %unlink() functions: \p erase() finds <i>a key</i>
956 and deletes the item found. \p %unlink() searches an item by key and deletes it
957 only if \p val is an item of that set, i.e. the pointer to item found
958 is equal to <tt> &val </tt>.
960 RCU \p synchronize method can be called. RCU should not be locked.
962 The \ref disposer specified in \p Traits class template parameter is called
963 by garbage collector \p GC asynchronously.
965 The function returns \p true on success and \p false otherwise.
967 bool unlink( value_type& val )
// Deadlock check: unlink may call RCU synchronize, so RCU must not be locked here.
969 check_deadlock_policy::check();
977 if ( !find_position( val, pos, key_comparator(), false )) {
978 m_Stat.onUnlinkFailed();
982 node_type * pDel = pos.pCur;
// The found node must compare equal to val by key.
983 assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 );
985 unsigned int nHeight = pDel->height();
// Remove only if the found item is exactly &val (identity, not just key equality).
987 if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) {
989 m_Stat.onRemoveNode( nHeight );
990 m_Stat.onUnlinkSuccess();
994 m_Stat.onUnlinkFailed();
1003 /// Extracts the item from the set with specified \p key
1004 /** \anchor cds_intrusive_SkipListSet_rcu_extract
1005 The function searches an item with key equal to \p key in the set,
1006 unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
1007 If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr.
1009 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1011 RCU \p synchronize method can be called. RCU should NOT be locked.
1012 The function does not call the disposer for the item found.
1013 The disposer will be implicitly invoked when the returned object is destroyed or when
1014 its \p release() member function is called.
1017 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1021 typename skip_list::exempt_ptr ep( theList.extract( 5 ));
1026 // Dispose returned item.
1031 template <typename Q>
1032 exempt_ptr extract( Q const& key )
// The exempt_ptr takes ownership; its destructor/release() retires the item.
1034 return exempt_ptr( do_extract( key ));
1037 /// Extracts the item from the set with comparing functor \p pred
1039 The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing.
1040 \p Less has the interface like \p std::less.
1041 \p pred must imply the same element order as the comparator used for building the set.
1043 template <typename Q, typename Less>
1044 exempt_ptr extract_with( Q const& key, Less pred )
1046 return exempt_ptr( do_extract_with( key, pred ));
1049 /// Extracts an item with minimal key from the list
1051 The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1052 If the skip-list is empty the function returns an empty \p exempt_ptr.
1054 RCU \p synchronize method can be called. RCU should NOT be locked.
1055 The function does not call the disposer for the item found.
1056 The disposer will be implicitly invoked when the returned object is destroyed or when
1057 its \p release() member function is manually called.
1060 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1064 typename skip_list::exempt_ptr ep(theList.extract_min());
1069 // Dispose returned item.
1074 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> minimum key.
1075 It means that the function gets leftmost item and tries to unlink it.
1076 During unlinking, a concurrent thread may insert an item with key less than leftmost item's key.
1077 So, the function returns the item with minimum key at the moment of list traversing.
1079 exempt_ptr extract_min()
1081 return exempt_ptr( do_extract_min());
1084 /// Extracts an item with maximal key from the list
1086 The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item.
1087 If the skip-list is empty the function returns an empty \p exempt_ptr.
1089 RCU \p synchronize method can be called. RCU should NOT be locked.
1090 The function does not call the disposer for the item found.
1091 The disposer will be implicitly invoked when the returned object is destroyed or when
1092 its \p release() member function is manually called.
1095 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1099 typename skip_list::exempt_ptr ep( theList.extract_max());
1103 // Dispose returned item.
1108 @note Due to the concurrent nature of the list, the function extracts <i>nearly</i> maximal key.
1109 It means that the function gets rightmost item and tries to unlink it.
1110 During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key.
1111 So, the function returns the item with maximum key at the moment of list traversing.
1113 exempt_ptr extract_max()
1115 return exempt_ptr( do_extract_max());
1118 /// Deletes the item from the set
1119 /** \anchor cds_intrusive_SkipListSet_rcu_erase
1120 The function searches an item with key equal to \p key in the set,
1121 unlinks it from the set, and returns \p true.
1122 If the item with key equal to \p key is not found the function returns \p false.
1124 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1126 RCU \p synchronize method can be called. RCU should not be locked.
1128 template <typename Q>
1129 bool erase( const Q& key )
// Delegates to do_erase with the default comparator and a no-op functor.
1131 return do_erase( key, key_comparator(), [](value_type const&) {} );
1134 /// Delete the item from the set with comparing functor \p pred
1136 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)"
1137 but \p pred predicate is used for key comparing.
1138 \p Less has the interface like \p std::less.
1139 \p pred must imply the same element order as the comparator used for building the set.
1141 template <typename Q, typename Less>
1142 bool erase_with( const Q& key, Less pred )
// Wraps the Less predicate into a three-way comparator for do_erase.
1145 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type const&) {} );
1148 /// Deletes the item from the set
1149 /** \anchor cds_intrusive_SkipListSet_rcu_erase_func
1150 The function searches an item with key equal to \p key in the set,
1151 calls \p f functor with item found, unlinks it from the set, and returns \p true.
1152 The \ref disposer specified in \p Traits class template parameter is called
1153 by garbage collector \p GC asynchronously.
1155 The \p Func interface is
1158 void operator()( value_type const& item );
1161 If the item with key equal to \p key is not found the function returns \p false.
1163 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1165 RCU \p synchronize method can be called. RCU should not be locked.
1167 template <typename Q, typename Func>
1168 bool erase( Q const& key, Func f )
1170 return do_erase( key, key_comparator(), f );
1173 /// Delete the item from the set with comparing functor \p pred
1175 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)"
1176 but \p pred predicate is used for key comparing.
1177 \p Less has the interface like \p std::less.
1178 \p pred must imply the same element order as the comparator used for building the set.
1180 template <typename Q, typename Less, typename Func>
1181 bool erase_with( Q const& key, Less pred, Func f )
1184 return do_erase( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1188 /** @anchor cds_intrusive_SkipListSet_rcu_find_func
1189 The function searches the item with key equal to \p key and calls the functor \p f for item found.
1190 The interface of \p Func functor is:
1193 void operator()( value_type& item, Q& key );
1196 where \p item is the item found, \p key is the <tt>find</tt> function argument.
1198 The functor can change non-key fields of \p item. Note that the functor is only guarantee
1199 that \p item cannot be disposed during functor is executing.
1200 The functor does not serialize simultaneous access to the set \p item. If such access is
1201 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
1203 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
1204 can modify both arguments.
1206 The function applies RCU lock internally.
1208 The function returns \p true if \p key is found, \p false otherwise.
1210 template <typename Q, typename Func>
1211 bool find( Q& key, Func f )
1213 return do_find_with( key, key_comparator(), f );
1216 template <typename Q, typename Func>
1217 bool find( Q const& key, Func f )
1219 return do_find_with( key, key_comparator(), f );
1223 /// Finds the key \p key with comparing functor \p pred
1225 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)"
1226 but \p cmp is used for key comparison.
1227 \p Less functor has the interface like \p std::less.
1228 \p cmp must imply the same element order as the comparator used for building the set.
1230 template <typename Q, typename Less, typename Func>
1231 bool find_with( Q& key, Less pred, Func f )
1234 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1237 template <typename Q, typename Less, typename Func>
1238 bool find_with( Q const& key, Less pred, Func f )
1241 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), f );
1245 /// Checks whether the set contains \p key
1247 The function searches the item with key equal to \p key
1248 and returns \p true if it is found, and \p false otherwise.
1250 The function applies RCU lock internally.
1252 template <typename Q>
1253 bool contains( Q const& key )
1255 return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} );
1258 template <typename Q>
1259 CDS_DEPRECATED("deprecated, use contains()")
1260 bool find( Q const& key )
1262 return contains( key );
1266 /// Checks whether the set contains \p key using \p pred predicate for searching
1268 The function is similar to <tt>contains( key )</tt> but \p pred is used for key comparing.
1269 \p Less functor has the interface like \p std::less.
1270 \p Less must imply the same element order as the comparator used for building the set.
1272 template <typename Q, typename Less>
1273 bool contains( Q const& key, Less pred )
1276 return do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(), [](value_type& , Q const& ) {} );
1279 template <typename Q, typename Less>
1280 CDS_DEPRECATED("deprecated, use contains()")
1281 bool find_with( Q const& key, Less pred )
1283 return contains( key, pred );
1287 /// Finds \p key and return the item found
1288 /** \anchor cds_intrusive_SkipListSet_rcu_get
1289 The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found.
1290 If \p key is not found it returns empty \p raw_ptr.
1292 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
1294 RCU should be locked before call of this function.
1295 Returned item is valid only while RCU is locked:
1297 typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list;
1300 typename skip_list::raw_ptr pVal;
1303 skip_list::rcu_lock lock;
1305 pVal = theList.get( 5 );
1311 // You can manually release pVal after RCU-locked section
1315 template <typename Q>
1316 raw_ptr get( Q const& key )
1318 assert( gc::is_locked());
1321 value_type * pFound;
1322 if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1323 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1324 return raw_ptr( raw_ptr_disposer( pos ));
1327 /// Finds \p key and return the item found
1329 The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)"
1330 but \p pred is used for comparing the keys.
1332 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
1334 \p pred must imply the same element order as the comparator used for building the set.
1336 template <typename Q, typename Less>
1337 raw_ptr get_with( Q const& key, Less pred )
1340 assert( gc::is_locked());
1342 value_type * pFound = nullptr;
1344 if ( do_find_with( key, cds::opt::details::make_comparator_from_less<Less>(),
1345 [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos ))
1347 return raw_ptr( pFound, raw_ptr_disposer( pos ));
1349 return raw_ptr( raw_ptr_disposer( pos ));
1352 /// Returns item count in the set
1354 The value returned depends on item counter type provided by \p Traits template parameter.
1355 For \p atomicity::empty_item_counter the function always returns 0.
1356 Therefore, the function is not suitable for checking the set emptiness, use \p empty()
1357 member function for this purpose.
1361 return m_ItemCounter;
1364 /// Checks if the set is empty
1367 return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr;
1370 /// Clears the set (not atomic)
1372 The function unlink all items from the set.
1373 The function is not atomic, thus, in multi-threaded environment with parallel insertions
1377 assert( set.empty());
1379 the assertion could be raised.
1381 For each item the \p disposer will be called automatically after unlinking.
1386 while ( (ep = extract_min()));
1389 /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32.
1390 static CDS_CONSTEXPR unsigned int max_height() CDS_NOEXCEPT
1392 return c_nMaxHeight;
1395 /// Returns const reference to internal statistics
1396 stat const& statistics() const
1404 bool is_extracted( marked_node_ptr const p ) const
1406 return ( p.bits() & 2 ) != 0;
// Helps to physically unlink the logically-deleted node \p pCur from level \p nLevel:
// CAS pPred->next(nLevel) from pCur to pSucc. When the last level of pCur is unlinked
// (level_unlinked()), the node is handed to pos.dispose() unless it is being extracted
// (extracted nodes are owned by the extracting thread).
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos )
// Expected value for the CAS: pCur without mark bits
marked_node_ptr p( pCur.ptr());
if ( pCur->is_upper_level( nLevel )
&& pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
if ( pCur->level_unlinked()) {
if ( !is_extracted( pSucc )) {
// We cannot free the node at this moment because RCU is locked
// Link deleted nodes to a chain to free later
pos.dispose( pCur.ptr());
m_Stat.onEraseWhileFind();
m_Stat.onExtractWhileFind();
// Searches for \p val and fills \p pos with the predecessor/successor of the insert
// position at every level. Returns true iff an item equal to \p val was found
// (pos.pCur then points to it). If \p bStopIfFound, the search stops early on a match.
// Logically-deleted nodes met on the way are helped to be unlinked via help_remove().
// Requires the RCU lock to be held by the caller.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Q, typename Compare >
bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound )
assert( gc::is_locked());
marked_node_ptr pSucc;
marked_node_ptr pCur;
// Start each descent from the head tower
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
// pCur.bits() means that pPred is logically deleted
if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-check the link: pPred->next may have changed concurrently
if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
else if ( nCmp == 0 && bStopIfFound )
// Record the insert position at this level
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
pos.pCur = pCur.ptr();
return pCur.ptr() && nCmp == 0;
// Locates the leftmost (minimal) item and fills \p pos with the head's
// successors at every level. Returns true iff the list is non-empty
// (pos.pCur points to the minimal node). Requires the RCU lock to be held.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
bool find_min_position( position& pos )
assert( gc::is_locked());
marked_node_ptr pSucc;
marked_node_ptr pCur;
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
// pCur.bits() means that pPred is logically deleted
// head cannot be deleted
assert( pCur.bits() == 0 );
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-check the link: pPred->next may have changed concurrently
if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
return ( pos.pCur = pCur.ptr()) != nullptr;
// Locates the rightmost (maximal) item and fills \p pos with its predecessors
// at every level. Returns true iff the list is non-empty
// (pos.pCur points to the maximal node). Requires the RCU lock to be held.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
bool find_max_position( position& pos )
assert( gc::is_locked());
marked_node_ptr pSucc;
marked_node_ptr pCur;
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
// pCur.bits() means that pPred is logically deleted
if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-check the link: pPred->next may have changed concurrently
if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
help_remove( nLevel, pPred, pCur, pSucc, pos );
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
return ( pos.pCur = pCur.ptr()) != nullptr;
// Re-computes the insert position \p pos for \p val after a CAS failure during
// insertion of \p pNode. Returns false if \p pNode itself is found marked for
// removal (a concurrent thread is deleting the node we are inserting).
// Requires the RCU lock to be held.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
bool renew_insert_position( value_type& val, node_type * pNode, position& pos )
assert( gc::is_locked());
marked_node_ptr pSucc;
marked_node_ptr pCur;
pPred = m_Head.head();
for ( int nLevel = static_cast<int>( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
// pCur.bits() means that pPred is logically deleted
if ( pCur.ptr() == nullptr ) {
// end of the list at level nLevel - goto next level
// pSucc contains deletion mark for pCur
pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
// Re-check the link: pPred->next may have changed concurrently
if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr())
if ( pSucc.bits()) {
// pCur is marked, i.e. logically deleted.
if ( pCur.ptr() == pNode ) {
// The node is being removed while we are inserting it
// try to help deleting pCur
help_remove( nLevel, pPred, pCur, pSucc, pos );
nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
pos.pPrev[nLevel] = pPred;
pos.pSucc[nLevel] = pCur.ptr();
// Links \p pNode (representing \p val) into the list at the position \p pos:
// first at level 0 (the linearization point), then at levels 1..height-1.
// Functor \p f is the insert functor (applied on success — TODO confirm, call site
// not visible in this excerpt). Requires the RCU lock to be held.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Func>
bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f )
assert( gc::is_locked());
unsigned int const nHeight = pNode->height();
pNode->clear_tower();
// Insert at level 0
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_relaxed );
if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
// Insert at level 1..max
for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) {
marked_node_ptr pSucc( pos.pSucc[nLevel] );
// pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now
if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc,
memory_model::memory_order_acq_rel, atomics::memory_order_acquire ))
// pNode has been marked as removed while we are inserting it
assert( p.bits() != 0 );
// Here pNode is linked at least at level 0 so level_unlinked() cannot return true
CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
// pNode is linked up to nLevel - 1
// Remove it via find_position()
find_position( val, pos, key_comparator(), false );
m_Stat.onLogicDeleteWhileInsert();
// Link pNode into the list at nLevel
if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ),
memory_model::memory_order_release, atomics::memory_order_relaxed ))
// Renew insert position
m_Stat.onRenewInsertPosition();
if ( !renew_insert_position( val, pNode, pos )) {
// The node has been deleted while we are inserting it
// Update current height for concurrent removing
CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel ));
m_Stat.onRemoveWhileInsert();
// help removing val
find_position( val, pos, key_comparator(), false );
// Two-phase removal of \p pDel: first marks every level's next pointer with nMask
// (logical deletion), then unlinks the node from every level (physical deletion).
// nMask = 1 for erase, 3 for extract (bit 1 = "extracted", see is_extracted()).
// Functor \p f is applied to the value after the level-0 mark succeeds, i.e. after
// the thread has won ownership of the node. Requires the RCU lock to be held.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Func>
bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract )
assert( pDel != nullptr );
assert( gc::is_locked());
marked_node_ptr pSucc;
unsigned const nMask = bExtract ? 3u : 1u;
// logical deletion (marking)
for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) {
pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed );
if ( pSucc.bits() == 0 ) {
while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask,
memory_model::memory_order_release, atomics::memory_order_acquire ))
if ( pSucc.bits() == 0 ) {
m_Stat.onMarkFailed();
else if ( pSucc.bits() != nMask )
// Level 0 mark decides which thread owns the removal
marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr());
if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire ))
f( *node_traits::to_value_ptr( pDel ));
// physical deletion
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire );
if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()),
memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
pDel->level_unlinked();
// Slow path: let find_position() help unlink the remaining levels
if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ))
assert( pDel != pos.pCur );
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
m_Stat.onSlowExtract();
m_Stat.onSlowErase();
// Fast erasing success
// We cannot free the node at this moment since RCU is locked
// Link deleted nodes to a chain to free later
pos.dispose( pDel );
m_Stat.onFastErase();
m_Stat.onFastExtract();
else if ( p.bits()) {
// Another thread is deleting pDel right now
m_Stat.onEraseContention();
m_Stat.onEraseRetry();
/// Result of \p find_fastpath()
// NOTE: "finsd" is a historical typo in the enum name, kept for source compatibility
enum finsd_fastpath_result {
    find_fastpath_found,        ///< the key was found
    find_fastpath_not_found,    ///< the key was not found
    find_fastpath_abort         ///< too much contention — fall back to the slow path (returned by find_fastpath())
};
// Wait-free-ish search without helping: walks the levels starting from the current
// list height and never unlinks marked nodes. After 4 attempts of hitting nodes
// under removal it gives up and returns find_fastpath_abort so that the caller
// falls back to find_slowpath().
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Q, typename Compare, typename Func>
finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const
marked_node_ptr pCur;
marked_node_ptr pSucc;
// Default-constructed marked pointer used as the "end of level" sentinel
marked_node_ptr pNull;
unsigned attempt = 0;
pPred = m_Head.head();
// Start from the current height, not c_nMaxHeight, to skip empty upper levels
for ( int nLevel = static_cast<int>( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) {
pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire );
while ( pCur != pNull ) {
// pPred is being removed
if ( ++attempt < 4 ) {
return find_fastpath_abort;
int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val );
pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire );
else if ( nCmp == 0 ) {
f( *node_traits::to_value_ptr( pCur.ptr()), val );
return find_fastpath_found;
else // pCur > val - go down
return find_fastpath_not_found;
1854 template <typename Q, typename Compare, typename Func>
1855 bool find_slowpath( Q& val, Compare cmp, Func f, position& pos )
1857 if ( find_position( val, pos, cmp, true )) {
1858 assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 );
1860 f( *node_traits::to_value_ptr( pos.pCur ), val );
1867 template <typename Q, typename Compare, typename Func>
1868 bool do_find_with( Q& val, Compare cmp, Func f )
1871 return do_find_with( val, cmp, f, pos );
// Core find: tries the lock-free fast path first; on abort (contention) or
// as needed falls back to the helping slow path. Updates statistics per branch.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Q, typename Compare, typename Func>
bool do_find_with( Q& val, Compare cmp, Func f, position& pos )
switch ( find_fastpath( val, cmp, f )) {
case find_fastpath_found:
m_Stat.onFindFastSuccess();
case find_fastpath_not_found:
m_Stat.onFindFastFailed();
// Fast path aborted - do the full search with helping
if ( find_slowpath( val, cmp, f, pos )) {
m_Stat.onFindSlowSuccess();
m_Stat.onFindSlowFailed();
// Erases the item equal to \p val: locates it via find_position(), then removes
// it with try_remove_at() (bExtract = false, so the disposer reclaims the node).
// Checks the RCU deadlock policy first — RCU must not be locked by the caller.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Q, typename Compare, typename Func>
bool do_erase( Q const& val, Compare cmp, Func f )
check_deadlock_policy::check();
if ( !find_position( val, pos, cmp, false )) {
m_Stat.onEraseFailed();
node_type * pDel = pos.pCur;
assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 );
unsigned int nHeight = pDel->height();
if ( try_remove_at( pDel, pos, f, false )) {
m_Stat.onRemoveNode( nHeight );
m_Stat.onEraseSuccess();
m_Stat.onEraseFailed();
// Extracts (unlinks without disposing) the item equal to \p key; returns a pointer
// to the extracted value or nullptr. Uses try_remove_at() with bExtract = true so
// ownership passes to the caller instead of the disposer.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
template <typename Q, typename Compare>
value_type * do_extract_key( Q const& key, Compare cmp, position& pos )
// RCU should be locked!!!
assert( gc::is_locked());
if ( !find_position( key, pos, cmp, false )) {
m_Stat.onExtractFailed();
assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 );
unsigned int const nHeight = pDel->height();
if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractSuccess();
m_Stat.onExtractFailed();
return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Public-extract driver: checks the deadlock policy (RCU must not be locked by
// the caller), then performs the extraction via do_extract_key() with the default
// comparator. Returns the extracted value or nullptr.
// NOTE(review): this excerpt appears truncated — the RCU-lock scope lines are missing.
template <typename Q>
value_type * do_extract( Q const& key )
check_deadlock_policy::check();
value_type * pDel = nullptr;
pDel = do_extract_key( key, key_comparator(), pos );
// Same as do_extract() but builds the comparator from \p Less.
// NOTE(review): this excerpt appears truncated — the RCU-lock scope lines are missing.
template <typename Q, typename Less>
value_type * do_extract_with( Q const& key, Less pred )
check_deadlock_policy::check();
value_type * pDel = nullptr;
pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less<Less>(), pos );
// Extracts the item with minimal key: find_min_position() + try_remove_at()
// with bExtract = true. Returns the extracted value or nullptr if the set is empty.
// Note: asserts RCU is NOT locked — the lock is taken internally.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
value_type * do_extract_min()
assert( !gc::is_locked());
if ( !find_min_position( pos )) {
m_Stat.onExtractMinFailed();
unsigned int const nHeight = pDel->height();
if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMinSuccess();
m_Stat.onExtractMinFailed();
return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
// Extracts the item with maximal key: find_max_position() + try_remove_at()
// with bExtract = true. Returns the extracted value or nullptr if the set is empty.
// Note: asserts RCU is NOT locked — the lock is taken internally.
// NOTE(review): this excerpt appears truncated — some lines of the original body are missing.
value_type * do_extract_max()
assert( !gc::is_locked());
if ( !find_max_position( pos )) {
m_Stat.onExtractMaxFailed();
unsigned int const nHeight = pDel->height();
if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) {
m_Stat.onRemoveNode( nHeight );
m_Stat.onExtractMaxSuccess();
m_Stat.onExtractMaxFailed();
return pDel ? node_traits::to_value_ptr( pDel ) : nullptr;
2068 void increase_height( unsigned int nHeight )
2070 unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
2071 if ( nCur < nHeight )
2072 m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
// Walks the bottom level disposing every node; presumably the body of the
// single-threaded destroy()/destructor cleanup — TODO confirm, the enclosing
// signature is not visible in this excerpt. Relaxed loads are sufficient here
// only if no concurrent access is possible at this point.
node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr();
dispose_node( node_traits::to_value_ptr( p ));
2088 }} // namespace cds::intrusive
2091 #endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H