3 #ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H
4 #define __CDS_INTRUSIVE_SPLIT_LIST_RCU_H
6 #include <cds/intrusive/details/split_list_base.h>
7 #include <cds/details/binary_functor_wrapper.h>
9 namespace cds { namespace intrusive {
11 /// Split-ordered list RCU specialization
12 /** @ingroup cds_intrusive_map
13 \anchor cds_intrusive_SplitListSet_rcu
15 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
16 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
17 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
19 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
20 recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets
21 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
25 Template parameters are:
26 - \p RCU - one of \ref cds_urcu_gc "RCU type"
27 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
28 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
29 the comparing functor for the type \p T and other features specific for the ordered list.
30 - \p Traits - set traits, default is \p split_list::traits.
31 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
33 @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
36 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
37 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
38 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
39 MichaelList-based split-list you should include:
41 #include <cds/urcu/general_buffered.h>
42 #include <cds/intrusive/michael_list_rcu.h>
43 #include <cds/intrusive/split_list_rcu.h>
45 // Declare Michael's list for type Foo and default traits:
46 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
48 // Declare split-list based on rcu_michael_list
49 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
56 # ifdef CDS_DOXYGEN_INVOKED
57 class Traits = split_list::traits
62 class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
65 typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
66 typedef Traits traits; ///< Traits template parameters
68 /// Hash functor for \ref value_type and all its derivatives that you use
69 typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
73 typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
77 # ifdef CDS_DOXYGEN_INVOKED
78 typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
80 typedef typename wrapped_ordered_list::result ordered_list;
82 typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
83 typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
84 typedef typename ordered_list::disposer disposer; ///< Node disposer functor
85 typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
86 typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
87 /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
88 static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
90 typedef typename traits::item_counter item_counter; ///< Item counter type
91 typedef typename traits::back_off back_off; ///< back-off strategy for spinning
92 typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
93 typedef typename traits::stat stat; ///< Internal statistics
96 typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
97 typedef split_list::node<list_node_type> node_type; ///< split-list node type
98 typedef node_type dummy_node_type; ///< dummy node type
100 /// Split-list node traits
102 This traits is intended for converting between underlying ordered list node type \ref list_node_type
103 and split-list node type \ref node_type
105 typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
108 /// Bucket table implementation
109 typedef typename split_list::details::bucket_table_selector<
110 traits::dynamic_bucket_table
113 , opt::allocator< typename traits::allocator >
114 , opt::memory_model< memory_model >
115 >::type bucket_table;
121 /// Ordered list wrapper to access protected members of OrderedList
122 class ordered_list_wrapper: public ordered_list
124 typedef ordered_list base_class;
125 typedef typename base_class::auxiliary_head bucket_head_type;
128 bool insert_at( dummy_node_type * pHead, value_type& val )
130 assert( pHead != nullptr );
131 bucket_head_type h(pHead);
132 return base_class::insert_at( h, val );
135 template <typename Func>
136 bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
138 assert( pHead != nullptr );
139 bucket_head_type h(pHead);
140 return base_class::insert_at( h, val, f );
143 template <typename Func>
144 std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
146 assert( pHead != nullptr );
147 bucket_head_type h(pHead);
148 return base_class::ensure_at( h, val, func );
151 bool unlink_at( dummy_node_type * pHead, value_type& val )
153 assert( pHead != nullptr );
154 bucket_head_type h(pHead);
155 return base_class::unlink_at( h, val );
158 template <typename Q, typename Compare, typename Func>
159 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
161 assert( pHead != nullptr );
162 bucket_head_type h(pHead);
163 return base_class::erase_at( h, val, cmp, f );
166 template <typename Q, typename Compare>
167 bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
169 assert( pHead != nullptr );
170 bucket_head_type h(pHead);
171 return base_class::erase_at( h, val, cmp );
174 template <typename Q, typename Compare>
175 value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
177 assert( pHead != nullptr );
178 bucket_head_type h(pHead);
179 return base_class::extract_at( h, val, cmp );
182 template <typename Q, typename Compare, typename Func>
183 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f ) const
185 assert( pHead != nullptr );
186 bucket_head_type h(pHead);
187 return base_class::find_at( h, val, cmp, f );
190 template <typename Q, typename Compare>
191 bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp ) const
193 assert( pHead != nullptr );
194 bucket_head_type h(pHead);
195 return base_class::find_at( h, val, cmp );
198 template <typename Q, typename Compare>
199 value_type * get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp ) const
201 assert( pHead != nullptr );
202 bucket_head_type h(pHead);
203 return base_class::get_at( h, val, cmp );
206 bool insert_aux_node( dummy_node_type * pNode )
208 return base_class::insert_aux_node( pNode );
210 bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
212 bucket_head_type h(pHead);
213 return base_class::insert_aux_node( h, pNode );
217 template <typename Less>
218 struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
220 typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
222 template <typename Q1, typename Q2>
223 int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
225 return base_wrapper::operator()( v1.val, v2 );
228 template <typename Q1, typename Q2>
229 int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
231 return base_wrapper::operator()( v1, v2.val );
237 ordered_list_wrapper m_List; ///< Ordered list containing split-list items
238 bucket_table m_Buckets; ///< bucket table
239 atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
240 item_counter m_ItemCounter; ///< Item counter
241 hash m_HashFunctor; ///< Hash functor
242 stat m_Stat; ///< Internal stattistics accumulator
246 typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
248 dummy_node_type * alloc_dummy_node( size_t nHash )
250 m_Stat.onHeadNodeAllocated();
251 return dummy_node_allocator().New( nHash );
253 void free_dummy_node( dummy_node_type * p )
255 dummy_node_allocator().Delete( p );
256 m_Stat.onHeadNodeFreed();
259 /// Calculates hash value of \p key
260 template <typename Q>
261 size_t hash_value( Q const& key ) const
263 return m_HashFunctor( key );
266 size_t bucket_no( size_t nHash ) const
268 return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
271 static size_t parent_bucket( size_t nBucket )
273 assert( nBucket > 0 );
274 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
277 dummy_node_type * init_bucket( size_t nBucket )
279 assert( nBucket > 0 );
280 size_t nParent = parent_bucket( nBucket );
282 dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
283 if ( pParentBucket == nullptr ) {
284 pParentBucket = init_bucket( nParent );
285 m_Stat.onRecursiveInitBucket();
288 assert( pParentBucket != nullptr );
290 // Allocate a dummy node for new bucket
292 dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
293 if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
294 m_Buckets.bucket( nBucket, pBucket );
295 m_Stat.onNewBucket();
298 free_dummy_node( pBucket );
301 // Another thread set the bucket. Wait while it done
303 // In this point, we must wait while nBucket is empty.
304 // The compiler can decide that waiting loop can be "optimized" (stripped)
305 // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
307 m_Stat.onBucketInitContenton();
310 dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
312 return const_cast<dummy_node_type *>( p );
314 m_Stat.onBusyWaitBucketInit();
318 dummy_node_type * get_bucket( size_t nHash )
320 size_t nBucket = bucket_no( nHash );
322 dummy_node_type * pHead = m_Buckets.bucket( nBucket );
323 if ( pHead == nullptr )
324 pHead = init_bucket( nBucket );
326 assert( pHead->is_dummy() );
333 // GC and OrderedList::gc must be the same
334 static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
336 // atomicity::empty_item_counter is not allowed as a item counter
337 static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
338 "cds::atomicity::empty_item_counter is not allowed as a item counter");
340 // Initialize bucket 0
341 dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
343 // insert_aux_node cannot return false for empty list
344 CDS_VERIFY( m_List.insert_aux_node( pNode ));
346 m_Buckets.bucket( 0, pNode );
349 void inc_item_count()
351 size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
352 if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
354 m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
358 template <typename Q, typename Compare, typename Func>
359 bool find_( Q& val, Compare cmp, Func f )
361 size_t nHash = hash_value( val );
362 split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
363 dummy_node_type * pHead = get_bucket( nHash );
364 assert( pHead != nullptr );
366 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
367 [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
370 template <typename Q, typename Compare>
371 bool find_value( Q const& val, Compare cmp )
373 size_t nHash = hash_value( val );
374 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
375 dummy_node_type * pHead = get_bucket( nHash );
376 assert( pHead != nullptr );
378 return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
381 template <typename Q, typename Compare>
382 value_type * get_( Q const& val, Compare cmp )
384 size_t nHash = hash_value( val );
385 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
386 dummy_node_type * pHead = get_bucket( nHash );
387 assert( pHead != nullptr );
389 value_type * p = m_List.get_at( pHead, sv, cmp );
390 m_Stat.onFind( p != nullptr );
394 template <typename Q, typename Compare>
395 value_type * extract_( Q const& val, Compare cmp )
397 size_t nHash = hash_value( val );
398 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
399 dummy_node_type * pHead = get_bucket( nHash );
400 assert( pHead != nullptr );
402 value_type * pNode = m_List.extract_at( pHead, sv, cmp );
405 m_Stat.onExtractSuccess();
408 m_Stat.onExtractFailed();
412 template <typename Q, typename Less>
413 value_type * extract_with_( Q const& val, Less pred )
416 return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
419 template <typename Q, typename Compare>
420 bool erase_( const Q& val, Compare cmp )
422 size_t nHash = hash_value( val );
423 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
424 dummy_node_type * pHead = get_bucket( nHash );
425 assert( pHead != nullptr );
427 if ( m_List.erase_at( pHead, sv, cmp ) ) {
429 m_Stat.onEraseSuccess();
432 m_Stat.onEraseFailed();
436 template <typename Q, typename Compare, typename Func>
437 bool erase_( Q const& val, Compare cmp, Func f )
439 size_t nHash = hash_value( val );
440 split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
441 dummy_node_type * pHead = get_bucket( nHash );
442 assert( pHead != nullptr );
444 if ( m_List.erase_at( pHead, sv, cmp, f )) {
446 m_Stat.onEraseSuccess();
449 m_Stat.onEraseFailed();
456 /// Initialize split-ordered list of default capacity
458 The default capacity is defined in bucket table constructor.
459 See split_list::expandable_bucket_table, split_list::static_ducket_table
460 which selects by split_list::dynamic_bucket_table option.
463 : m_nBucketCountLog2(1)
468 /// Initialize split-ordered list
470 size_t nItemCount ///< estimate average of item count
471 , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
473 : m_Buckets( nItemCount, nLoadFactor )
474 , m_nBucketCountLog2(1)
482 The function inserts \p val in the set if it does not contain
483 an item with key equal to \p val.
485 The function makes RCU lock internally.
487 Returns \p true if \p val is placed into the set, \p false otherwise.
489 bool insert( value_type& val )
491 size_t nHash = hash_value( val );
492 dummy_node_type * pHead = get_bucket( nHash );
493 assert( pHead != nullptr );
495 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
497 if ( m_List.insert_at( pHead, val )) {
499 m_Stat.onInsertSuccess();
502 m_Stat.onInsertFailed();
508 This function is intended for derived non-intrusive containers.
510 The function allows to split creating of new item into two part:
511 - create item with key only
512 - insert new item into the set
513 - if inserting is success, calls \p f functor to initialize value-field of \p val.
515 The functor signature is:
517 void func( value_type& val );
519 where \p val is the item inserted.
521 The function makes RCU lock internally.
523 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
524 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
527 template <typename Func>
528 bool insert( value_type& val, Func f )
530 size_t nHash = hash_value( val );
531 dummy_node_type * pHead = get_bucket( nHash );
532 assert( pHead != nullptr );
534 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
536 if ( m_List.insert_at( pHead, val, f )) {
538 m_Stat.onInsertSuccess();
541 m_Stat.onInsertFailed();
545 /// Ensures that the \p val exists in the set
547 The operation performs inserting or changing data with lock-free manner.
549 If the item \p val is not found in the set, then \p val is inserted into the set.
550 Otherwise, the functor \p func is called with item found.
551 The functor signature is:
553 void func( bool bNew, value_type& item, value_type& val );
556 - \p bNew - \p true if the item has been inserted, \p false otherwise
557 - \p item - item of the set
558 - \p val - argument \p val passed into the \p ensure function
559 If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
560 refers to the same thing.
562 The function makes RCU lock internally.
564 Returns std::pair<bool, bool> where \p first is \p true if operation is successfull,
565 \p second is \p true if new item has been added or \p false if the item with \p key
566 already is in the set.
568 @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
569 \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
572 template <typename Func>
573 std::pair<bool, bool> ensure( value_type& val, Func func )
575 size_t nHash = hash_value( val );
576 dummy_node_type * pHead = get_bucket( nHash );
577 assert( pHead != nullptr );
579 node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
581 std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
582 if ( bRet.first && bRet.second ) {
584 m_Stat.onEnsureNew();
587 m_Stat.onEnsureExist();
591 /// Unlinks the item \p val from the set
593 The function searches the item \p val in the set and unlinks it from the set
594 if it is found and is equal to \p val.
596 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
597 and deletes the item found. \p unlink finds an item by key and deletes it
598 only if \p val is an item of that set, i.e. the pointer to item found
599 is equal to <tt> &val </tt>.
601 RCU \p synchronize method can be called, therefore, RCU should not be locked.
603 The function returns \p true if success and \p false otherwise.
605 bool unlink( value_type& val )
607 size_t nHash = hash_value( val );
608 dummy_node_type * pHead = get_bucket( nHash );
609 assert( pHead != nullptr );
611 if ( m_List.unlink_at( pHead, val ) ) {
613 m_Stat.onEraseSuccess();
616 m_Stat.onEraseFailed();
620 /// Deletes the item from the set
621 /** \anchor cds_intrusive_SplitListSet_rcu_erase
622 The function searches an item with key equal to \p key in the set,
623 unlinks it from the set, and returns \p true.
624 If the item with key equal to \p key is not found the function return \p false.
626 Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
627 and deletes the item found. \p unlink finds an item by key and deletes it
628 only if \p key is an item of that set, i.e. the pointer to item found
629 is equal to <tt> &key </tt>.
631 RCU \p synchronize method can be called, therefore, RCU should not be locked.
633 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
635 template <typename Q>
636 bool erase( Q const& key )
638 return erase_( key, key_comparator() );
641 /// Deletes the item from the set using \p pred for searching
643 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
644 but \p cmp is used for key compare.
645 \p Less has the interface like \p std::less.
646 \p pred must imply the same element order as the comparator used for building the set.
648 template <typename Q, typename Less>
649 bool erase_with( Q const& key, Less pred )
652 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
655 /// Deletes the item from the set
656 /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
657 The function searches an item with key equal to \p key in the set,
658 call \p f functor with item found, unlinks it from the set, and returns \p true.
659 The \ref disposer specified by \p OrderedList class template parameter is called
660 by garbage collector \p GC asynchronously.
662 The \p Func interface is
665 void operator()( value_type const& item );
669 If the item with key equal to \p key is not found the function return \p false.
671 RCU \p synchronize method can be called, therefore, RCU should not be locked.
673 Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
675 template <typename Q, typename Func>
676 bool erase( Q const& key, Func f )
678 return erase_( key, key_comparator(), f );
681 /// Deletes the item from the set using \p pred for searching
683 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
684 but \p cmp is used for key compare.
685 \p Less has the interface like \p std::less.
686 \p pred must imply the same element order as the comparator used for building the set.
688 template <typename Q, typename Less, typename Func>
689 bool erase_with( Q const& key, Less pred, Func f )
692 return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
695 /// Extracts an item from the set
696 /** \anchor cds_intrusive_SplitListSet_rcu_extract
697 The function searches an item with key equal to \p key in the set,
698 unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
699 If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
701 @note The function does NOT call RCU read-side lock or synchronization,
702 and does NOT dispose the item found. It just excludes the item from the set
703 and returns a pointer to item found.
704 You should lock RCU before calling of the function, and you should synchronize RCU
705 outside the RCU lock before reusing returned pointer.
708 typedef cds::urcu::gc< general_buffered<> > rcu;
709 typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
710 typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
712 rcu_splitlist_set theSet;
715 rcu_splitlist_set::exempt_ptr p;
717 // first, we should lock RCU
718 rcu_splitlist_set::rcu_lock lock;
720 // Now, you can apply extract function
721 // Note that you must not delete the item found inside the RCU lock
722 p = theList.extract( 10 );
724 // do something with p
729 // We may safely release p here
730 // release() passes the pointer to RCU reclamation cycle:
731 // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
735 template <typename Q>
736 exempt_ptr extract( Q const& key )
738 return exempt_ptr(extract_( key, key_comparator() ));
741 /// Extracts an item from the set using \p pred for searching
743 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_extract "extract(exempt_ptr&, Q const&)"
744 but \p pred is used for key compare.
745 \p Less functor has the interface like \p std::less.
746 \p pred must imply the same element order as the comparator used for building the set.
748 template <typename Q, typename Less>
749 exempt_ptr extract_with( Q const& key, Less pred )
751 return exempt_ptr( extract_with_( key, pred ));
754 /// Finds the key \p key
755 /** \anchor cds_intrusive_SplitListSet_rcu_find_func
756 The function searches the item with key equal to \p key and calls the functor \p f for item found.
757 The interface of \p Func functor is:
760 void operator()( value_type& item, Q& key );
763 where \p item is the item found, \p key is the <tt>find</tt> function argument.
765 The functor can change non-key fields of \p item. Note that the functor is only guarantee
766 that \p item cannot be disposed during functor is executing.
767 The functor does not serialize simultaneous access to the set \p item. If such access is
768 possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
770 The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
771 can modify both arguments.
773 Note the hash functor specified for class \p Traits template parameter
774 should accept a parameter of type \p Q that can be not the same as \p value_type.
776 The function applies RCU lock internally.
778 The function returns \p true if \p key is found, \p false otherwise.
780 template <typename Q, typename Func>
781 bool find( Q& key, Func f )
783 return find_( key, key_comparator(), f );
786 template <typename Q, typename Func>
787 bool find( Q const& key, Func f )
789 return find_( key, key_comparator(), f );
793 /// Finds the key \p key with \p pred predicate for comparing
795 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
796 but \p cmp is used for key compare.
797 \p Less has the interface like \p std::less.
798 \p cmp must imply the same element order as the comparator used for building the set.
800 template <typename Q, typename Less, typename Func>
801 bool find_with( Q& key, Less pred, Func f )
804 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
807 template <typename Q, typename Less, typename Func>
808 bool find_with( Q const& key, Less pred, Func f )
811 return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
815 /// Finds the key \p key
816 /** \anchor cds_intrusive_SplitListSet_rcu_find_val
817 The function searches the item with key equal to \p key
818 and returns \p true if \p key found or \p false otherwise.
820 template <typename Q>
821 bool find( Q const& key )
823 return find_value( key, key_comparator() );
826 /// Finds the key \p key with \p pred predicate for comparing
828 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)"
829 but \p cmp is used for key compare.
830 \p Less has the interface like \p std::less.
831 \p pred must imply the same element order as the comparator used for building the set.
833 template <typename Q, typename Less>
834 bool find_with( Q const& key, Less pred )
837 return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
840 /// Finds the key \p key and return the item found
841 /** \anchor cds_intrusive_SplitListSet_rcu_get
842 The function searches the item with key equal to \p key and returns the pointer to item found.
843 If \p key is not found it returns \p nullptr.
845 Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
847 RCU should be locked before call of this function.
848 Returned item is valid only while RCU is locked:
850 cds::intrusive::SplitListSet< your_template_parameters > theSet;
854 hash_set::rcu_lock lock;
856 foo * pVal = theSet.get( 5 );
861 // Unlock RCU by rcu_lock destructor
862 // pVal can be retired by disposer at any time after RCU has been unlocked
866 template <typename Q>
867 value_type * get( Q const& key )
869 return get_( key, key_comparator() );
872 /// Finds the key \p key and return the item found
874 The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
875 but \p pred is used for comparing the keys.
877 \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
879 \p pred must imply the same element order as the comparator used for building the set.
881 template <typename Q, typename Less>
882 value_type * get_with( Q const& key, Less pred )
885 return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
889 /// Returns item count in the set
892 return m_ItemCounter;
895 /// Checks if the set is empty
897 Emptiness is checked by item counting: if item count is zero then the set is empty.
898 Thus, the correct item counting feature is an important part of split-list set implementation.
905 /// Clears the set (not atomic)
908 iterator it = begin();
909 while ( it != end() ) {
917 /// Returns internal statistics
918 stat const& statistics() const
925 template <bool IsConst>
927 :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
929 typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
930 typedef typename iterator_base_class::list_iterator list_iterator;
933 : iterator_base_class()
936 iterator_type( iterator_type const& src )
937 : iterator_base_class( src )
940 // This ctor should be protected...
941 iterator_type( list_iterator itCur, list_iterator itEnd )
942 : iterator_base_class( itCur, itEnd )
949 The forward iterator for a split-list has some features:
950 - it has no post-increment operator
951 - it depends on iterator of underlying \p OrderedList
952 - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data.
953 - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
954 deleting operations it is no guarantee that you iterate all item in the split-list.
956 Therefore, the use of iterators in concurrent environment is not good idea. Use the iterator on the concurrent container
957 for debug purpose only.
959 typedef iterator_type<false> iterator;
960 /// Const forward iterator
962 For iterator's features and requirements see \ref iterator
964 typedef iterator_type<true> const_iterator;
966 /// Returns a forward iterator addressing the first element in a split-list
968 For empty list \code begin() == end() \endcode
972 return iterator( m_List.begin(), m_List.end() );
975 /// Returns an iterator that addresses the location succeeding the last element in a split-list
977 Do not use the value returned by <tt>end</tt> function to access any item.
979 The returned value can be used only to control reaching the end of the split-list.
980 For empty list \code begin() == end() \endcode
984 return iterator( m_List.end(), m_List.end() );
987 /// Returns a forward const iterator addressing the first element in a split-list
988 const_iterator begin() const
992 /// Returns a forward const iterator addressing the first element in a split-list
993 const_iterator cbegin() const
995 return const_iterator( m_List.cbegin(), m_List.cend() );
998 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
999 const_iterator end() const
1003 /// Returns an const iterator that addresses the location succeeding the last element in a split-list
1004 const_iterator cend() const
1006 return const_iterator( m_List.cend(), m_List.cend() );
1011 }} // namespace cds::intrusive
1013 #endif // #ifndef __CDS_INTRUSIVE_SPLIT_LIST_RCU_H