3 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
4 #define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H
8 #include <cds/intrusive/details/split_list_base.h>
9 #include <cds/details/binary_functor_wrapper.h>
11 namespace cds { namespace intrusive {
13 /// Split-ordered list RCU specialization
14 /** @ingroup cds_intrusive_map
15 \anchor cds_intrusive_SplitListSet_rcu
17 Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see
18 - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"
19 - [2008] Nir Shavit "The Art of Multiprocessor Programming"
21 The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original
recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows splitting buckets
23 without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description".
27 Template parameters are:
28 - \p RCU - one of \ref cds_urcu_gc "RCU type"
29 - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList.
30 The intrusive ordered list implementation specifies the type \p T stored in the hash-set,
31 the comparing functor for the type \p T and other features specific for the ordered list.
- \p Traits - set traits, default is \p split_list::traits.
33 Instead of defining \p Traits struct you may use option-based syntax with \p split_list::make_traits metafunction.
@note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description".
38 Before including <tt><cds/intrusive/split_list_rcu.h></tt> you should include appropriate RCU header file,
39 see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files.
40 For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and
41 MichaelList-based split-list you should include:
43 #include <cds/urcu/general_buffered.h>
44 #include <cds/intrusive/michael_list_rcu.h>
45 #include <cds/intrusive/split_list_rcu.h>
47 // Declare Michael's list for type Foo and default traits:
48 typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list;
50 // Declare split-list based on rcu_michael_list
51 typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list;
# ifdef CDS_DOXYGEN_INVOKED
        class Traits = split_list::traits
    class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits >
        typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector
        typedef Traits traits; ///< Traits template parameters
        /// Hash functor for \ref value_type and all its derivatives that you use
        typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash;
        // Tag used by generic algorithms to detect the split-list implementation
        typedef cds::intrusive::split_list::implementation_tag implementation_tag;
        // Rebinds the user-supplied ordered list to the node/traits types the split-list needs
        typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
# ifdef CDS_DOXYGEN_INVOKED
        typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list
        typedef typename wrapped_ordered_list::result ordered_list;
        typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list
        typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor
        typedef typename ordered_list::disposer disposer; ///< Node disposer functor
        typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock
        typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node
        typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function
        /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that
        static CDS_CONSTEXPR const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal;
        typedef typename traits::item_counter item_counter; ///< Item counter type
        typedef typename traits::back_off back_off; ///< back-off strategy for spinning
        typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
        typedef typename traits::stat stat; ///< Internal statistics
        typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
        typedef split_list::node<list_node_type> node_type; ///< split-list node type
        typedef node_type dummy_node_type; ///< dummy node type (bucket head marker stored in the list)
        /// Split-list node traits
        /**
            This traits is intended for converting between underlying ordered list node type \ref list_node_type
            and split-list node type \ref node_type
        */
        typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
        /// Bucket table implementation
        // Selected at compile time: expandable (dynamic) or static table, per traits::dynamic_bucket_table
        typedef typename split_list::details::bucket_table_selector<
            traits::dynamic_bucket_table
            , opt::allocator< typename traits::allocator >
            , opt::memory_model< memory_model >
        >::type bucket_table;
        /// Ordered list wrapper to access protected members of OrderedList
        class ordered_list_wrapper: public ordered_list
            typedef ordered_list base_class;
            // Handle type the underlying list uses to address a bucket's dummy head node
            typedef typename base_class::auxiliary_head bucket_head_type;
            // Inserts \p val into the bucket headed by dummy node \p pHead
            bool insert_at( dummy_node_type * pHead, value_type& val )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::insert_at( h, val );
            // Inserts \p val into the bucket headed by \p pHead; calls \p f on success
            template <typename Func>
            bool insert_at( dummy_node_type * pHead, value_type& val, Func f )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::insert_at( h, val, f );
            // Inserts \p val or updates an equal existing item in the bucket headed by \p pHead
            template <typename Func>
            std::pair<bool, bool> ensure_at( dummy_node_type * pHead, value_type& val, Func func )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::ensure_at( h, val, func );
            // Unlinks \p val (matched by pointer identity) from the bucket headed by \p pHead
            bool unlink_at( dummy_node_type * pHead, value_type& val )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::unlink_at( h, val );
            // Erases the item equal to \p val from the bucket; calls \p f on the erased item
            template <typename Q, typename Compare, typename Func>
            bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp, Func f )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::erase_at( h, val, cmp, f );
            // Erases the item equal to \p val from the bucket headed by \p pHead
            template <typename Q, typename Compare>
            bool erase_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::erase_at( h, val, cmp );
            // Unlinks the item equal to \p val and returns a pointer to it; nullptr if not found
            template <typename Q, typename Compare>
            value_type * extract_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::extract_at( h, val, cmp );
            // Finds the item equal to \p val and applies functor \p f to it
            template <typename Q, typename Compare, typename Func>
            bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::find_at( h, val, cmp, f );
            // Checks whether an item equal to \p val is present in the bucket
            template <typename Q, typename Compare>
            bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q> const & val, Compare cmp )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::find_at( h, val, cmp );
            // Returns a raw pointer to the item equal to \p val; caller must hold the RCU lock
            template <typename Q, typename Compare>
            raw_ptr get_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp )
                assert( pHead != nullptr );
                bucket_head_type h(pHead);
                return base_class::get_at( h, val, cmp );
            // Inserts dummy node \p pNode searching from the list head (used for bucket 0)
            bool insert_aux_node( dummy_node_type * pNode )
                return base_class::insert_aux_node( pNode );
            // Inserts dummy node \p pNode searching from the parent bucket head \p pHead
            bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
                bucket_head_type h(pHead);
                return base_class::insert_aux_node( h, pNode );
        // Adapts a \p Less predicate to a three-way comparator that transparently
        // unwraps split_list::details::search_value_type on either side
        template <typename Less>
        struct less_wrapper: public cds::opt::details::make_comparator_from_less<Less>
            typedef cds::opt::details::make_comparator_from_less<Less> base_wrapper;
            // Compares a wrapped search value with a plain key
            template <typename Q1, typename Q2>
            int operator()( split_list::details::search_value_type<Q1> const& v1, Q2 const& v2 ) const
                return base_wrapper::operator()( v1.val, v2 );
            // Compares a plain key with a wrapped search value
            template <typename Q1, typename Q2>
            int operator()( Q1 const& v1, split_list::details::search_value_type<Q2> const& v2 ) const
                return base_wrapper::operator()( v1, v2.val );
        ordered_list_wrapper m_List; ///< Ordered list containing split-list items
        bucket_table m_Buckets; ///< bucket table
        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
        atomics::atomic<size_t> m_nMaxItemCount; ///< number of items container can hold, before we have to resize
        item_counter m_ItemCounter; ///< Item counter
        hash m_HashFunctor; ///< Hash functor
        stat m_Stat; ///< Internal statistics accumulator
        // Allocator for dummy (bucket head) nodes
        typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
        // Allocates a dummy node carrying hash \p nHash for a new bucket head
        dummy_node_type * alloc_dummy_node( size_t nHash )
            m_Stat.onHeadNodeAllocated();
            return dummy_node_allocator().New( nHash );
        // Frees a dummy node previously created by alloc_dummy_node()
        void free_dummy_node( dummy_node_type * p )
            dummy_node_allocator().Delete( p );
            m_Stat.onHeadNodeFreed();
        /// Calculates hash value of \p key
        template <typename Q>
        size_t hash_value( Q const& key ) const
            // Delegates to the user-provided hash functor; the result is later
            // converted to split-ordered (bit-reversed) form by the callers
            return m_HashFunctor( key );
274 size_t bucket_no( size_t nHash ) const
276 return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
279 static size_t parent_bucket( size_t nBucket )
281 assert( nBucket > 0 );
282 return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
        // Initializes bucket \p nBucket: ensures the parent bucket exists (recursively),
        // links a new dummy head node into the ordered list, and publishes it in the
        // bucket table. Safe to call concurrently for the same bucket; exactly one
        // thread wins, the others busy-wait until the bucket pointer is published.
        dummy_node_type * init_bucket( size_t nBucket )
            assert( nBucket > 0 );
            size_t nParent = parent_bucket( nBucket );
            dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
            if ( pParentBucket == nullptr ) {
                // Parent not yet initialized - recursion depth is bounded by log2(bucket count)
                pParentBucket = init_bucket( nParent );
                m_Stat.onRecursiveInitBucket();
            assert( pParentBucket != nullptr );
            // Allocate a dummy node for new bucket
            dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
            if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
                m_Buckets.bucket( nBucket, pBucket );
                m_Stat.onNewBucket();
            // Insertion failed: another thread won the race - discard our dummy node
            free_dummy_node( pBucket );
            // Another thread set the bucket. Wait until it is done
            // In this point, we must wait while nBucket is empty.
            // The compiler can decide that waiting loop can be "optimized" (stripped)
            // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
            m_Stat.onBucketInitContenton();
            dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
            return const_cast<dummy_node_type *>( p );
            m_Stat.onBusyWaitBucketInit();
        // Returns the dummy head node of the bucket for hash \p nHash,
        // lazily initializing the bucket on first access
        dummy_node_type * get_bucket( size_t nHash )
            size_t nBucket = bucket_no( nHash );
            dummy_node_type * pHead = m_Buckets.bucket( nBucket );
            if ( pHead == nullptr )
                pHead = init_bucket( nBucket );
            assert( pHead->is_dummy() );
            // One-time construction-time initialization: checks static invariants
            // and creates bucket 0 so that every other bucket has an ancestor.
            // GC and OrderedList::gc must be the same
            static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
            // atomicity::empty_item_counter is not allowed as an item counter
            // (the resize policy depends on accurate item counting)
            static_assert( !std::is_same<item_counter, cds::atomicity::empty_item_counter>::value,
                "cds::atomicity::empty_item_counter is not allowed as a item counter");
            // Initialize bucket 0
            dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
            // insert_aux_node cannot return false for empty list
            CDS_VERIFY( m_List.insert_aux_node( pNode ));
            m_Buckets.bucket( 0, pNode );
357 static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
359 return nBucketCount * nLoadFactor;
        // Increments the item counter and, when the load-factor threshold is exceeded,
        // doubles the bucket count. Lock-free: concurrent callers race on two CASes,
        // and losing either race is benign (another thread published the new values).
        void inc_item_count()
            size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
            if ( ++m_ItemCounter <= nMaxCount )
            size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
            const size_t nBucketCount = static_cast<size_t>(1) << sz;
            if ( nBucketCount < m_Buckets.capacity() ) {
                // we may grow the bucket table
                const size_t nLoadFactor = m_Buckets.load_factor();
                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
                    return; // someone already have updated m_nBucketCountLog2, so stop here
                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
            // Bucket table is at full capacity: disable further resizing
            m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
        // Common implementation of find(Q&, Func): locates the bucket by hash, then
        // searches it with comparator \p cmp, applying \p f to the item found
        template <typename Q, typename Compare, typename Func>
        bool find_( Q& val, Compare cmp, Func f )
            size_t nHash = hash_value( val );
            // regular_hash() bit-reverses the hash: items are kept in split order in the list
            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
                [&f](value_type& item, split_list::details::search_value_type<Q>& val){ f(item, val.val ); }));
        // Common implementation of find(Q const&): presence check only, no functor
        template <typename Q, typename Compare>
        bool find_value( Q const& val, Compare cmp )
            size_t nHash = hash_value( val );
            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp ));
        // Common implementation of get(): returns a raw pointer to the item equal
        // to \p val; the caller must hold the RCU lock while using the result
        template <typename Q, typename Compare>
        raw_ptr get_( Q const& val, Compare cmp )
            size_t nHash = hash_value( val );
            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            raw_ptr p = m_List.get_at( pHead, sv, cmp );
            m_Stat.onFind( !!p );
        // Common implementation of extract(): unlinks the item equal to \p val and
        // returns a pointer to it, or nullptr if not found
        template <typename Q, typename Compare>
        value_type * extract_( Q const& val, Compare cmp )
            size_t nHash = hash_value( val );
            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            value_type * pNode = m_List.extract_at( pHead, sv, cmp );
                m_Stat.onExtractSuccess();
                m_Stat.onExtractFailed();
        // extract() variant that builds a three-way comparator from a \p Less predicate
        template <typename Q, typename Less>
        value_type * extract_with_( Q const& val, Less pred )
            return extract_( val, typename wrapped_ordered_list::template make_compare_from_less<Less>());
        // Common implementation of erase(Q): removes the item equal to \p val
        // using comparator \p cmp
        template <typename Q, typename Compare>
        bool erase_( const Q& val, Compare cmp )
            size_t nHash = hash_value( val );
            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            if ( m_List.erase_at( pHead, sv, cmp ) ) {
                m_Stat.onEraseSuccess();
            m_Stat.onEraseFailed();
        // Common implementation of erase(Q, Func): additionally calls \p f on the erased item
        template <typename Q, typename Compare, typename Func>
        bool erase_( Q const& val, Compare cmp, Func f )
            size_t nHash = hash_value( val );
            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            if ( m_List.erase_at( pHead, sv, cmp, f )) {
                m_Stat.onEraseSuccess();
            m_Stat.onEraseFailed();
        /// Initialize split-ordered list of default capacity
        /**
            The default capacity is defined in bucket table constructor.
            See split_list::expandable_bucket_table, split_list::static_bucket_table
            which selects by split_list::dynamic_bucket_table option.
        */
        // Start with 2 buckets (log2 == 1); threshold derives from the table's load factor
        : m_nBucketCountLog2(1)
        , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
        /// Initialize split-ordered list
            size_t nItemCount ///< estimate average of item count
            , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1.
        : m_Buckets( nItemCount, nLoadFactor )
        , m_nBucketCountLog2(1)
        , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
        /// Inserts new node
        /**
            The function inserts \p val in the set if it does not contain
            an item with key equal to \p val.
            The function makes RCU lock internally.
            Returns \p true if \p val is placed into the set, \p false otherwise.
        */
        bool insert( value_type& val )
            size_t nHash = hash_value( val );
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            // store the split-ordered (bit-reversed) hash in the node before linking it
            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
            if ( m_List.insert_at( pHead, val )) {
                m_Stat.onInsertSuccess();
            m_Stat.onInsertFailed();
        /// Inserts new node with a functor to initialize the inserted item
        /**
            This function is intended for derived non-intrusive containers.
            The function allows to split creating of new item into two part:
            - create item with key only
            - insert new item into the set
            - if inserting is success, calls \p f functor to initialize value-field of \p val.
            The functor signature is:
            void func( value_type& val );
            where \p val is the item inserted.
            The function makes RCU lock internally.
            @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
            \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
        */
        template <typename Func>
        bool insert( value_type& val, Func f )
            size_t nHash = hash_value( val );
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            // store the split-ordered (bit-reversed) hash in the node before linking it
            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
            if ( m_List.insert_at( pHead, val, f )) {
                m_Stat.onInsertSuccess();
            m_Stat.onInsertFailed();
        /// Ensures that the \p val exists in the set
        /**
            The operation performs inserting or changing data with lock-free manner.
            If the item \p val is not found in the set, then \p val is inserted into the set.
            Otherwise, the functor \p func is called with item found.
            The functor signature is:
            void func( bool bNew, value_type& item, value_type& val );
            - \p bNew - \p true if the item has been inserted, \p false otherwise
            - \p item - item of the set
            - \p val - argument \p val passed into the \p ensure function
            If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments
            refers to the same thing.
            The function makes RCU lock internally.
            Returns std::pair<bool, bool> where \p first is \p true if operation is successful,
            \p second is \p true if new item has been added or \p false if the item with \p key
            already is in the set.
            @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting".
            \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level
        */
        template <typename Func>
        std::pair<bool, bool> ensure( value_type& val, Func func )
            size_t nHash = hash_value( val );
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
            std::pair<bool, bool> bRet = m_List.ensure_at( pHead, val, func );
            // Count only the "new item inserted" outcome toward the resize threshold
            if ( bRet.first && bRet.second ) {
                m_Stat.onEnsureNew();
            m_Stat.onEnsureExist();
        /// Unlinks the item \p val from the set
        /**
            The function searches the item \p val in the set and unlinks it from the set
            if it is found and is equal to \p val.
            Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
            and deletes the item found. \p unlink finds an item by key and deletes it
            only if \p val is an item of that set, i.e. the pointer to item found
            is equal to <tt> &val </tt>.
            RCU \p synchronize method can be called, therefore, RCU should not be locked.
            The function returns \p true if success and \p false otherwise.
        */
        bool unlink( value_type& val )
            size_t nHash = hash_value( val );
            dummy_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
            if ( m_List.unlink_at( pHead, val ) ) {
                m_Stat.onEraseSuccess();
            m_Stat.onEraseFailed();
        /// Deletes the item from the set
        /** \anchor cds_intrusive_SplitListSet_rcu_erase
            The function searches an item with key equal to \p key in the set,
            unlinks it from the set, and returns \p true.
            If the item with key equal to \p key is not found the function returns \p false.
            Difference between \ref erase and \p unlink functions: \p erase finds <i>a key</i>
            and deletes the item found. \p unlink finds an item by key and deletes it
            only if \p key is an item of that set, i.e. the pointer to item found
            is equal to <tt> &key </tt>.
            RCU \p synchronize method can be called, therefore, RCU should not be locked.
            Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
        */
        template <typename Q>
        bool erase( Q const& key )
            return erase_( key, key_comparator() );
        /// Deletes the item from the set using \p pred for searching
        /**
            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)"
            but \p pred is used for key compare.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less>
        bool erase_with( Q const& key, Less pred )
            return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
        /// Deletes the item from the set
        /** \anchor cds_intrusive_SplitListSet_rcu_erase_func
            The function searches an item with key equal to \p key in the set,
            call \p f functor with item found, unlinks it from the set, and returns \p true.
            The \ref disposer specified by \p OrderedList class template parameter is called
            by garbage collector \p GC asynchronously.
            The \p Func interface is
            void operator()( value_type const& item );
            If the item with key equal to \p key is not found the function returns \p false.
            RCU \p synchronize method can be called, therefore, RCU should not be locked.
            Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type.
        */
        template <typename Q, typename Func>
        bool erase( Q const& key, Func f )
            return erase_( key, key_comparator(), f );
        /// Deletes the item from the set using \p pred for searching
        /**
            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)"
            but \p pred is used for key compare.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less, typename Func>
        bool erase_with( Q const& key, Less pred, Func f )
            return erase_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
        /// Extracts an item from the set
        /** \anchor cds_intrusive_SplitListSet_rcu_extract
            The function searches an item with key equal to \p key in the set,
            unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found.
            If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr.
            Depends on \p bucket_type you should or should not lock RCU before calling of this function:
            - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked
            - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked
            See ordered list implementation for details.
            typedef cds::urcu::gc< general_buffered<> > rcu;
            typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list;
            typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set;
            rcu_splitlist_set theSet;
            rcu_splitlist_set::exempt_ptr p;
            // For MichaelList we should not lock RCU
            // Now, you can apply extract function
            // Note that you must not delete the item found inside the RCU lock
            p = theList.extract( 10 );
            // do something with p
            // We may safely release p here
            // release() passes the pointer to RCU reclamation cycle:
            // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list.
        */
        template <typename Q>
        exempt_ptr extract( Q const& key )
            return exempt_ptr(extract_( key, key_comparator() ));
        /// Extracts an item from the set using \p pred for searching
        /**
            The function is an analog of \p extract(Q const&) but \p pred is used for key compare.
            \p Less functor has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less>
        exempt_ptr extract_with( Q const& key, Less pred )
            return exempt_ptr( extract_with_( key, pred ));
        /// Finds the key \p key
        /** \anchor cds_intrusive_SplitListSet_rcu_find_func
            The function searches the item with key equal to \p key and calls the functor \p f for item found.
            The interface of \p Func functor is:
            void operator()( value_type& item, Q& key );
            where \p item is the item found, \p key is the <tt>find</tt> function argument.
            The functor can change non-key fields of \p item. Note that the functor is only guarantee
            that \p item cannot be disposed during functor is executing.
            The functor does not serialize simultaneous access to the set \p item. If such access is
            possible you must provide your own synchronization schema on item level to exclude unsafe item modifications.
            The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor
            can modify both arguments.
            Note the hash functor specified for class \p Traits template parameter
            should accept a parameter of type \p Q that can be not the same as \p value_type.
            The function applies RCU lock internally.
            The function returns \p true if \p key is found, \p false otherwise.
        */
        template <typename Q, typename Func>
        bool find( Q& key, Func f )
            return find_( key, key_comparator(), f );
        // Overload for a const key argument
        template <typename Q, typename Func>
        bool find( Q const& key, Func f )
            return find_( key, key_comparator(), f );
        /// Finds the key \p key with \p pred predicate for comparing
        /**
            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)"
            but \p pred is used for key compare.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less, typename Func>
        bool find_with( Q& key, Less pred, Func f )
            return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
        // Overload for a const key argument
        template <typename Q, typename Less, typename Func>
        bool find_with( Q const& key, Less pred, Func f )
            return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
        /// Finds the key \p key
        /** \anchor cds_intrusive_SplitListSet_rcu_find_val
            The function searches the item with key equal to \p key
            and returns \p true if \p key found or \p false otherwise.
        */
        template <typename Q>
        bool find( Q const& key )
            return find_value( key, key_comparator() );
        /// Finds the key \p key with \p pred predicate for comparing
        /**
            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_val "find(Q const&)"
            but \p pred is used for key compare.
            \p Less has the interface like \p std::less.
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less>
        bool find_with( Q const& key, Less pred )
            return find_value( key, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
        /// Finds the key \p key and return the item found
        /** \anchor cds_intrusive_SplitListSet_rcu_get
            The function searches the item with key equal to \p key and returns the pointer to item found.
            If \p key is not found it returns \p nullptr.
            Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type.
            RCU should be locked before call of this function.
            Returned item is valid only while RCU is locked:
            typedef cds::intrusive::SplitListSet< your_template_parameters > set_class;
            typename set_class::raw_ptr rp;
            hash_set::rcu_lock lock;
            rp = theSet.get( 5 );
            // Unlock RCU by rcu_lock destructor
            // rp can be retired by disposer at any time after RCU has been unlocked
        */
        template <typename Q>
        raw_ptr get( Q const& key )
            return get_( key, key_comparator() );
        /// Finds the key \p key and return the item found
        /**
            The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)"
            but \p pred is used for comparing the keys.
            \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q
            \p pred must imply the same element order as the comparator used for building the set.
        */
        template <typename Q, typename Less>
        raw_ptr get_with( Q const& key, Less pred )
            return get_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>());
        /// Returns item count in the set
        // Value is exact only in a quiescent state; see empty() note below
            return m_ItemCounter;
        /// Checks if the set is empty
        /**
            Emptiness is checked by item counting: if item count is zero then the set is empty.
            Thus, the correct item counting feature is an important part of split-list set implementation.
        */
        /// Clears the set (not atomic)
        // Iterates and unlinks every item; concurrent modifications may be missed
            iterator it = begin();
            while ( it != end() ) {
        /// Returns internal statistics
        stat const& statistics() const
        // Forward iterator over the split-list; skips dummy (bucket head) nodes.
        // IsConst selects the const or mutable flavor.
        template <bool IsConst>
            :public split_list::details::iterator_type<node_traits, ordered_list, IsConst>
            typedef split_list::details::iterator_type<node_traits, ordered_list, IsConst> iterator_base_class;
            typedef typename iterator_base_class::list_iterator list_iterator;
            // Default ctor: singular iterator
            : iterator_base_class()
            // Copy ctor
            iterator_type( iterator_type const& src )
            : iterator_base_class( src )
            // This ctor should be protected...
            iterator_type( list_iterator itCur, list_iterator itEnd )
            : iterator_base_class( itCur, itEnd )
        /**
            The forward iterator for a split-list has some features:
            - it has no post-increment operator
            - it depends on iterator of underlying \p OrderedList
            - The iterator cannot be moved across thread boundary since it may contain GC's guard that is thread-private GC data.
            - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent
            deleting operations there is no guarantee that you iterate over all items in the split-list.
            Therefore, the use of iterators in concurrent environment is not a good idea. Use the iterator on the concurrent container
            for debug purpose only.
        */
        typedef iterator_type<false> iterator;
        /// Const forward iterator
        /**
            For iterator's features and requirements see \ref iterator
        */
        typedef iterator_type<true> const_iterator;
        /// Returns a forward iterator addressing the first element in a split-list
        /**
            For empty list \code begin() == end() \endcode
        */
            return iterator( m_List.begin(), m_List.end() );
        /// Returns an iterator that addresses the location succeeding the last element in a split-list
        /**
            Do not use the value returned by <tt>end</tt> function to access any item.
            The returned value can be used only to control reaching the end of the split-list.
            For empty list \code begin() == end() \endcode
        */
            return iterator( m_List.end(), m_List.end() );
        /// Returns a forward const iterator addressing the first element in a split-list
        const_iterator begin() const
        /// Returns a forward const iterator addressing the first element in a split-list
        const_iterator cbegin() const
            return const_iterator( m_List.cbegin(), m_List.cend() );
        /// Returns an const iterator that addresses the location succeeding the last element in a split-list
        const_iterator end() const
        /// Returns an const iterator that addresses the location succeeding the last element in a split-list
        const_iterator cend() const
            return const_iterator( m_List.cend(), m_List.cend() );
1037 }} // namespace cds::intrusive
1039 #endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H