3 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
4 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
6 #include <cds/intrusive/details/base.h>
7 #include <cds/algo/atomic.h>
8 #include <cds/details/allocator.h>
9 #include <cds/algo/int_algo.h>
10 #include <cds/algo/bitop.h>
12 namespace cds { namespace intrusive {
14 /// Split-ordered list related definitions
/** @ingroup cds_intrusive_helper */
17 namespace split_list {
19 struct implementation_tag;
    /// Split-ordered list node
    /**
        Template parameter:
        - OrderedListNode - node type for underlying ordered list
    */
    template <typename OrderedListNode>
    struct node: public OrderedListNode
        typedef OrderedListNode base_class;     ///< Underlying ordered-list node type

        // Split-order hash: bit-reversed key hash. Regular nodes have the least
        // significant bit set to 1, dummy (bucket head) nodes have it cleared;
        // see regular_hash() / dummy_hash() at the end of this namespace.
        size_t  m_nHash ; ///< Hash value for node

        /// Default constructor
        /// Initializes dummy node with \p nHash value
        /// Checks if the node is dummy node
            // LSB == 0 marks a dummy (bucket head) node
            return (m_nHash & 1) == 0;
    /// SplitListSet internal statistics. May be used for debugging or profiling
    /**
        Template argument \p Counter defines type of counter.
        Default is \p cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed
        strict event counting.
        You may use stronger type of counter like as \p cds::atomicity::item_counter,
        or even integral type, for example, \p int.
    */
    template <typename Counter = cds::atomicity::event_counter >
        typedef Counter counter_type; ///< Counter type

        counter_type    m_nInsertSuccess;       ///< Count of success inserting
        counter_type    m_nInsertFailed;        ///< Count of failed inserting
        counter_type    m_nEnsureNew;           ///< Count of new item created by \p ensure() member function
        counter_type    m_nEnsureExist;         ///< Count of \p ensure() call for existing item
        counter_type    m_nEraseSuccess;        ///< Count of success erasing of items
        counter_type    m_nEraseFailed;         ///< Count of attempts to erase unknown item
        counter_type    m_nExtractSuccess;      ///< Count of success extracting of items
        counter_type    m_nExtractFailed;       ///< Count of attempts to extract unknown item
        counter_type    m_nFindSuccess;         ///< Count of success finding
        counter_type    m_nFindFailed;          ///< Count of failed finding
        counter_type    m_nHeadNodeAllocated;   ///< Count of allocated head node
        counter_type    m_nHeadNodeFreed;       ///< Count of freed head node
        counter_type    m_nBucketCount;         ///< Current bucket count
        counter_type    m_nInitBucketRecursive; ///< Count of recursive bucket initialization
        counter_type    m_nInitBucketContention;///< Count of bucket init contention encountered
        counter_type    m_nBusyWaitBucketInit;  ///< Count of busy wait cycle while a bucket is initialized

        // Event handlers: each bumps the corresponding counter.
        void onInsertSuccess()       { ++m_nInsertSuccess; }
        void onInsertFailed()        { ++m_nInsertFailed; }
        void onEnsureNew()           { ++m_nEnsureNew; }
        void onEnsureExist()         { ++m_nEnsureExist; }
        void onEraseSuccess()        { ++m_nEraseSuccess; }
        void onEraseFailed()         { ++m_nEraseFailed; }
        void onExtractSuccess()      { ++m_nExtractSuccess; }
        void onExtractFailed()       { ++m_nExtractFailed; }
        void onFindSuccess()         { ++m_nFindSuccess; }
        void onFindFailed()          { ++m_nFindFailed; }
        // Counts the find result and returns \p bSuccess unchanged
        // (mirrors empty_stat::onFind which is a pass-through)
        bool onFind(bool bSuccess)
        void onHeadNodeAllocated()   { ++m_nHeadNodeAllocated; }
        void onHeadNodeFreed()       { ++m_nHeadNodeFreed; }
        void onNewBucket()           { ++m_nBucketCount; }
        void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
        void onBucketInitContenton() { ++m_nInitBucketContention; }
        void onBusyWaitBucketInit()  { ++m_nBusyWaitBucketInit; }
    /// Dummy statistics - no counting is performed, no overhead. Supports the interface of \p split_list::stat
    // Every handler is an empty inline const member, so the compiler removes
    // all statistics code from the release build.
        void onInsertSuccess()       const {}
        void onInsertFailed()        const {}
        void onEnsureNew()           const {}
        void onEnsureExist()         const {}
        void onEraseSuccess()        const {}
        void onEraseFailed()         const {}
        void onExtractSuccess()      const {}
        void onExtractFailed()       const {}
        void onFindSuccess()         const {}
        void onFindFailed()          const {}
        bool onFind( bool bSuccess ) const { return bSuccess; }
        void onHeadNodeAllocated()   const {}
        void onHeadNodeFreed()       const {}
        void onNewBucket()           const {}
        void onRecursiveInitBucket() const {}
        void onBucketInitContenton() const {}
        void onBusyWaitBucketInit()  const {}
    /// SplitListSet traits
        /// Hash functor
        /**
            Hash function converts the key fields of struct \p T stored in the split list
            into hash value of type \p size_t that is an index in hash table.
            The \p hash typedef is mandatory and has no predefined one.
        */
        typedef opt::none       hash;

        /// Item counter
        /**
            The item counting is an important part of \p SplitListSet algorithm:
            the <tt>empty()</tt> member function depends on correct item counting.
            Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

            Default is \p cds::atomicity::item_counter.
        */
        typedef cds::atomicity::item_counter item_counter;

        /// Bucket table allocator
        /**
            Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
        */
        typedef CDS_DEFAULT_ALLOCATOR   allocator;

        /// Internal statistics (by default, disabled)
        /**
            Possible statistics types are: \p split_list::stat (enable internal statistics),
            \p split_list::empty_stat (the default, internal statistics disabled),
            user-provided class that supports \p %split_list::stat interface.
        */
        typedef split_list::empty_stat  stat;

        /// C++ memory ordering model
        /**
            Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
            or \p opt::v::sequential_consistent (sequentially consistent memory model).
        */
        typedef opt::v::relaxed_ordering memory_model;

        /// What type of bucket table is used
        /**
            \p true - use \p split_list::expandable_bucket_table that can be expanded
            if the load factor of the set is exhausted.
            \p false - use \p split_list::static_bucket_table that cannot be expanded
            and is allocated in \p SplitListSet constructor.
        */
        static const bool dynamic_bucket_table = true;

        /// Back-off strategy
        typedef cds::backoff::Default back_off;
    /// [value-option] Split-list dynamic bucket table option
    /**
        The option is used to select bucket table implementation.
        Possible values of \p Value are:
        - \p true - select \p expandable_bucket_table
        - \p false - select \p static_bucket_table
    */
    template <bool Value>
    struct dynamic_bucket_table
        // Option pack: injects the compile-time flag into the option bundle
        template <typename Base> struct pack: public Base
            enum { dynamic_bucket_table = Value };
    /// Metafunction converting option list to \p split_list::traits
    /**
        Available \p Options:
        - \p opt::hash - mandatory option, specifies hash functor.
        - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
        - \p opt::memory_model - C++ memory model for atomic operations.
          Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
          or \p opt::v::sequential_consistent (sequentially consistent memory model).
        - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
        - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
          Dynamic bucket table expands its size up to maximum bucket count when necessary
        - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
        - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
          To enable internal statistics use \p split_list::stat.
    */
    template <typename... Options>
        typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
    /// Static bucket table
    /**
        Non-resizeable bucket table for \p SplitListSet class.
        The capacity of table (max bucket count) is defined in the constructor call.

        Template parameters:
        - \p GC - garbage collector
        - \p Node - node type, must be a type based on \p split_list::node
        - \p Options... - options

        \p Options are:
        - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
        - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
    */
    template <typename GC, typename Node, typename... Options>
    class static_bucket_table
        struct default_options
            typedef CDS_DEFAULT_ALLOCATOR       allocator;
            typedef opt::v::relaxed_ordering    memory_model;
        typedef typename opt::make_options< default_options, Options... >::type options;

        typedef GC      gc;         ///< Garbage collector
        typedef Node    node_type;  ///< Bucket node type
        typedef atomics::atomic<node_type *> table_entry;   ///< Table entry type

        /// Bucket table allocator
        typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator;

        /// Memory model for atomic operations
        typedef typename options::memory_model  memory_model;

        const size_t    m_nLoadFactor;  ///< load factor (average count of items per bucket)
        const size_t    m_nCapacity;    ///< Bucket table capacity
        table_entry *   m_Table;        ///< Bucket table

        // Allocates the bucket array; every entry starts as nullptr
        void allocate_table()
            m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );

            // destroy_table: frees the whole bucket array
            bucket_table_allocator().Delete( m_Table, m_nCapacity );

        /// Constructs bucket table for 512K buckets. Load factor is 1.
        static_bucket_table()
            , m_nCapacity( 512 * 1024 )

        /// Creates the table with specified size rounded up to nearest power-of-two
            size_t nItemCount,        ///< Max expected item count in split-ordered list
            size_t nLoadFactor        ///< Load factor
            // m_nLoadFactor is declared before m_nCapacity, so it is already
            // initialized (and sanitized to >= 1) when used in the next initializer
            : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ),
            m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
            // m_nCapacity must be power of 2
            assert( cds::beans::is_power2( m_nCapacity ) );

        /// Destroys bucket table
        ~static_bucket_table()

        /// Returns head node of bucket \p nBucket
        node_type * bucket( size_t nBucket ) const
            assert( nBucket < capacity() );
            return m_Table[ nBucket ].load(memory_model::memory_order_acquire);

        /// Set \p pNode as a head of bucket \p nBucket
        void bucket( size_t nBucket, node_type * pNode )
            assert( nBucket < capacity() );
            // a bucket head is set at most once
            assert( bucket( nBucket ) == nullptr );

            m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );

        /// Returns the capacity of the bucket table
        size_t capacity() const

        /// Returns the load factor, i.e. average count of items per bucket
        size_t load_factor() const
            return m_nLoadFactor;
    /// Expandable bucket table
    /**
        This bucket table can dynamically grow its capacity when necessary
        up to maximum bucket count.

        Template parameters:
        - \p GC - garbage collector
        - \p Node - node type, must be derived from \p split_list::node
        - \p Options... - options

        \p Options are:
        - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
        - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
    */
    template <typename GC, typename Node, typename... Options>
    class expandable_bucket_table
        struct default_options
            typedef CDS_DEFAULT_ALLOCATOR       allocator;
            typedef opt::v::relaxed_ordering    memory_model;
        typedef typename opt::make_options< default_options, Options... >::type options;

        typedef GC      gc;         ///< Garbage collector
        typedef Node    node_type;  ///< Bucket node type
        typedef atomics::atomic<node_type *> table_entry;   ///< Table entry type

        /// Memory model for atomic operations
        typedef typename options::memory_model  memory_model;

        // The table is a fixed array of segment pointers; each segment
        // (an array of table entries) is allocated lazily on first use
        typedef atomics::atomic<table_entry *>  segment_type;   ///< Bucket table segment type

        /// Bucket table allocator
        typedef cds::details::Allocator< segment_type, typename options::allocator > bucket_table_allocator;

        /// Bucket table segment allocator
        typedef cds::details::Allocator< table_entry, typename options::allocator > segment_allocator;

        /// Bucket table metrics
            size_t    nSegmentCount;    ///< max count of segments in bucket table
            size_t    nSegmentSize;     ///< the segment's capacity. The capacity must be power of two.
            size_t    nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
            size_t    nLoadFactor;      ///< load factor
            size_t    nCapacity;        ///< max capacity of bucket table

            // Defaults: 1024 segments; the derived fields are computed
            // from the segment size in declaration order
            : nSegmentCount(1024)
            , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
            , nCapacity( nSegmentCount * nSegmentSize )

        const metrics   m_metrics;  ///< Dynamic bucket table metrics
        segment_type *  m_Segments; ///< bucket table - array of segments

        // Derives segment count and segment size from the expected item
        // count and the load factor
        metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
            // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
            m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
            size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor );
            if ( nBucketCount <= 2 ) {
            else if ( nBucketCount <= 1024 ) {
                // small table: one segment sized to the next power of two
                m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount );
                // large table: split log2(bucket count) between segment count and size
                nBucketCount = beans::log2ceil( nBucketCount );
                m.nSegmentSize = ((size_t) 1) << ( nBucketCount / 2 );
                if ( nBucketCount & 1 )
                if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
            m.nCapacity = m.nSegmentCount * m.nSegmentSize;
            m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
            assert( m.nSegmentSizeLog2 != 0 ) ;

        // Allocates the segment pointer array; entries start as nullptr
        segment_type * allocate_table()
            return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );

        void destroy_table( segment_type * pTable )
            bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );

        // Allocates one segment of table entries, all nullptr
        table_entry * allocate_segment()
            return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );

        void destroy_segment( table_entry * pSegment )
            segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );

            // m_nSegmentSize must be 2**N
            assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
            assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );

            // m_nSegmentCount must be 2**K
            assert( cds::beans::is_power2( m_metrics.nSegmentCount ));

            m_Segments = allocate_table();

        /// Constructs bucket table for 512K buckets. Load factor is 1.
        expandable_bucket_table()
            : m_metrics( calc_metrics( 512 * 1024, 1 ))

        /// Creates the table with specified capacity rounded up to nearest power-of-two
        expandable_bucket_table(
            size_t nItemCount,        ///< Max expected item count in split-ordered list
            size_t nLoadFactor        ///< Load factor
            : m_metrics( calc_metrics( nItemCount, nLoadFactor ))

        /// Destroys bucket table
        ~expandable_bucket_table()
            segment_type * pSegments = m_Segments;
            // free every lazily-allocated segment, then the segment array itself
            for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
                table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
                if ( pEntry != nullptr )
                    destroy_segment( pEntry );
            destroy_table( pSegments );

        /// Returns head node of the bucket \p nBucket
        node_type * bucket( size_t nBucket ) const
            // high bits select the segment, low bits index inside it
            size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
            assert( nSegment < m_metrics.nSegmentCount );

            table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
            if ( pSegment == nullptr )
                return nullptr; // uninitialized bucket
            return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);

        /// Set \p pNode as a head of bucket \p nBucket
        void bucket( size_t nBucket, node_type * pNode )
            size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
            assert( nSegment < m_metrics.nSegmentCount );

            segment_type& segment = m_Segments[nSegment];
            if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                // Lazily allocate the segment; if the CAS fails, another thread
                // installed its segment first, so release ours
                table_entry * pNewSegment = allocate_segment();
                table_entry * pNull = nullptr;
                if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                    destroy_segment( pNewSegment );
            segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );

        /// Returns the capacity of the bucket table
        size_t capacity() const
            return m_metrics.nCapacity;

        /// Returns the load factor, i.e. average count of items per bucket
        size_t load_factor() const
            return m_metrics.nLoadFactor;
    /// Split-list node traits
    /**
        This traits is intended for converting between underlying ordered list node type
        and split-list node type

        Template parameter:
        - \p BaseNodeTraits - node traits of base ordered list type
    */
    template <class BaseNodeTraits>
    struct node_traits: private BaseNodeTraits
        typedef BaseNodeTraits  base_class;                     ///< Base ordered-list node traits
        typedef typename base_class::value_type value_type;     ///< Value type
        typedef typename base_class::node_type  base_node_type; ///< Ordered list node type
        typedef node<base_node_type>            node_type;      ///< Split-list node type

        /// Convert value reference to node pointer
        static node_type * to_node_ptr( value_type& v )
            return static_cast<node_type *>( base_class::to_node_ptr( v ) );

        /// Convert value pointer to node pointer
        static node_type * to_node_ptr( value_type * v )
            return static_cast<node_type *>( base_class::to_node_ptr( v ) );

        /// Convert value reference to node pointer (const version)
        static node_type const * to_node_ptr( value_type const& v )
            return static_cast<node_type const*>( base_class::to_node_ptr( v ) );

        /// Convert value pointer to node pointer (const version)
        static node_type const * to_node_ptr( value_type const * v )
            return static_cast<node_type const *>( base_class::to_node_ptr( v ) );

        /// Convert node reference to value pointer
        static value_type * to_value_ptr( node_type& n )
            return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );

        /// Convert node pointer to value pointer
        static value_type * to_value_ptr( node_type * n )
            return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );

        /// Convert node reference to value pointer (const version)
        static const value_type * to_value_ptr( node_type const & n )
            return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );

        /// Convert node pointer to value pointer (const version)
        static const value_type * to_value_ptr( node_type const * n )
            return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
        // Selects the bucket table implementation by the value of the
        // dynamic_bucket_table option
        template <bool Value, typename GC, typename Node, typename... Options>
        struct bucket_table_selector;

        // true -> expandable (lazily growing) bucket table
        template <typename GC, typename Node, typename... Options>
        struct bucket_table_selector< true, GC, Node, Options...>
            typedef expandable_bucket_table<GC, Node, Options...>    type;

        // false -> static (fixed capacity) bucket table
        template <typename GC, typename Node, typename... Options>
        struct bucket_table_selector< false, GC, Node, Options...>
            typedef static_bucket_table<GC, Node, Options...>    type;
        // Disposer for dummy (bucket head) nodes: deallocates the node
        // with allocator \p Alloc
        template <typename GC, class Alloc >
        struct dummy_node_disposer {
            template <typename Node>
            void operator()( Node * p )
                typedef cds::details::Allocator< Node, Alloc >   node_deallocator;
                node_deallocator().Delete( p );
        // Couples a search key \p Q with its precomputed split-order hash
        // so comparators can check the hash before the key
        template <typename Q>
        struct search_value_type
            search_value_type( Q& v, size_t h )
        // Rebinds the traits of the underlying ordered list for use inside a
        // split-ordered list: hash-first comparator, dummy-node-aware disposer
        template <class OrderedList, class Traits>
        class rebind_list_traits
            typedef OrderedList native_ordered_list;
            typedef Traits      traits;

            typedef typename native_ordered_list::gc                gc;
            typedef typename native_ordered_list::key_comparator    native_key_comparator;
            typedef typename native_ordered_list::node_type         node_type;
            typedef typename native_ordered_list::value_type        value_type;
            typedef typename native_ordered_list::node_traits       node_traits;
            typedef typename native_ordered_list::disposer          native_disposer;

            typedef split_list::node<node_type>                     splitlist_node_type;

                // Compares by split-order (bit-reversed) hash first; only
                // equal-hash regular nodes fall back to the native comparator
                int operator()( value_type const& v1, value_type const& v2 ) const
                    splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
                    splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
                    if ( n1->m_nHash != n2->m_nHash )
                        return n1->m_nHash < n2->m_nHash ? -1 : 1;
                    // equal hashes: a dummy node can only equal another dummy node
                    if ( n1->is_dummy() ) {
                        assert( n2->is_dummy() );
                    assert( !n1->is_dummy() && !n2->is_dummy() );
                    return native_key_comparator()( v1, v2 );

                template <typename Q>
                int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                    if ( n->m_nHash != q.nHash )
                        return n->m_nHash < q.nHash ? -1 : 1;
                    assert( !n->is_dummy() );
                    return native_key_comparator()( v, q.val );

                template <typename Q>
                int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    // symmetric to the (value, search_value) overload above
                    return -operator()( v, q );

            // Disposer that routes dummy nodes to the dummy-node deallocator
            // and real values to the native ordered-list disposer
            struct wrapped_disposer
                void operator()( value_type * v )
                    splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
                    if ( p->is_dummy() ) {
                        CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( p );
                        dummy_node_disposer<gc, typename traits::allocator>()( p );
                    CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( v );
                    native_disposer()( v );

            // Comparator built from a user-provided Less predicate; uses the
            // same hash-first ordering as key_compare
            template <typename Less>
            struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                template <typename Q>
                int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                    if ( n->m_nHash != q.nHash )
                        return n->m_nHash < q.nHash ? -1 : 1;
                    assert( !n->is_dummy() );
                    return base_class()( v, q.val );

                template <typename Q>
                int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
                    if ( n->m_nHash != q.nHash )
                        return q.nHash < n->m_nHash ? -1 : 1;
                    assert( !n->is_dummy() );
                    return base_class()( q.val, v );

                template <typename Q1, typename Q2>
                int operator()( Q1 const& v1, Q2 const& v2 ) const
                    return base_class()( v1, v2 );

            // Rebind the native list traits: split-order comparator, wrapping
            // disposer, and split-list node as the boundary (dummy) node type
            typedef typename native_ordered_list::template rebind_traits<
                opt::compare< key_compare >
                ,opt::disposer< wrapped_disposer >
                ,opt::boundary_node_type< splitlist_node_type >
        // Chooses iterator or const_iterator of the underlying ordered list
        // depending on the IsConst flag
        template <typename OrderedList, bool IsConst>
        struct select_list_iterator;

        template <typename OrderedList>
        struct select_list_iterator<OrderedList, false>
            typedef typename OrderedList::iterator  type;

        template <typename OrderedList>
        struct select_list_iterator<OrderedList, true>
            typedef typename OrderedList::const_iterator    type;
        // Split-list iterator over the underlying ordered list that skips
        // dummy (bucket head) nodes so only user values are visited
        template <typename NodeTraits, typename OrderedList, bool IsConst>
            typedef OrderedList     ordered_list_type;
            friend class iterator_type <NodeTraits, OrderedList, !IsConst >;

            typedef typename select_list_iterator<ordered_list_type, IsConst>::type    list_iterator;
            typedef NodeTraits      node_traits;

            list_iterator   m_itCur;    // current position
            list_iterator   m_itEnd;    // end of the underlying list

            typedef typename list_iterator::value_ptr   value_ptr;
            typedef typename list_iterator::value_ref   value_ref;

            iterator_type( iterator_type const& src )
                : m_itCur( src.m_itCur )
                , m_itEnd( src.m_itEnd )

            // This ctor should be protected...
            iterator_type( list_iterator itCur, list_iterator itEnd )
                // position on the first non-dummy node (or end)
                while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )

            value_ptr operator ->() const
                return m_itCur.operator->();

            value_ref operator *() const
                return m_itCur.operator*();

            /// Pre-increment: advances past any dummy nodes
            iterator_type& operator ++()
                if ( m_itCur != m_itEnd ) {
                    } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );

            iterator_type& operator = (iterator_type const& src)
                m_itCur = src.m_itCur;
                m_itEnd = src.m_itEnd;

            // Equality compares only the current position; const and non-const
            // iterators are comparable with each other
            bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
                return m_itCur == i.m_itCur;

            bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
                return m_itCur != i.m_itCur;
859 } // namespace details
865 /// Reverses bit order in \p nHash
866 static inline size_t reverse_bits( size_t nHash )
868 return bitop::RBO( nHash );
871 static inline size_t regular_hash( size_t nHash )
873 return reverse_bits( nHash ) | size_t(1);
876 static inline size_t dummy_hash( size_t nHash )
878 return reverse_bits( nHash ) & ~size_t(1);
882 } // namespace split_list
885 // Forward declaration
886 template <class GC, class OrderedList, class Traits = split_list::traits>
890 }} // namespace cds::intrusive
892 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H