2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/details/allocator.h>
37 #include <cds/algo/int_algo.h>
38 #include <cds/algo/bitop.h>
39 #include <cds/opt/hash.h>
41 namespace cds { namespace intrusive {
43 /// Split-ordered list related definitions
44 /** @ingroup cds_intrusive_helper
46 namespace split_list {
/// Split-ordered list node
/**
    Template parameter:
    - \p OrderedListNode - node type for underlying ordered list
*/
template <typename OrderedListNode>
struct node: public OrderedListNode
{
    //@cond
    typedef OrderedListNode base_class;
    //@endcond

    size_t  m_nHash;    ///< Hash value for node

    /// Default constructor - creates a dummy node (zero hash, low bit clear)
    node()
        : m_nHash(0)
    {}

    /// Initializes dummy node with \p nHash value
    /**
        \p nHash must be a dummy hash (even value); regular-node hashes have the low bit set.
    */
    node( size_t nHash )
        : m_nHash( nHash )
    {
        assert( is_dummy() );
    }

    /// Checks if the node is dummy node
    /**
        Split-ordered lists encode node kind in the low bit of the hash:
        dummy (bucket-head) nodes have even hash, regular nodes have odd hash.
    */
    bool is_dummy() const
    {
        return (m_nHash & 1) == 0;
    }
};
82 /// \p SplitListSet internal statistics. May be used for debugging or profiling
84 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
86 template <typename Counter = cds::atomicity::event_counter >
89 typedef Counter counter_type; ///< Counter type
91 counter_type m_nInsertSuccess; ///< Count of success inserting
92 counter_type m_nInsertFailed; ///< Count of failed inserting
93 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
94 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
95 counter_type m_nEraseSuccess; ///< Count of success erasing of items
96 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
97 counter_type m_nExtractSuccess; ///< Count of success extracting of items
98 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
99 counter_type m_nFindSuccess; ///< Count of success finding
100 counter_type m_nFindFailed; ///< Count of failed finding
101 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
102 counter_type m_nHeadNodeFreed; ///< Count of freed head node
103 counter_type m_nBucketCount; ///< Current bucket count
104 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
105 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
106 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
109 void onInsertSuccess() { ++m_nInsertSuccess; }
110 void onInsertFailed() { ++m_nInsertFailed; }
111 void onUpdateNew() { ++m_nUpdateNew; }
112 void onUpdateExist() { ++m_nUpdateExist; }
113 void onEraseSuccess() { ++m_nEraseSuccess; }
114 void onEraseFailed() { ++m_nEraseFailed; }
115 void onExtractSuccess() { ++m_nExtractSuccess; }
116 void onExtractFailed() { ++m_nExtractFailed; }
117 void onFindSuccess() { ++m_nFindSuccess; }
118 void onFindFailed() { ++m_nFindFailed; }
119 bool onFind(bool bSuccess)
127 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
128 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
129 void onNewBucket() { ++m_nBucketCount; }
130 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
131 void onBucketInitContenton() { ++m_nInitBucketContention; }
132 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
/// Dummy statistics - no counting is performed, no overhead. Supports interface like \p split_list::stat
struct empty_stat {
    //@cond
    void onInsertSuccess()       const {}
    void onInsertFailed()        const {}
    void onUpdateNew()           const {}
    void onUpdateExist()         const {}
    void onEraseSuccess()        const {}
    void onEraseFailed()         const {}
    void onExtractSuccess()      const {}
    void onExtractFailed()       const {}
    void onFindSuccess()         const {}
    void onFindFailed()          const {}
    bool onFind( bool bSuccess ) const { return bSuccess; }
    void onHeadNodeAllocated()   const {}
    void onHeadNodeFreed()       const {}
    void onNewBucket()           const {}
    void onRecursiveInitBucket() const {}
    void onBucketInitContenton() const {}
    void onBusyWaitBucketInit()  const {}
    //@endcond
};
159 /// SplitListSet traits
164 Hash function converts the key fields of struct \p T stored in the split list
165 into hash value of type \p size_t that is an index in hash table.
166 By default, \p std::hash is used.
168 typedef opt::none hash;
172 The item counting is an important part of \p SplitListSet algorithm:
173 the <tt>empty()</tt> member function depends on correct item counting.
174 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
176 Default is \p cds::atomicity::item_counter.
178 typedef cds::atomicity::item_counter item_counter;
180 /// Bucket table allocator
182 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
184 typedef CDS_DEFAULT_ALLOCATOR allocator;
186 /// Internal statistics (by default, disabled)
188 Possible statistics types are: \p split_list::stat (enable internal statistics),
189 \p split_list::empty_stat (the default, internal statistics disabled),
190 user-provided class that supports \p %split_list::stat interface.
192 typedef split_list::empty_stat stat;
195 /// C++ memory ordering model
197 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
198 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
200 typedef opt::v::relaxed_ordering memory_model;
202 /// What type of bucket table is used
204 \p true - use \p split_list::expandable_bucket_table that can be expanded
205 if the load factor of the set is exhausted.
206 \p false - use \p split_list::static_bucket_table that cannot be expanded
207 and is allocated in \p SplitListSet constructor.
211 static const bool dynamic_bucket_table = true;
213 /// Back-off strategy
214 typedef cds::backoff::Default back_off;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \p expandable_bucket_table
    - \p false - select \p static_bucket_table
*/
template <bool Value>
struct dynamic_bucket_table
{
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum { dynamic_bucket_table = Value };
    };
    //@endcond
};
235 /// Metafunction converting option list to \p split_list::traits
237 Available \p Options:
238 - \p opt::hash - mandatory option, specifies hash functor.
239 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
241 - \p opt::memory_model - C++ memory model for atomic operations.
242 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
243 or \p opt::v::sequential_consistent (sequentially consistent memory model).
244 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
245 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
246 Dynamic bucket table expands its size up to maximum bucket count when necessary
247 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
248 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
249 To enable internal statistics use \p split_list::stat.
251 template <typename... Options>
253 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
256 /// Static bucket table
258 Non-resizeable bucket table for \p SplitListSet class.
259 The capacity of table (max bucket count) is defined in the constructor call.
262 - \p GC - garbage collector
263 - \p Node - node type, must be a type based on \p split_list::node
264 - \p Options... - options
267 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
268 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
270 template <typename GC, typename Node, typename... Options>
271 class static_bucket_table
274 struct default_options
276 typedef CDS_DEFAULT_ALLOCATOR allocator;
277 typedef opt::v::relaxed_ordering memory_model;
279 typedef typename opt::make_options< default_options, Options... >::type options;
283 typedef GC gc; ///< Garbage collector
284 typedef Node node_type; ///< Bucket node type
285 typedef atomics::atomic<node_type *> table_entry; ///< Table entry type
287 /// Bucket table allocator
288 typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator;
290 /// Memory model for atomic operations
291 typedef typename options::memory_model memory_model;
294 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
295 const size_t m_nCapacity; ///< Bucket table capacity
296 table_entry * m_Table; ///< Bucket table
300 void allocate_table()
302 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
307 bucket_table_allocator().Delete( m_Table, m_nCapacity );
312 /// Constructs bucket table for 512K buckets. Load factor is 1.
313 static_bucket_table()
315 , m_nCapacity( 512 * 1024 )
320 /// Creates the table with specified size rounded up to nearest power-of-two
322 size_t nItemCount, ///< Max expected item count in split-ordered list
323 size_t nLoadFactor ///< Load factor
325 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ),
326 m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
328 // m_nCapacity must be power of 2
329 assert( cds::beans::is_power2( m_nCapacity ) );
333 /// Destroys bucket table
334 ~static_bucket_table()
339 /// Returns head node of bucket \p nBucket
340 node_type * bucket( size_t nBucket ) const
342 assert( nBucket < capacity() );
343 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
346 /// Set \p pNode as a head of bucket \p nBucket
347 void bucket( size_t nBucket, node_type * pNode )
349 assert( nBucket < capacity() );
350 assert( bucket( nBucket ) == nullptr );
352 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
355 /// Returns the capacity of the bucket table
356 size_t capacity() const
361 /// Returns the load factor, i.e. average count of items per bucket
362 size_t load_factor() const
364 return m_nLoadFactor;
368 /// Expandable bucket table
370 This bucket table can dynamically grow its capacity when necessary
371 up to maximum bucket count.
374 - \p GC - garbage collector
375 - \p Node - node type, must be derived from \p split_list::node
376 - \p Options... - options
379 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
380 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
382 template <typename GC, typename Node, typename... Options>
383 class expandable_bucket_table
386 struct default_options
388 typedef CDS_DEFAULT_ALLOCATOR allocator;
389 typedef opt::v::relaxed_ordering memory_model;
391 typedef typename opt::make_options< default_options, Options... >::type options;
394 typedef GC gc; ///< Garbage collector
395 typedef Node node_type; ///< Bucket node type
396 typedef atomics::atomic<node_type *> table_entry; ///< Table entry type
398 /// Memory model for atomic operations
399 typedef typename options::memory_model memory_model;
402 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
405 /// Bucket table allocator
406 typedef cds::details::Allocator< segment_type, typename options::allocator > bucket_table_allocator;
408 /// Bucket table segment allocator
409 typedef cds::details::Allocator< table_entry, typename options::allocator > segment_allocator;
412 /// Bucket table metrics
414 size_t nSegmentCount; ///< max count of segments in bucket table
415 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
416 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
417 size_t nLoadFactor; ///< load factor
418 size_t nCapacity; ///< max capacity of bucket table
422 : nSegmentCount(1024)
424 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
426 , nCapacity( nSegmentCount * nSegmentSize )
430 const metrics m_metrics; ///< Dynamic bucket table metrics
433 segment_type * m_Segments; ///< bucket table - array of segments
437 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
441 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
442 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
444 size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor );
445 if ( nBucketCount <= 2 ) {
449 else if ( nBucketCount <= 1024 ) {
451 m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount );
454 nBucketCount = beans::log2ceil( nBucketCount );
456 m.nSegmentSize = ((size_t) 1) << ( nBucketCount / 2 );
457 if ( nBucketCount & 1 )
459 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
462 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
463 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
464 assert( m.nSegmentSizeLog2 != 0 ) ; //
468 segment_type * allocate_table()
470 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
473 void destroy_table( segment_type * pTable )
475 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
478 table_entry * allocate_segment()
480 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
483 void destroy_segment( table_entry * pSegment )
485 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
490 // m_nSegmentSize must be 2**N
491 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
492 assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
494 // m_nSegmentCount must be 2**K
495 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
497 m_Segments = allocate_table();
503 /// Constructs bucket table for 512K buckets. Load factor is 1.
504 expandable_bucket_table()
505 : m_metrics( calc_metrics( 512 * 1024, 1 ))
510 /// Creates the table with specified capacity rounded up to nearest power-of-two
511 expandable_bucket_table(
512 size_t nItemCount, ///< Max expected item count in split-ordered list
513 size_t nLoadFactor ///< Load factor
515 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
520 /// Destroys bucket table
521 ~expandable_bucket_table()
523 segment_type * pSegments = m_Segments;
524 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
525 table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
526 if ( pEntry != nullptr )
527 destroy_segment( pEntry );
529 destroy_table( pSegments );
532 /// Returns head node of the bucket \p nBucket
533 node_type * bucket( size_t nBucket ) const
535 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
536 assert( nSegment < m_metrics.nSegmentCount );
538 table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
539 if ( pSegment == nullptr )
540 return nullptr; // uninitialized bucket
541 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
544 /// Set \p pNode as a head of bucket \p nBucket
545 void bucket( size_t nBucket, node_type * pNode )
547 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
548 assert( nSegment < m_metrics.nSegmentCount );
550 segment_type& segment = m_Segments[nSegment];
551 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
552 table_entry * pNewSegment = allocate_segment();
553 table_entry * pNull = nullptr;
554 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
555 destroy_segment( pNewSegment );
558 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
561 /// Returns the capacity of the bucket table
562 size_t capacity() const
564 return m_metrics.nCapacity;
567 /// Returns the load factor, i.e. average count of items per bucket
568 size_t load_factor() const
570 return m_metrics.nLoadFactor;
574 /// Split-list node traits
576 This traits is intended for converting between underlying ordered list node type
577 and split-list node type
580 - \p BaseNodeTraits - node traits of base ordered list type
582 template <class BaseNodeTraits>
583 struct node_traits: private BaseNodeTraits
585 typedef BaseNodeTraits base_class; ///< Base ordered list node type
586 typedef typename base_class::value_type value_type; ///< Value type
587 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
588 typedef node<base_node_type> node_type; ///< Spit-list node type
590 /// Convert value reference to node pointer
591 static node_type * to_node_ptr( value_type& v )
593 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
596 /// Convert value pointer to node pointer
597 static node_type * to_node_ptr( value_type * v )
599 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
602 /// Convert value reference to node pointer (const version)
603 static node_type const * to_node_ptr( value_type const& v )
605 return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
608 /// Convert value pointer to node pointer (const version)
609 static node_type const * to_node_ptr( value_type const * v )
611 return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
614 /// Convert node reference to value pointer
615 static value_type * to_value_ptr( node_type& n )
617 return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
620 /// Convert node pointer to value pointer
621 static value_type * to_value_ptr( node_type * n )
623 return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
626 /// Convert node reference to value pointer (const version)
627 static const value_type * to_value_ptr( node_type const & n )
629 return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
632 /// Convert node pointer to value pointer (const version)
633 static const value_type * to_value_ptr( node_type const * n )
635 return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
641 template <bool Value, typename GC, typename Node, typename... Options>
642 struct bucket_table_selector;
644 template <typename GC, typename Node, typename... Options>
645 struct bucket_table_selector< true, GC, Node, Options...>
647 typedef expandable_bucket_table<GC, Node, Options...> type;
650 template <typename GC, typename Node, typename... Options>
651 struct bucket_table_selector< false, GC, Node, Options...>
653 typedef static_bucket_table<GC, Node, Options...> type;
656 template <typename GC, class Alloc >
657 struct dummy_node_disposer {
658 template <typename Node>
659 void operator()( Node * p )
661 typedef cds::details::Allocator< Node, Alloc > node_deallocator;
662 node_deallocator().Delete( p );
// Wraps a search key together with its precomputed split-order hash,
// so comparators can short-circuit on the hash before comparing keys.
template <typename Q>
struct search_value_type
{
    Q&      val;    // reference to the user-supplied search key
    size_t  nHash;  // precomputed hash of the key

    search_value_type( Q& v, size_t h )
        : val( v )
        , nHash( h )
    {}
};
678 template <class OrderedList, class Traits>
679 class rebind_list_traits
681 typedef OrderedList native_ordered_list;
682 typedef Traits traits;
684 typedef typename native_ordered_list::gc gc;
685 typedef typename native_ordered_list::key_comparator native_key_comparator;
686 typedef typename native_ordered_list::node_type node_type;
687 typedef typename native_ordered_list::value_type value_type;
688 typedef typename native_ordered_list::node_traits node_traits;
689 typedef typename native_ordered_list::disposer native_disposer;
691 typedef split_list::node<node_type> splitlist_node_type;
694 int operator()( value_type const& v1, value_type const& v2 ) const
696 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
697 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
698 if ( n1->m_nHash != n2->m_nHash )
699 return n1->m_nHash < n2->m_nHash ? -1 : 1;
701 if ( n1->is_dummy() ) {
702 assert( n2->is_dummy() );
706 assert( !n1->is_dummy() && !n2->is_dummy() );
708 return native_key_comparator()( v1, v2 );
711 template <typename Q>
712 int operator()( value_type const& v, search_value_type<Q> const& q ) const
714 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
715 if ( n->m_nHash != q.nHash )
716 return n->m_nHash < q.nHash ? -1 : 1;
718 assert( !n->is_dummy() );
719 return native_key_comparator()( v, q.val );
722 template <typename Q>
723 int operator()( search_value_type<Q> const& q, value_type const& v ) const
725 return -operator()( v, q );
729 struct wrapped_disposer
731 void operator()( value_type * v )
733 splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
735 dummy_node_disposer<gc, typename traits::allocator>()( p );
737 native_disposer()( v );
742 template <typename Less>
743 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
745 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
747 template <typename Q>
748 int operator()( value_type const& v, search_value_type<Q> const& q ) const
750 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
751 if ( n->m_nHash != q.nHash )
752 return n->m_nHash < q.nHash ? -1 : 1;
754 assert( !n->is_dummy() );
755 return base_class()( v, q.val );
758 template <typename Q>
759 int operator()( search_value_type<Q> const& q, value_type const& v ) const
761 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
762 if ( n->m_nHash != q.nHash )
763 return q.nHash < n->m_nHash ? -1 : 1;
765 assert( !n->is_dummy() );
766 return base_class()( q.val, v );
769 template <typename Q1, typename Q2>
770 int operator()( Q1 const& v1, Q2 const& v2 ) const
772 return base_class()( v1, v2 );
776 typedef typename native_ordered_list::template rebind_traits<
777 opt::compare< key_compare >
778 ,opt::disposer< wrapped_disposer >
779 ,opt::boundary_node_type< splitlist_node_type >
// Selects the ordered list's iterator type: const_iterator when IsConst is true
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator type;
};

template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator type;
};
798 template <typename NodeTraits, typename OrderedList, bool IsConst>
801 typedef OrderedList ordered_list_type;
802 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
805 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
806 typedef NodeTraits node_traits;
809 list_iterator m_itCur;
810 list_iterator m_itEnd;
813 typedef typename list_iterator::value_ptr value_ptr;
814 typedef typename list_iterator::value_ref value_ref;
820 iterator_type( iterator_type const& src )
821 : m_itCur( src.m_itCur )
822 , m_itEnd( src.m_itEnd )
825 // This ctor should be protected...
826 iterator_type( list_iterator itCur, list_iterator itEnd )
831 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
835 value_ptr operator ->() const
837 return m_itCur.operator->();
840 value_ref operator *() const
842 return m_itCur.operator*();
846 iterator_type& operator ++()
848 if ( m_itCur != m_itEnd ) {
851 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
856 iterator_type& operator = (iterator_type const& src)
858 m_itCur = src.m_itCur;
859 m_itEnd = src.m_itEnd;
864 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
866 return m_itCur == i.m_itCur;
869 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
871 return m_itCur != i.m_itCur;
874 } // namespace details
880 /// Reverses bit order in \p nHash
881 static inline size_t reverse_bits( size_t nHash )
883 return bitop::RBO( nHash );
886 static inline size_t regular_hash( size_t nHash )
888 return reverse_bits( nHash ) | size_t(1);
891 static inline size_t dummy_hash( size_t nHash )
893 return reverse_bits( nHash ) & ~size_t(1);
897 } // namespace split_list
900 // Forward declaration
901 template <class GC, class OrderedList, class Traits = split_list::traits>
905 }} // namespace cds::intrusive
907 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H