3 #ifndef __CDS_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
4 #define __CDS_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
6 #include <cds/intrusive/details/base.h>
7 #include <cds/cxx11_atomic.h>
8 #include <cds/details/allocator.h>
9 #include <cds/algo/int_algo.h>
10 #include <cds/algo/bitop.h>
12 namespace cds { namespace intrusive {
/// Split-ordered list related definitions
/** @ingroup cds_intrusive_helper */
17 namespace split_list {
/// Split-ordered list node
/**
    Template parameter:
    - OrderedListNode - node type for underlying ordered list
*/
template <typename OrderedListNode>
struct node: public OrderedListNode
{
    //@cond
    typedef OrderedListNode base_class;
    //@endcond

    size_t  m_nHash ;   ///< Hash value for node

    /// Default constructor (zero hash, i.e. a dummy node)
    node()
        : m_nHash(0)
    {}

    /// Initializes dummy node with \p nHash value
    node( size_t nHash )
        : m_nHash( nHash )
    {}

    /// Checks if the node is dummy node
    /**
        A dummy (bucket head) node has the least significant bit of its hash cleared;
        a regular node has it set (see \p regular_hash() / \p dummy_hash() below).
    */
    bool is_dummy() const
    {
        return (m_nHash & 1) == 0;
    }
};
54 /// Type traits for SplitListSet class
58 Hash function converts the key fields of struct \p T stored in the split list
59 into value of type \p size_t called hash value that is an index of hash table.
61 This is mandatory type and has no predefined one.
63 typedef opt::none hash;
67 The item counting is an important part of SplitListSet algorithm:
68 the <tt>empty()</tt> member function depends on correct item counting.
69 Therefore, atomicity::empty_item_counter is not allowed as a type of the option.
71 Default is atomicity::item_counter.
73 typedef atomicity::item_counter item_counter;
75 /// Bucket table allocator
77 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
79 typedef CDS_DEFAULT_ALLOCATOR allocator;
81 /// C++ memory model for atomic operations
83 Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model).
85 typedef opt::v::relaxed_ordering memory_model;
87 /// What type of bucket table is used
89 \p true - use split_list::expandable_bucket_table that can be expanded
90 if the load factor of the set is exhausted
91 \p false - use split_list::static_bucket_table that cannot be expanded
95 static const bool dynamic_bucket_table = true;
97 /// back-off strategy used
99 If the option is not specified, the cds::backoff::Default is used.
101 typedef cds::backoff::Default back_off;
/// [value-option] Split-list dynamic bucket table option
/**
    The option is used to select bucket table implementation.
    Possible values of \p Value are:
    - \p true - select \ref expandable_bucket_table implementation
    - \p false - select \ref static_bucket_table implementation
*/
template <bool Value>
struct dynamic_bucket_table
{
    //@cond
    template <typename Base> struct pack: public Base
    {
        enum { dynamic_bucket_table = Value };
    };
    //@endcond
};
122 /// Metafunction converting option list to traits struct
124 This is a wrapper for <tt> cds::opt::make_options< type_traits, Options...> </tt>
126 Available \p Options:
127 - opt::hash - mandatory option, specifies hash functor.
128 - opt::item_counter - optional, specifies item counting policy. See type_traits::item_counter
130 - opt::memory_model - C++ memory model for atomic operations.
131 Can be opt::v::relaxed_ordering (relaxed memory model, the default) or opt::v::sequential_consistent (sequentially consisnent memory model).
132 - opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
133 - split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
134 Dynamic bucket table expands its size up to maximum bucket count when necessary
135 - opt::back_off - back-off strategy used for spinning. If the option is not specified, the cds::backoff::Default is used.
137 See \ref MichaelHashSet, \ref type_traits.
139 template <typename... Options>
141 typedef typename cds::opt::make_options< type_traits, Options...>::type type ; ///< Result of metafunction
145 /// Static bucket table
147 Non-resizeable bucket table for SplitListSet class.
148 The capacity of table (max bucket count) is defined in the constructor call.
151 - \p GC - garbage collector used
152 - \p Node - node type, must be a type based on\ref node template
153 - \p Options... - options
156 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
157 - \p opt::memory_model - memory model used. Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering
159 template <typename GC, typename Node, typename... Options>
160 class static_bucket_table
163 struct default_options
165 typedef CDS_DEFAULT_ALLOCATOR allocator;
166 typedef opt::v::relaxed_ordering memory_model;
168 typedef typename opt::make_options< default_options, Options... >::type options;
172 typedef GC gc ; ///< Garbage collector
173 typedef Node node_type ; ///< Bucket node type
174 typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
176 /// Bucket table allocator
177 typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator;
179 /// Memory model for atomic operations
180 typedef typename options::memory_model memory_model;
183 const size_t m_nLoadFactor ; ///< load factor (average count of items per bucket)
184 const size_t m_nCapacity ; ///< Bucket table capacity
185 table_entry * m_Table ; ///< Bucket table
189 void allocate_table()
191 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
196 bucket_table_allocator().Delete( m_Table, m_nCapacity );
201 /// Constructs bucket table for 512K buckets. Load factor is 1.
202 static_bucket_table()
204 , m_nCapacity( 512 * 1024 )
211 size_t nItemCount, ///< Max expected item count in split-ordered list
212 size_t nLoadFactor ///< Load factor
214 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ),
215 m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
217 // m_nCapacity must be power of 2
218 assert( cds::beans::is_power2( m_nCapacity ) );
222 /// Destroy bucket table
223 ~static_bucket_table()
228 /// Returns head node of bucket \p nBucket
229 node_type * bucket( size_t nBucket ) const
231 assert( nBucket < capacity() );
232 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
235 /// Set head node \p pNode of bucket \p nBucket
236 void bucket( size_t nBucket, node_type * pNode )
238 assert( nBucket < capacity() );
239 assert( bucket( nBucket ) == nullptr );
241 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
244 /// Returns the capacity of the bucket table
245 size_t capacity() const
250 /// Returns the load factor, i.e. average count of items per bucket
251 size_t load_factor() const
253 return m_nLoadFactor;
257 /// Expandable bucket table
259 This bucket table can dynamically grow its capacity when necessary
260 up to maximum bucket count.
263 - \p GC - garbage collector used
264 - \p Node - node type, must be an instantiation of \ref node template
265 - \p Options... - options
268 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
269 - \p opt::memory_model - memory model used. Possible types are opt::v::sequential_consistent, opt::v::relaxed_ordering
271 template <typename GC, typename Node, typename... Options>
272 class expandable_bucket_table
275 struct default_options
277 typedef CDS_DEFAULT_ALLOCATOR allocator;
278 typedef opt::v::relaxed_ordering memory_model;
280 typedef typename opt::make_options< default_options, Options... >::type options;
283 typedef GC gc ; ///< Garbage collector
284 typedef Node node_type ; ///< Bucket node type
285 typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
287 /// Memory model for atomic operations
288 typedef typename options::memory_model memory_model;
291 typedef atomics::atomic<table_entry *> segment_type ; ///< Bucket table segment type
294 /// Bucket table allocator
295 typedef cds::details::Allocator< segment_type, typename options::allocator > bucket_table_allocator;
297 /// Bucket table segment allocator
298 typedef cds::details::Allocator< table_entry, typename options::allocator > segment_allocator;
301 /// Bucket table metrics
303 size_t nSegmentCount ; ///< max count of segments in bucket table
304 size_t nSegmentSize ; ///< the segment's capacity. The capacity must be power of two.
305 size_t nSegmentSizeLog2 ; ///< log2( m_nSegmentSize )
306 size_t nLoadFactor ; ///< load factor
307 size_t nCapacity ; ///< max capacity of bucket table
310 : nSegmentCount(1024)
312 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
314 , nCapacity( nSegmentCount * nSegmentSize )
318 const metrics m_metrics ; ///< Dynamic bucket table metrics
321 //const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
322 //const size_t m_nCapacity ; ///< Bucket table capacity
323 segment_type * m_Segments ; ///< bucket table - array of segments
327 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
331 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
332 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
334 size_t nBucketCount = (size_t)( ((float) nItemCount) / m.nLoadFactor );
335 if ( nBucketCount <= 2 ) {
339 else if ( nBucketCount <= 1024 ) {
341 m.nSegmentSize = ((size_t) 1) << beans::log2ceil( nBucketCount );
344 nBucketCount = beans::log2ceil( nBucketCount );
346 m.nSegmentSize = ((size_t) 1) << ( nBucketCount / 2 );
347 if ( nBucketCount & 1 )
349 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
352 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
353 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
354 assert( m.nSegmentSizeLog2 != 0 ) ; //
358 segment_type * allocate_table()
360 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
363 void destroy_table( segment_type * pTable )
365 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
368 table_entry * allocate_segment()
370 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
373 void destroy_segment( table_entry * pSegment )
375 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
380 // m_nSegmentSize must be 2**N
381 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
382 assert( ( ((size_t) 1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
384 // m_nSegmentCount must be 2**K
385 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
387 m_Segments = allocate_table();
393 /// Constructs bucket table for 512K buckets. Load factor is 1.
394 expandable_bucket_table()
395 : m_metrics( calc_metrics( 512 * 1024, 1 ))
401 expandable_bucket_table(
402 size_t nItemCount, ///< Max expected item count in split-ordered list
403 size_t nLoadFactor ///< Load factor
405 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
410 /// Destroy bucket table
411 ~expandable_bucket_table()
413 segment_type * pSegments = m_Segments;
414 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
415 table_entry * pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
416 if ( pEntry != nullptr )
417 destroy_segment( pEntry );
419 destroy_table( pSegments );
422 /// Returns head node of the bucket \p nBucket
423 node_type * bucket( size_t nBucket ) const
425 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
426 assert( nSegment < m_metrics.nSegmentCount );
428 table_entry * pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
429 if ( pSegment == nullptr )
430 return nullptr; // uninitialized bucket
431 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
434 /// Set head node \p pNode of bucket \p nBucket
435 void bucket( size_t nBucket, node_type * pNode )
437 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
438 assert( nSegment < m_metrics.nSegmentCount );
440 segment_type& segment = m_Segments[nSegment];
441 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
442 table_entry * pNewSegment = allocate_segment();
443 table_entry * pNull = nullptr;
444 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
445 destroy_segment( pNewSegment );
448 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
451 /// Returns the capacity of the bucket table
452 size_t capacity() const
454 return m_metrics.nCapacity;
457 /// Returns the load factor, i.e. average count of items per bucket
458 size_t load_factor() const
460 return m_metrics.nLoadFactor;
464 /// Split-list node traits
466 This traits is intended for converting between underlying ordered list node type
467 and split-list node type
470 - \p BaseNodeTraits - node traits of base ordered list type
472 template <class BaseNodeTraits>
473 struct node_traits: private BaseNodeTraits
475 typedef BaseNodeTraits base_class ; ///< Base ordered list type
476 typedef typename base_class::value_type value_type ; ///< Value type
477 typedef typename base_class::node_type base_node_type ; ///< Ordered list node type
478 typedef node<base_node_type> node_type ; ///< Spit-list node type
480 /// Convert value reference to node pointer
481 static node_type * to_node_ptr( value_type& v )
483 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
486 /// Convert value pointer to node pointer
487 static node_type * to_node_ptr( value_type * v )
489 return static_cast<node_type *>( base_class::to_node_ptr( v ) );
492 /// Convert value reference to node pointer (const version)
493 static node_type const * to_node_ptr( value_type const& v )
495 return static_cast<node_type const*>( base_class::to_node_ptr( v ) );
498 /// Convert value pointer to node pointer (const version)
499 static node_type const * to_node_ptr( value_type const * v )
501 return static_cast<node_type const *>( base_class::to_node_ptr( v ) );
504 /// Convert node refernce to value pointer
505 static value_type * to_value_ptr( node_type& n )
507 return base_class::to_value_ptr( static_cast<base_node_type &>( n ) );
510 /// Convert node pointer to value pointer
511 static value_type * to_value_ptr( node_type * n )
513 return base_class::to_value_ptr( static_cast<base_node_type *>( n ) );
516 /// Convert node reference to value pointer (const version)
517 static const value_type * to_value_ptr( node_type const & n )
519 return base_class::to_value_ptr( static_cast<base_node_type const &>( n ) );
522 /// Convert node pointer to value pointer (const version)
523 static const value_type * to_value_ptr( node_type const * n )
525 return base_class::to_value_ptr( static_cast<base_node_type const *>( n ) );
532 template <bool Value, typename GC, typename Node, typename... Options>
533 struct bucket_table_selector;
535 template <typename GC, typename Node, typename... Options>
536 struct bucket_table_selector< true, GC, Node, Options...>
538 typedef expandable_bucket_table<GC, Node, Options...> type;
541 template <typename GC, typename Node, typename... Options>
542 struct bucket_table_selector< false, GC, Node, Options...>
544 typedef static_bucket_table<GC, Node, Options...> type;
547 template <typename GC, class Alloc >
548 struct dummy_node_disposer {
549 template <typename Node>
550 void operator()( Node * p )
552 typedef cds::details::Allocator< Node, Alloc > node_deallocator;
553 node_deallocator().Delete( p );
/// Search helper: pairs a user key reference with its split-list hash value
template <typename Q>
struct search_value_type
{
    Q&      val   ;     ///< user key reference
    size_t  nHash ;     ///< hash value of the key

    /// Constructs the wrapper from key \p v and hash \p h
    search_value_type( Q& v, size_t h )
        : val( v )
        , nHash( h )
    {}
};
575 template <class OrderedList, class Options>
576 class rebind_list_options
578 typedef OrderedList native_ordered_list;
579 typedef Options options;
581 typedef typename native_ordered_list::gc gc;
582 typedef typename native_ordered_list::key_comparator native_key_comparator;
583 typedef typename native_ordered_list::node_type node_type;
584 typedef typename native_ordered_list::value_type value_type;
585 typedef typename native_ordered_list::node_traits node_traits;
586 typedef typename native_ordered_list::disposer native_disposer;
588 typedef split_list::node<node_type> splitlist_node_type;
591 int operator()( value_type const& v1, value_type const& v2 ) const
593 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v1 ));
594 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v2 ));
595 if ( n1->m_nHash != n2->m_nHash )
596 return n1->m_nHash < n2->m_nHash ? -1 : 1;
598 if ( n1->is_dummy() ) {
599 assert( n2->is_dummy() );
603 assert( !n1->is_dummy() && !n2->is_dummy() );
605 return native_key_comparator()( v1, v2 );
608 template <typename Q>
609 int operator()( value_type const& v, search_value_type<Q> const& q ) const
611 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
612 if ( n->m_nHash != q.nHash )
613 return n->m_nHash < q.nHash ? -1 : 1;
615 assert( !n->is_dummy() );
616 return native_key_comparator()( v, q.val );
619 template <typename Q>
620 int operator()( search_value_type<Q> const& q, value_type const& v ) const
622 return -operator()( v, q );
626 struct wrapped_disposer
628 void operator()( value_type * v )
630 splitlist_node_type * p = static_cast<splitlist_node_type *>( node_traits::to_node_ptr( v ));
632 dummy_node_disposer<gc, typename options::allocator>()( p );
634 native_disposer()( v );
639 template <typename Less>
640 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
642 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
644 template <typename Q>
645 int operator()( value_type const& v, search_value_type<Q> const& q ) const
647 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
648 if ( n->m_nHash != q.nHash )
649 return n->m_nHash < q.nHash ? -1 : 1;
651 assert( !n->is_dummy() );
652 return base_class()( v, q.val );
655 template <typename Q>
656 int operator()( search_value_type<Q> const& q, value_type const& v ) const
658 splitlist_node_type const * n = static_cast<splitlist_node_type const *>( node_traits::to_node_ptr( v ));
659 if ( n->m_nHash != q.nHash )
660 return q.nHash < n->m_nHash ? -1 : 1;
662 assert( !n->is_dummy() );
663 return base_class()( q.val, v );
666 template <typename Q1, typename Q2>
667 int operator()( Q1 const& v1, Q2 const& v2 ) const
669 return base_class()( v1, v2 );
673 typedef typename native_ordered_list::template rebind_options<
674 opt::compare< key_compare >
675 ,opt::disposer< wrapped_disposer >
676 ,opt::boundary_node_type< splitlist_node_type >
/// Selects the ordered list iterator type: mutable when \p IsConst is \p false, const otherwise
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

//@cond
template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator          type;
};

template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator    type;
};
//@endcond
695 template <typename NodeTraits, typename OrderedList, bool IsConst>
698 typedef OrderedList ordered_list_type;
700 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
701 typedef NodeTraits node_traits;
704 list_iterator m_itCur;
705 list_iterator m_itEnd;
708 typedef typename list_iterator::value_ptr value_ptr;
709 typedef typename list_iterator::value_ref value_ref;
715 iterator_type( iterator_type const& src )
716 : m_itCur( src.m_itCur )
717 , m_itEnd( src.m_itEnd )
720 // This ctor should be protected...
721 iterator_type( list_iterator itCur, list_iterator itEnd )
726 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
731 value_ptr operator ->() const
733 return m_itCur.operator->();
736 value_ref operator *() const
738 return m_itCur.operator*();
742 iterator_type& operator ++()
744 if ( m_itCur != m_itEnd ) {
747 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
752 iterator_type& operator = (iterator_type const& src)
754 m_itCur = src.m_itCur;
755 m_itEnd = src.m_itEnd;
760 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
762 return m_itCur == i.m_itCur;
765 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
767 return m_itCur != i.m_itCur;
772 } // namespace details
778 /// Reverses bit order in \p nHash
779 static inline size_t reverse_bits( size_t nHash )
781 return bitop::RBO( nHash );
784 static inline size_t regular_hash( size_t nHash )
786 return reverse_bits( nHash ) | size_t(1);
789 static inline size_t dummy_hash( size_t nHash )
791 return reverse_bits( nHash ) & ~size_t(1);
795 } // namespace split_list
798 // Forward declaration
799 template <class GC, class OrderedList, class Traits = split_list::type_traits>
803 }} // namespace cds::intrusive
805 #endif // #ifndef __CDS_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H