/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
#define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H

#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
#include <cds/opt/hash.h>
#include <cds/intrusive/free_list_selector.h>
namespace cds { namespace intrusive {

    /// Split-ordered list related definitions
    /** @ingroup cds_intrusive_helper
    */
    namespace split_list {

        /// Hash accessor shared by dummy (bucket head) nodes and regular nodes
        struct hash_node
        {
            size_t  m_nHash;    ///< Hash value for node

            /// Default constructor
            hash_node()
                : m_nHash( 0 )
            {
                assert( is_dummy());
            }

            /// Initializes dummy node with \p nHash value
            hash_node( size_t nHash )
                : m_nHash( nHash )
            {
                assert( is_dummy());
            }

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return (m_nHash & 1) == 0;
            }
        };
        /// Split-ordered list node
        /**
            Template parameter:
            - \p OrderedListNode - node type for underlying ordered list
        */
        template <typename OrderedListNode>
        struct node: public OrderedListNode, public hash_node
        {
            typedef OrderedListNode base_class; ///< Base ordered-list node class

            /// Default constructor
            node()
                : hash_node( 0 )
            {
                assert( is_dummy());
            }

            /// Initializes dummy node with \p nHash value
            node( size_t nHash )
                : hash_node( nHash )
            {
                assert( is_dummy());
            }

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return hash_node::is_dummy();
            }
        };

        /// Specialization for an ordered list that has no separate node type
        template <>
        struct node<void>: public hash_node
        {
            /// Default constructor
            node()
                : hash_node( 0 )
            {
                assert( is_dummy());
            }

            /// Initializes dummy node with \p nHash value
            node( size_t nHash )
                : hash_node( nHash )
            {
                assert( is_dummy());
            }

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return hash_node::is_dummy();
            }
        };
        /// \p SplitListSet internal statistics. May be used for debugging or profiling
        /**
            Template argument \p Counter defines the type of counter, default is \p cds::atomicity::event_counter.
        */
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter counter_type;                  ///< Counter type

            counter_type    m_nInsertSuccess;        ///< Count of successful insertions
            counter_type    m_nInsertFailed;         ///< Count of failed insertions
            counter_type    m_nUpdateNew;            ///< Count of new items created by the \p ensure() member function
            counter_type    m_nUpdateExist;          ///< Count of \p ensure() calls for existing items
            counter_type    m_nEraseSuccess;         ///< Count of successfully erased items
            counter_type    m_nEraseFailed;          ///< Count of attempts to erase an unknown item
            counter_type    m_nExtractSuccess;       ///< Count of successfully extracted items
            counter_type    m_nExtractFailed;        ///< Count of attempts to extract an unknown item
            counter_type    m_nFindSuccess;          ///< Count of successful finds
            counter_type    m_nFindFailed;           ///< Count of failed finds
            counter_type    m_nHeadNodeAllocated;    ///< Count of allocated head nodes
            counter_type    m_nHeadNodeFreed;        ///< Count of freed head nodes
            counter_type    m_nBucketCount;          ///< Current bucket count
            counter_type    m_nInitBucketRecursive;  ///< Count of recursive bucket initializations
            counter_type    m_nInitBucketContention; ///< Count of contentions encountered during bucket initialization
            counter_type    m_nBusyWaitBucketInit;   ///< Count of busy-wait cycles while a bucket is being initialized
            counter_type    m_nBucketsExhausted;     ///< Count of failed bucket allocations

            void onInsertSuccess()       { ++m_nInsertSuccess; }
            void onInsertFailed()        { ++m_nInsertFailed; }
            void onUpdateNew()           { ++m_nUpdateNew; }
            void onUpdateExist()         { ++m_nUpdateExist; }
            void onEraseSuccess()        { ++m_nEraseSuccess; }
            void onEraseFailed()         { ++m_nEraseFailed; }
            void onExtractSuccess()      { ++m_nExtractSuccess; }
            void onExtractFailed()       { ++m_nExtractFailed; }
            void onFindSuccess()         { ++m_nFindSuccess; }
            void onFindFailed()          { ++m_nFindFailed; }
            bool onFind( bool bSuccess )
            {
                if ( bSuccess )
                    onFindSuccess();
                else
                    onFindFailed();
                return bSuccess;
            }
            void onHeadNodeAllocated()   { ++m_nHeadNodeAllocated; }
            void onHeadNodeFreed()       { ++m_nHeadNodeFreed; }
            void onNewBucket()           { ++m_nBucketCount; }
            void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
            void onBucketInitContenton() { ++m_nInitBucketContention; }
            void onBusyWaitBucketInit()  { ++m_nBusyWaitBucketInit; }
            void onBucketsExhausted()    { ++m_nBucketsExhausted; }
        };
        /// Dummy statistics - no counting is performed, no overhead. Supports the interface of \p split_list::stat
        struct empty_stat {
            void onInsertSuccess()       const {}
            void onInsertFailed()        const {}
            void onUpdateNew()           const {}
            void onUpdateExist()         const {}
            void onEraseSuccess()        const {}
            void onEraseFailed()         const {}
            void onExtractSuccess()      const {}
            void onExtractFailed()       const {}
            void onFindSuccess()         const {}
            void onFindFailed()          const {}
            bool onFind( bool bSuccess ) const { return bSuccess; }
            void onHeadNodeAllocated()   const {}
            void onHeadNodeFreed()       const {}
            void onNewBucket()           const {}
            void onRecursiveInitBucket() const {}
            void onBucketInitContenton() const {}
            void onBusyWaitBucketInit()  const {}
            void onBucketsExhausted()    const {}
        };
        /// SplitListSet traits
        struct traits
        {
            /// Hash function
            /**
                Hash function converts the key fields of struct \p T stored in the split list
                into hash value of type \p size_t that is an index in hash table.
                By default, \p std::hash is used.
            */
            typedef opt::none hash;

            /// Item counter
            /**
                The item counting is an important part of \p SplitListSet algorithm:
                the <tt>empty()</tt> member function depends on correct item counting.
                Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

                Default is \p cds::atomicity::item_counter.
            */
            typedef cds::atomicity::item_counter item_counter;

            /// Bucket table allocator
            /**
                Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;

            /// Internal statistics (by default, disabled)
            /**
                Possible statistics types are: \p split_list::stat (enable internal statistics),
                \p split_list::empty_stat (the default, internal statistics disabled),
                user-provided class that supports \p %split_list::stat interface.
            */
            typedef split_list::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// What type of bucket table is used
            /**
                \p true - use \p split_list::expandable_bucket_table that can be expanded
                    when the load factor of the set is exceeded.
                \p false - use \p split_list::static_bucket_table that cannot be expanded
                    and is allocated in \p SplitListSet constructor.

                Default is \p true.
            */
            static const bool dynamic_bucket_table = true;

            /// Back-off strategy
            typedef cds::backoff::Default back_off;

            /// Padding; default is cache-line padding
            enum {
                padding = cds::opt::cache_line_padding
            };

            /// Free-list of auxiliary nodes
            /**
                The split-list contains auxiliary nodes that mark the start of each bucket.
                To increase performance, there is a pool of preallocated aux nodes; part of the pool
                is a free-list of aux nodes.

                Default is:
                - \p cds::intrusive::FreeList - if the architecture and/or compiler does not support double-width CAS primitive
                - \p cds::intrusive::TaggedFreeList - if the architecture and/or compiler supports double-width CAS primitive
            */
            typedef FreeListImpl free_list;
        };
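        // Usage sketch (illustrative, not part of this header): traits are normally customized by
        // inheriting from split_list::traits. `item_hash` is a hypothetical user functor returning
        // size_t for the stored item type.
        //
        //  struct my_traits: public cds::intrusive::split_list::traits
        //  {
        //      typedef item_hash                           hash;   // mandatory
        //      typedef cds::intrusive::split_list::stat<>  stat;   // enable internal statistics
        //  };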
        /// [value-option] Split-list dynamic bucket table option
        /**
            The option is used to select bucket table implementation.
            Possible values of \p Value are:
            - \p true - select \p expandable_bucket_table
            - \p false - select \p static_bucket_table
        */
        template <bool Value>
        struct dynamic_bucket_table
        {
            template <typename Base> struct pack: public Base
            {
                enum { dynamic_bucket_table = Value };
            };
        };
        /// Metafunction converting option list to \p split_list::traits
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies hash functor.
            - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
                for the default type.
            - \p opt::memory_model - C++ memory model for atomic operations.
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
            - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
                Dynamic bucket table expands its size up to maximum bucket count when necessary.
            - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
            - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
                To enable internal statistics use \p split_list::stat.
            - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding.
            - \p opt::free_list - a free-list implementation, see \p traits::free_list
        */
        template <typename... Options>
        struct make_traits {
            typedef typename cds::opt::make_options< traits, Options...>::type type;   ///< Result of metafunction
        };
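        // Usage sketch (illustrative): the same traits can be produced from an option list,
        // assuming `item_hash` is a user-provided hash functor.
        //
        //  typedef cds::intrusive::split_list::make_traits<
        //      cds::opt::hash< item_hash >
        //      , cds::opt::stat< cds::intrusive::split_list::stat<> >
        //      , cds::intrusive::split_list::dynamic_bucket_table< false >
        //  >::type my_traits;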
        /// Static bucket table
        /**
            Non-resizable bucket table for \p SplitListSet class.
            The capacity of table (max bucket count) is defined in the constructor call.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be a type based on \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class static_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR       allocator;
                typedef opt::v::relaxed_ordering    memory_model;
                typedef FreeListImpl                free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC      gc;         ///< Garbage collector
            typedef Node    node_type;  ///< Bucket node type
            typedef typename options::allocator     allocator;    ///< allocator
            typedef typename options::memory_model  memory_model; ///< Memory model for atomic operations
            typedef typename options::free_list     free_list;    ///< Free-list

            /// Auxiliary node type
            struct aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
            typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator

        protected:
            const size_t   m_nLoadFactor;  ///< Load factor (average count of items per bucket)
            const size_t   m_nCapacity;    ///< Bucket table capacity
            table_entry *  m_Table;        ///< Bucket table

            typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;

            aux_node_type*          m_auxNode;           ///< Array of pre-allocated auxiliary nodes
            atomics::atomic<size_t> m_nAuxNodeAllocated; ///< Count of allocated auxiliary nodes
            free_list               m_freeList;          ///< Free list

            void allocate_table()
            {
                m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
                m_auxNode = aux_node_allocator().allocate( m_nCapacity );
            }

            void destroy_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );
                aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
                bucket_table_allocator().Delete( m_Table, m_nCapacity );
            }
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            static_bucket_table()
                : m_nLoadFactor( 1 )
                , m_nCapacity( 512 * 1024 )
                , m_nAuxNodeAllocated( 0 )
            {
                allocate_table();
            }

            /// Creates the table with the specified size rounded up to the nearest power of two
            static_bucket_table(
                size_t nItemCount,        ///< Max expected item count in split-ordered list
                size_t nLoadFactor        ///< Load factor
                )
                : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
                , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ) )
                , m_nAuxNodeAllocated( 0 )
            {
                // m_nCapacity must be power of 2
                assert( cds::beans::is_power2( m_nCapacity ));
                allocate_table();
            }
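            // For illustration: with nItemCount = 1000 and nLoadFactor = 4 the capacity becomes
            // ceil2( 1000 / 4 ) = ceil2( 250 ) = 256 buckets - the requested size rounded up to
            // the nearest power of two.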
            /// Destroys bucket table
            ~static_bucket_table()
            {
                destroy_table();
            }

            /// Returns head node of bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                assert( nBucket < capacity());
                return m_Table[ nBucket ].load( memory_model::memory_order_acquire );
            }

            /// Set \p pNode as a head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                assert( nBucket < capacity());
                assert( bucket( nBucket ) == nullptr );

                m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
            }

            /// Allocates auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                    // alloc next free node from m_auxNode
                    size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( idx < capacity() )
                        return new( &m_auxNode[idx] ) aux_node_type();
                }

                // get from free-list
                auto pFree = m_freeList.get();
                if ( pFree )
                    return static_cast<aux_node_type*>( pFree );

                return nullptr;
            }

            /// Places auxiliary node back to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_nLoadFactor;
            }
        };
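        // Illustrative sketch (simplified) of how a split-list set is expected to use the table:
        // lazily create a bucket head the first time a bucket is touched. In the real set the dummy
        // node is also linked into the underlying ordered list before it is published here.
        //
        //  if ( table.bucket( nBucket ) == nullptr ) {
        //      auto* pDummy = table.alloc_aux_node();
        //      if ( pDummy ) {
        //          pDummy->m_nHash = split_list::dummy_hash( nBucket );
        //          table.bucket( nBucket, pDummy );    // publish the bucket head
        //      }
        //      // pDummy == nullptr means the table is exhausted
        //  }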
        /// Expandable bucket table
        /**
            This bucket table can dynamically grow its capacity when necessary
            up to maximum bucket count.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be derived from \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class expandable_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR       allocator;
                typedef opt::v::relaxed_ordering    memory_model;
                typedef FreeListImpl                free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC      gc;        ///< Garbage collector
            typedef Node    node_type; ///< Bucket node type
            typedef typename options::allocator allocator; ///< allocator

            /// Memory model for atomic operations
            typedef typename options::memory_model memory_model;

            /// Free-list
            typedef typename options::free_list free_list;

            /// Auxiliary node type
            class aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
            typedef atomics::atomic<table_entry *>   segment_type; ///< Bucket table segment type

            // Aux node segment
            struct aux_node_segment {
                atomics::atomic< size_t > aux_node_count; // how many aux nodes are allocated from the segment
                aux_node_segment*         next_segment;
                // aux_node_type nodes[];

                aux_node_segment()
                    : aux_node_count( 0 )
                    , next_segment( nullptr )
                {}

                aux_node_type* segment()
                {
                    return reinterpret_cast<aux_node_type*>( this + 1 );
                }
            };

            /// Bucket table metrics
            struct metrics {
                size_t    nSegmentCount;    ///< max count of segments in bucket table
                size_t    nSegmentSize;     ///< the segment's capacity. The capacity must be a power of two.
                size_t    nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
                size_t    nLoadFactor;      ///< load factor
                size_t    nCapacity;        ///< max capacity of bucket table

                metrics()
                    : nSegmentCount( 1024 )
                    , nSegmentSize( 512 )
                    , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ) )
                    , nLoadFactor( 1 )
                    , nCapacity( nSegmentCount * nSegmentSize )
                {}
            };

            /// Bucket table allocator
            typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;

            /// Bucket table segment allocator
            typedef cds::details::Allocator< table_entry, allocator > segment_allocator;

            // Aux node segment allocator
            typedef typename allocator::template rebind<char>::other raw_allocator;
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            expandable_bucket_table()
                : m_metrics( calc_metrics( 512 * 1024, 1 ))
            {
                init();
            }

            /// Creates the table with the specified capacity rounded up to the nearest power of two
            expandable_bucket_table(
                size_t nItemCount,        ///< Max expected item count in split-ordered list
                size_t nLoadFactor        ///< Load factor
                )
                : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
            {
                init();
            }

            /// Destroys bucket table
            ~expandable_bucket_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );

                for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
                    auto next_segment = aux_segment->next_segment;
                    free_aux_segment( aux_segment );
                    aux_segment = next_segment;
                }

                segment_type * pSegments = m_Segments;
                for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
                    table_entry* pEntry = pSegments[i].load( memory_model::memory_order_relaxed );
                    if ( pEntry != nullptr )
                        destroy_segment( pEntry );
                }

                destroy_table( pSegments );
            }
            /// Returns head node of the bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                table_entry* pSegment = m_Segments[ nSegment ].load( memory_model::memory_order_acquire );
                if ( pSegment == nullptr )
                    return nullptr;    // uninitialized bucket
                return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load( memory_model::memory_order_acquire );
            }

            /// Set \p pNode as a head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                segment_type& segment = m_Segments[nSegment];
                if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                    table_entry* pNewSegment = allocate_segment();
                    table_entry* pNull = nullptr;
                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                        destroy_segment( pNewSegment );
                    }
                }
                segment.load( memory_model::memory_order_acquire )[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
            }
            /// Allocates auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                for ( ;; ) {
                    aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_relaxed );
                    assert( aux_segment != nullptr );

                    // try to allocate from current aux segment
                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
                        size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                        if ( idx < m_metrics.nSegmentSize )
                            return new( aux_segment->segment() + idx ) aux_node_type();
                    }

                    // try allocate from free-list
                    auto pFree = m_freeList.get();
                    if ( pFree )
                        return static_cast<aux_node_type*>( pFree );

                    // free-list is empty, current segment is full
                    // try to allocate new aux segment
                    // We can allocate more aux segments than we need but it is not a problem in this context
                    aux_node_segment* new_aux_segment = allocate_aux_segment();
                    new_aux_segment->next_segment = aux_segment;
                    new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ))
                        return new( new_aux_segment->segment() ) aux_node_type();

                    free_aux_segment( new_aux_segment );
                }
            }

            /// Places auxiliary node back to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_metrics.nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_metrics.nLoadFactor;
            }
        protected:
            metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
            {
                metrics m;

                // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
                m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;

                size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
                if ( nBucketCount <= 2 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = 2;
                }
                else if ( nBucketCount <= 1024 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
                }
                else {
                    nBucketCount = beans::log2ceil( nBucketCount );
                    m.nSegmentCount =
                        m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
                    if ( nBucketCount & 1 )
                        m.nSegmentSize *= 2;
                    if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
                        m.nSegmentSize *= 2;
                }
                m.nCapacity = m.nSegmentCount * m.nSegmentSize;
                m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
                assert( m.nSegmentSizeLog2 != 0 );
                return m;
            }
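            // For illustration: nItemCount = 1000000, nLoadFactor = 1 gives nBucketCount = 1000000.
            // log2ceil( 1000000 ) = 20, so nSegmentCount = nSegmentSize = 1 << 10 = 1024; since
            // 1024 * 1024 * 1 >= 1000000 no further doubling happens, and the resulting capacity
            // is 1024 * 1024 = 1048576 buckets split over 1024 lazily allocated segments.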
            segment_type * allocate_table()
            {
                return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
            }

            void destroy_table( segment_type * pTable )
            {
                bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
            }

            table_entry* allocate_segment()
            {
                return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
            }

            void destroy_segment( table_entry* pSegment )
            {
                segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
            }

            aux_node_segment* allocate_aux_segment()
            {
                char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
                return new(p) aux_node_segment();
            }

            void free_aux_segment( aux_node_segment* p )
            {
                raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
            }

            void init()
            {
                // m_nSegmentSize must be 2**N
                assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
                assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );

                // m_nSegmentCount must be 2**K
                assert( cds::beans::is_power2( m_metrics.nSegmentCount ));

                m_Segments = allocate_table();
                m_auxNodeList = allocate_aux_segment();
            }

        protected:
            metrics const   m_metrics;      ///< Dynamic bucket table metrics
            segment_type*   m_Segments;     ///< Bucket table - array of segments
            atomics::atomic<aux_node_segment*> m_auxNodeList; ///< Segment list of aux nodes
            free_list       m_freeList;     ///< List of free aux nodes
        };
        namespace details {
            template <bool Value, typename GC, typename Node, typename... Options>
            struct bucket_table_selector;

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< true, GC, Node, Options...>
            {
                typedef expandable_bucket_table<GC, Node, Options...> type;
            };

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< false, GC, Node, Options...>
            {
                typedef static_bucket_table<GC, Node, Options...> type;
            };

            template <typename Q>
            struct search_value_type
            {
                Q&      val;
                size_t  nHash;

                search_value_type( Q& v, size_t h )
                    : val( v )
                    , nHash( h )
                {}
            };
            template <class OrderedList, class Traits, bool Iterable >
            class ordered_list_adapter;

            template <class OrderedList, class Traits>
            class ordered_list_adapter< OrderedList, Traits, false >
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::node_type      node_type;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::node_traits    native_node_traits;
                typedef typename native_ordered_list::disposer       native_disposer;

                typedef split_list::node<node_type> splitlist_node_type;

                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
                        splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
                        if ( n1->m_nHash != n2->m_nHash )
                            return n1->m_nHash < n2->m_nHash ? -1 : 1;

                        if ( n1->is_dummy() ) {
                            assert( n2->is_dummy() );
                            return 0;
                        }

                        assert( !n1->is_dummy() && !n2->is_dummy() );

                        return native_key_comparator()(v1, v2);
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return native_key_comparator()(v, q.val);
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };

                struct wrapped_disposer
                {
                    void operator()( value_type * v )
                    {
                        splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
                        if ( !p->is_dummy() )
                            native_disposer()(v);
                    }
                };

            public:
                typedef node_type           ordered_list_node_type;
                typedef splitlist_node_type aux_node;

                struct node_traits: private native_node_traits
                {
                    typedef native_node_traits              base_class;     ///< Base ordered list node traits
                    typedef typename base_class::value_type value_type;     ///< Value type
                    typedef typename base_class::node_type  base_node_type; ///< Ordered list node type
                    typedef node<base_node_type>            node_type;      ///< Split-list node type

                    /// Convert value reference to node pointer
                    static node_type * to_node_ptr( value_type& v )
                    {
                        return static_cast<node_type *>(base_class::to_node_ptr( v ));
                    }

                    /// Convert value pointer to node pointer
                    static node_type * to_node_ptr( value_type * v )
                    {
                        return static_cast<node_type *>(base_class::to_node_ptr( v ));
                    }

                    /// Convert value reference to node pointer (const version)
                    static node_type const * to_node_ptr( value_type const& v )
                    {
                        return static_cast<node_type const*>(base_class::to_node_ptr( v ));
                    }

                    /// Convert value pointer to node pointer (const version)
                    static node_type const * to_node_ptr( value_type const * v )
                    {
                        return static_cast<node_type const *>(base_class::to_node_ptr( v ));
                    }

                    /// Convert node reference to value pointer
                    static value_type * to_value_ptr( node_type& n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type &>(n) );
                    }

                    /// Convert node pointer to value pointer
                    static value_type * to_value_ptr( node_type * n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type *>(n) );
                    }

                    /// Convert node reference to value pointer (const version)
                    static const value_type * to_value_ptr( node_type const & n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type const &>(n) );
                    }

                    /// Convert node pointer to value pointer (const version)
                    static const value_type * to_value_ptr( node_type const * n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type const *>(n) );
                    }
                };

                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return base_class()(v, q.val);
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return q.nHash < n->m_nHash ? -1 : 1;

                        assert( !n->is_dummy() );
                        return base_class()(q.val, v);
                    }

                    template <typename Q1, typename Q2>
                    int operator()( Q1 const& v1, Q2 const& v2 ) const
                    {
                        return base_class()(v1, v2);
                    }
                };

                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    , opt::disposer< wrapped_disposer >
                    , opt::boundary_node_type< splitlist_node_type >
                >::type result;
            };
            template <class OrderedList, class Traits>
            class ordered_list_adapter< OrderedList, Traits, true >
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::disposer       native_disposer;

                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        hash_node const& n1 = static_cast<hash_node const&>( v1 );
                        hash_node const& n2 = static_cast<hash_node const&>( v2 );
                        if ( n1.m_nHash != n2.m_nHash )
                            return n1.m_nHash < n2.m_nHash ? -1 : 1;

                        if ( n1.is_dummy() ) {
                            assert( n2.is_dummy() );
                            return 0;
                        }

                        assert( !n1.is_dummy() && !n2.is_dummy() );

                        return native_key_comparator()(v1, v2);
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return n.m_nHash < q.nHash ? -1 : 1;

                        assert( !n.is_dummy() );
                        return native_key_comparator()(v, q.val);
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };

                struct wrapped_disposer
                {
                    void operator()( value_type * v )
                    {
                        hash_node* p = static_cast<hash_node*>( v );
                        if ( !p->is_dummy() )
                            native_disposer()(v);
                    }
                };

            public:
                typedef void ordered_list_node_type;

                struct aux_node: public native_ordered_list::node_type, public hash_node
                {
                    aux_node()
                    {
                        typedef typename native_ordered_list::node_type list_node_type;

                        list_node_type::data.store( typename list_node_type::marked_data_ptr( static_cast<value_type*>( static_cast<hash_node *>( this ))), atomics::memory_order_release );
                    }
                };

                struct node_traits
                {
                    static hash_node * to_node_ptr( value_type& v )
                    {
                        return static_cast<hash_node *>( &v );
                    }

                    static hash_node * to_node_ptr( value_type * v )
                    {
                        return static_cast<hash_node *>( v );
                    }

                    static hash_node const * to_node_ptr( value_type const& v )
                    {
                        return static_cast<hash_node const*>( &v );
                    }

                    static hash_node const * to_node_ptr( value_type const * v )
                    {
                        return static_cast<hash_node const *>( v );
                    }
                };

                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return n.m_nHash < q.nHash ? -1 : 1;

                        assert( !n.is_dummy() );
                        return base_class()(v, q.val);
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return q.nHash < n.m_nHash ? -1 : 1;

                        assert( !n.is_dummy() );
                        return base_class()(q.val, v);
                    }

                    template <typename Q1, typename Q2>
                    int operator()( Q1 const& v1, Q2 const& v2 ) const
                    {
                        return base_class()(v1, v2);
                    }
                };

                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    , opt::disposer< wrapped_disposer >
                    , opt::boundary_node_type< aux_node >
                >::type result;
            };

            template <class OrderedList, class Traits>
            using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
            template <typename OrderedList, bool IsConst>
            struct select_list_iterator;

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, false>
            {
                typedef typename OrderedList::iterator type;
            };

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, true>
            {
                typedef typename OrderedList::const_iterator type;
            };

            template <typename NodeTraits, typename OrderedList, bool IsConst>
            class iterator_type
            {
                typedef OrderedList ordered_list_type;
                friend class iterator_type <NodeTraits, OrderedList, !IsConst >;

            protected:
                typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
                typedef NodeTraits node_traits;

                list_iterator   m_itCur;
                list_iterator   m_itEnd;

            public:
                typedef typename list_iterator::value_ptr value_ptr;
                typedef typename list_iterator::value_ref value_ref;

                iterator_type()
                {}

                iterator_type( iterator_type const& src )
                    : m_itCur( src.m_itCur )
                    , m_itEnd( src.m_itEnd )
                {}

                // This ctor should be protected...
                iterator_type( list_iterator itCur, list_iterator itEnd )
                    : m_itCur( itCur )
                    , m_itEnd( itEnd )
                {
                    // skip dummy (bucket head) nodes
                    while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() )
                        ++m_itCur;
                }

                value_ptr operator ->() const
                {
                    return m_itCur.operator->();
                }

                value_ref operator *() const
                {
                    return m_itCur.operator*();
                }

                iterator_type& operator ++()
                {
                    if ( m_itCur != m_itEnd ) {
                        do {
                            ++m_itCur;
                        } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy() );
                    }
                    return *this;
                }

                iterator_type& operator = (iterator_type const& src)
                {
                    m_itCur = src.m_itCur;
                    m_itEnd = src.m_itEnd;
                    return *this;
                }

                template <bool C>
                bool operator ==( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur == i.m_itCur;
                }

                template <bool C>
                bool operator !=( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur != i.m_itCur;
                }
            };
        } // namespace details
        /// Reverses bit order in \p nHash
        static inline size_t reverse_bits( size_t nHash )
        {
            return bitop::RBO( nHash );
        }

        /// Returns split-order hash of a regular (data) node: bit-reversed \p nHash with the least significant bit set
        static inline size_t regular_hash( size_t nHash )
        {
            return reverse_bits( nHash ) | size_t(1);
        }

        /// Returns split-order hash of a dummy (bucket head) node: bit-reversed \p nHash with the least significant bit cleared
        static inline size_t dummy_hash( size_t nHash )
        {
            return reverse_bits( nHash ) & ~size_t(1);
        }
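        // For illustration (using 8-bit values for brevity): a hash of 0b00000110 bit-reversed is
        // 0b01100000; regular_hash() then sets the least significant bit (0b01100001), while
        // dummy_hash() keeps it cleared (0b01100000). So a bucket's dummy node always sorts before
        // the regular nodes belonging to that bucket, and dummy and regular hashes never collide.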
    } // namespace split_list

    // Forward declaration
    template <class GC, class OrderedList, class Traits = split_list::traits>
    class SplitListSet;

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H