/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
#define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H

#include <cds/intrusive/details/base.h>
#include <cds/algo/atomic.h>
#include <cds/algo/bit_reversal.h>
#include <cds/details/allocator.h>
#include <cds/algo/int_algo.h>
#include <cds/algo/bitop.h>
#include <cds/opt/hash.h>
#include <cds/intrusive/free_list_selector.h>

namespace cds { namespace intrusive {

    /// Split-ordered list related definitions
    /** @ingroup cds_intrusive_helper
    */
    namespace split_list {
        /// Basic hash node: keeps a split-order hash value
        struct hash_node
        {
            size_t m_nHash; ///< Hash value for node

            /// Default constructor
            hash_node()
                : m_nHash( 0 )
            {}

            /// Initializes dummy node with \p nHash value
            hash_node( size_t nHash )
                : m_nHash( nHash )
            {}

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return (m_nHash & 1) == 0;
            }
        };
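
        // Convention used throughout split-list (see regular_hash()/dummy_hash()
        // at the end of this file): hash bits are reversed; regular nodes get
        // bit 0 set (odd hash) and bucket-head dummy nodes get bit 0 clear
        // (even hash), so is_dummy() only has to test the low bit.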
        /// Split-ordered list node
        /**
            Template parameter:
            - \p OrderedListNode - node type for underlying ordered list
        */
        template <typename OrderedListNode>
        struct node: public OrderedListNode, public hash_node
        {
            typedef OrderedListNode base_class; ///< Base ordered-list node class

            /// Default constructor
            node()
            {}

            /// Initializes dummy node with \p nHash value
            node( size_t nHash )
                : hash_node( nHash )
            {}

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return hash_node::is_dummy();
            }
        };

        /// Specialization for the case when the underlying ordered list has no node type of its own
        template <>
        struct node<void>: public hash_node
        {
            /// Default constructor
            node()
            {}

            /// Initializes dummy node with \p nHash value
            node( size_t nHash )
                : hash_node( nHash )
            {}

            /// Checks if the node is dummy node
            bool is_dummy() const
            {
                return hash_node::is_dummy();
            }
        };

        /// \p SplitListSet internal statistics. May be used for debugging or profiling
        /**
            Template argument \p Counter defines the type of counter; default is \p cds::atomicity::event_counter.
        */
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter counter_type; ///< Counter type
            counter_type m_nInsertSuccess;        ///< Count of successful insertions
            counter_type m_nInsertFailed;         ///< Count of failed insertions
            counter_type m_nUpdateNew;            ///< Count of new items created by the \p ensure() member function
            counter_type m_nUpdateExist;          ///< Count of \p ensure() calls for existing items
            counter_type m_nEraseSuccess;         ///< Count of successfully erased items
            counter_type m_nEraseFailed;          ///< Count of attempts to erase unknown items
            counter_type m_nExtractSuccess;       ///< Count of successfully extracted items
            counter_type m_nExtractFailed;        ///< Count of attempts to extract unknown items
            counter_type m_nFindSuccess;          ///< Count of successful finds
            counter_type m_nFindFailed;           ///< Count of failed finds
            counter_type m_nHeadNodeAllocated;    ///< Count of allocated head nodes
            counter_type m_nHeadNodeFreed;        ///< Count of freed head nodes
            counter_type m_nBucketCount;          ///< Current bucket count
            counter_type m_nInitBucketRecursive;  ///< Count of recursive bucket initializations
            counter_type m_nInitBucketContention; ///< Count of contention encountered during bucket initialization
            counter_type m_nBusyWaitBucketInit;   ///< Count of busy-wait cycles while a bucket is initialized
            counter_type m_nBucketsExhausted;     ///< Count of failed bucket allocations
            void onInsertSuccess()       { ++m_nInsertSuccess; }
            void onInsertFailed()        { ++m_nInsertFailed; }
            void onUpdateNew()           { ++m_nUpdateNew; }
            void onUpdateExist()         { ++m_nUpdateExist; }
            void onEraseSuccess()        { ++m_nEraseSuccess; }
            void onEraseFailed()         { ++m_nEraseFailed; }
            void onExtractSuccess()      { ++m_nExtractSuccess; }
            void onExtractFailed()       { ++m_nExtractFailed; }
            void onFindSuccess()         { ++m_nFindSuccess; }
            void onFindFailed()          { ++m_nFindFailed; }
            bool onFind( bool bSuccess )
            {
                if ( bSuccess )
                    onFindSuccess();
                else
                    onFindFailed();
                return bSuccess;
            }
            void onHeadNodeAllocated()   { ++m_nHeadNodeAllocated; }
            void onHeadNodeFreed()       { ++m_nHeadNodeFreed; }
            void onNewBucket()           { ++m_nBucketCount; }
            void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
            void onBucketInitContenton() { ++m_nInitBucketContention; }
            void onBusyWaitBucketInit()  { ++m_nBusyWaitBucketInit; }
            void onBucketsExhausted()    { ++m_nBucketsExhausted; }
        };
        /// Dummy statistics - no counting is performed, no overhead. Supports the same interface as \p split_list::stat
        struct empty_stat {
            void onInsertSuccess()       const {}
            void onInsertFailed()        const {}
            void onUpdateNew()           const {}
            void onUpdateExist()         const {}
            void onEraseSuccess()        const {}
            void onEraseFailed()         const {}
            void onExtractSuccess()      const {}
            void onExtractFailed()       const {}
            void onFindSuccess()         const {}
            void onFindFailed()          const {}
            bool onFind( bool bSuccess ) const { return bSuccess; }
            void onHeadNodeAllocated()   const {}
            void onHeadNodeFreed()       const {}
            void onNewBucket()           const {}
            void onRecursiveInitBucket() const {}
            void onBucketInitContenton() const {}
            void onBusyWaitBucketInit()  const {}
            void onBucketsExhausted()    const {}
        };
        /// Option to control the bit reversal algorithm
        /**
            Bit reversal is a significant part of split-list.
            \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
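
            Usage sketch (illustration only; \p my_hash stands for a user-provided
            hash functor, an assumption of this example):
            \code
            typedef cds::intrusive::split_list::make_traits<
                cds::opt::hash< my_hash >
                , cds::intrusive::split_list::bit_reversal< cds::algo::bit_reversal::swar >
            >::type my_traits;
            \endcode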
        */
        template <typename Type>
        struct bit_reversal {
            template <typename Base>
            struct pack: public Base
            {
                typedef Type bit_reversal;
            };
        };
        /// \p SplitListSet traits
        struct traits
        {
            /// Hash function
            /**
                The hash function converts the key fields of struct \p T stored in the split list
                into a hash value of type \p size_t that is an index in the hash table.
                By default, \p std::hash is used.
            */
            typedef opt::none hash;

            /// Bit reversal algorithm
            /**
                Bit reversal is a significant part of split-list.
                There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
                \p cds::algo::bit_reversal::lookup is the best general-purpose one.

                More efficient bit reversal algorithms exist for particular processor architectures,
                for example, based on the x86 SIMD/AVX instruction set; see
                <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>.
            */
            typedef cds::algo::bit_reversal::lookup bit_reversal;

            /// Item counter
            /**
                Item counting is an important part of the \p SplitListSet algorithm:
                the <tt>empty()</tt> member function depends on correct item counting.
                Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.

                Default is \p cds::atomicity::item_counter.
            */
            typedef cds::atomicity::item_counter item_counter;

            /// Bucket table allocator
            /**
                Allocator for the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            */
            typedef CDS_DEFAULT_ALLOCATOR allocator;

            /// Internal statistics (by default, disabled)
            /**
                Possible statistics types are: \p split_list::stat (enable internal statistics),
                \p split_list::empty_stat (the default, internal statistics disabled),
                or a user-provided class that supports the \p %split_list::stat interface.
            */
            typedef split_list::empty_stat stat;

            /// C++ memory ordering model
            /**
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            */
            typedef opt::v::relaxed_ordering memory_model;

            /// What type of bucket table is used
            /**
                \p true - use \p split_list::expandable_bucket_table that can be expanded
                    if the load factor of the set is exceeded.
                \p false - use \p split_list::static_bucket_table that cannot be expanded
                    and is allocated in the \p SplitListSet constructor.

                Default is \p true.
            */
            static const bool dynamic_bucket_table = true;

            /// Back-off strategy
            typedef cds::backoff::Default back_off;

            /// Padding; default is cache-line padding
            enum {
                padding = cds::opt::cache_line_padding
            };

            /// Free-list of auxiliary nodes
            /**
                The split-list contains auxiliary nodes that mark the start of each bucket.
                To increase performance, there is a pool of preallocated aux nodes; part of that pool
                is organized as a free-list of aux nodes.

                Default is:
                - \p cds::intrusive::FreeList - if the architecture and/or compiler does not support the double-width CAS primitive
                - \p cds::intrusive::TaggedFreeList - if the architecture and/or compiler supports the double-width CAS primitive
            */
            typedef FreeListImpl free_list;
        };
        /// [value-option] Split-list dynamic bucket table option
        /**
            The option is used to select the bucket table implementation.
            Possible values of \p Value are:
            - \p true - select \p expandable_bucket_table
            - \p false - select \p static_bucket_table
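
            A usage sketch (illustration only; \p my_hash is an assumed user-provided hash functor):
            \code
            typedef cds::intrusive::split_list::make_traits<
                cds::opt::hash< my_hash >
                , cds::intrusive::split_list::dynamic_bucket_table< false > // use the static bucket table
            >::type my_traits;
            \endcode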
        */
        template <bool Value>
        struct dynamic_bucket_table
        {
            template <typename Base> struct pack: public Base
            {
                enum { dynamic_bucket_table = Value };
            };
        };
        /// Metafunction converting an option list to \p split_list::traits
        /**
            Available \p Options:
            - \p opt::hash - mandatory option, specifies the hash functor.
            - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation;
                default is \p cds::algo::bit_reversal::lookup
            - \p opt::item_counter - optional, specifies the item counting policy. See \p traits::item_counter
                for the default type.
            - \p opt::memory_model - C++ memory model for atomic operations.
                Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
                or \p opt::v::sequential_consistent (sequentially consistent memory model).
            - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
            - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
                The dynamic bucket table expands its size up to the maximum bucket count when necessary.
            - \p opt::back_off - back-off strategy used for spinning; default is \p cds::backoff::Default.
            - \p opt::stat - internal statistics; default is \p split_list::empty_stat (disabled).
                To enable internal statistics use \p split_list::stat.
            - \p opt::padding - padding to solve false-sharing issues; default is cache-line padding.
            - \p opt::free_list - a free-list implementation, see \p traits::free_list.
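
            A usage sketch (illustration only; \p my_hash stands for a user-provided
            hash functor):
            \code
            typedef cds::intrusive::split_list::make_traits<
                cds::opt::hash< my_hash >
                , cds::opt::item_counter< cds::atomicity::item_counter >
                , cds::opt::stat< cds::intrusive::split_list::stat<> >
            >::type my_traits;
            \endcode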
        */
        template <typename... Options>
        struct make_traits {
            typedef typename cds::opt::make_options< traits, Options...>::type type; ///< Result of metafunction
        };
        /// Static bucket table
        /**
            Non-resizeable bucket table for \p SplitListSet class.
            The capacity of the table (maximum bucket count) is defined in the constructor call.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be a type based on \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class static_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef typename options::allocator    allocator;    ///< Allocator
            typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
            typedef typename options::free_list    free_list;    ///< Free-list

            /// Auxiliary node type
            struct aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
            typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator

        protected:
            const size_t m_nLoadFactor; ///< Load factor (average count of items per bucket)
            const size_t m_nCapacity;   ///< Bucket table capacity
            table_entry* m_Table;       ///< Bucket table

            typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator; ///< Aux node allocator

            aux_node_type*          m_auxNode;           ///< Array of pre-allocated auxiliary nodes
            atomics::atomic<size_t> m_nAuxNodeAllocated; ///< Number of auxiliary nodes allocated so far
            free_list               m_freeList;          ///< Free list

            void allocate_table()
            {
                m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
                m_auxNode = aux_node_allocator().allocate( m_nCapacity );
            }

            void destroy_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );
                aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
                bucket_table_allocator().Delete( m_Table, m_nCapacity );
            }
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            static_bucket_table()
                : m_nLoadFactor( 1 )
                , m_nCapacity( 512 * 1024 )
                , m_nAuxNodeAllocated( 0 )
            {
                allocate_table();
            }

            /// Creates the table with specified size rounded up to the nearest power of two
            static_bucket_table(
                size_t nItemCount,  ///< Max expected item count in split-ordered list
                size_t nLoadFactor  ///< Load factor
            )
                : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
                , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
                , m_nAuxNodeAllocated( 0 )
            {
                // m_nCapacity must be power of 2
                assert( cds::beans::is_power2( m_nCapacity ));
                allocate_table();
            }
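
            // Sizing illustration (not part of the API): nItemCount = 1000 and
            // nLoadFactor = 2 give m_nCapacity = ceil2( 1000 / 2 ) = 512 buckets.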
            /// Destroys bucket table
            ~static_bucket_table()
            {
                destroy_table();
            }

            /// Returns head node of bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                assert( nBucket < capacity());
                return m_Table[ nBucket ].load( memory_model::memory_order_acquire );
            }

            /// Sets \p pNode as the head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                assert( nBucket < capacity());
                assert( bucket( nBucket ) == nullptr );

                m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
            }

            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                    // allocate the next free node from the pre-allocated array m_auxNode
                    size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
                    if ( idx < capacity())
                        return new( &m_auxNode[idx] ) aux_node_type();
                }

                // the array is exhausted; try the free-list
                auto pFree = m_freeList.get();
                if ( pFree )
                    return static_cast<aux_node_type*>( pFree );
                return nullptr;
            }

            /// Places an auxiliary node to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_nLoadFactor;
            }
        };
        /// Expandable bucket table
        /**
            This bucket table can dynamically grow its capacity when necessary,
            up to the maximum bucket count.

            Template parameters:
            - \p GC - garbage collector
            - \p Node - node type, must be derived from \p split_list::node
            - \p Options... - options

            \p Options are:
            - \p opt::allocator - allocator used to allocate the bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
            - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
            - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
                otherwise \p FreeList.
        */
        template <typename GC, typename Node, typename... Options>
        class expandable_bucket_table
        {
            struct default_options
            {
                typedef CDS_DEFAULT_ALLOCATOR allocator;
                typedef opt::v::relaxed_ordering memory_model;
                typedef FreeListImpl free_list;
            };
            typedef typename opt::make_options< default_options, Options... >::type options;

        public:
            typedef GC   gc;        ///< Garbage collector
            typedef Node node_type; ///< Bucket node type
            typedef typename options::allocator allocator; ///< Allocator

            /// Memory model for atomic operations
            typedef typename options::memory_model memory_model;

            /// Free-list
            typedef typename options::free_list free_list;

            /// Auxiliary node type
            class aux_node_type: public node_type, public free_list::node
            {};

            typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
            typedef atomics::atomic<table_entry *>   segment_type; ///< Bucket table segment type

        protected:
            /// Segment of auxiliary nodes
            struct aux_node_segment {
                atomics::atomic< size_t > aux_node_count; // how many aux nodes have been allocated from the segment
                aux_node_segment*         next_segment;
                // aux_node_type nodes[]; - the nodes follow this header in the same raw memory block

                aux_node_segment()
                    : aux_node_count( 0 )
                    , next_segment( nullptr )
                {}

                aux_node_type* segment()
                {
                    return reinterpret_cast<aux_node_type*>( this + 1 );
                }
            };

            /// Bucket table metrics
            struct metrics {
                size_t nSegmentCount;    ///< Max count of segments in the bucket table
                size_t nSegmentSize;     ///< Segment capacity; must be a power of two
                size_t nSegmentSizeLog2; ///< <tt>log2( nSegmentSize )</tt>
                size_t nLoadFactor;      ///< Load factor
                size_t nCapacity;        ///< Max capacity of the bucket table

                metrics()
                    : nSegmentCount( 1024 )
                    , nSegmentSize( 512 )
                    , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
                    , nLoadFactor( 1 )
                    , nCapacity( nSegmentCount * nSegmentSize )
                {}
            };

            /// Bucket table allocator
            typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;

            /// Bucket table segment allocator
            typedef cds::details::Allocator< table_entry, allocator > segment_allocator;

            /// Aux node segment allocator
            typedef typename allocator::template rebind<char>::other raw_allocator;
        public:
            /// Constructs bucket table for 512K buckets. Load factor is 1.
            expandable_bucket_table()
                : m_metrics( calc_metrics( 512 * 1024, 1 ))
            {
                init();
            }

            /// Creates the table with specified capacity rounded up to the nearest power of two
            expandable_bucket_table(
                size_t nItemCount, ///< Max expected item count in split-ordered list
                size_t nLoadFactor ///< Load factor
            )
                : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
            {
                init();
            }
            /// Destroys bucket table
            ~expandable_bucket_table()
            {
                m_freeList.clear( []( typename free_list::node* ) {} );

                for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
                    auto next_segment = aux_segment->next_segment;
                    free_aux_segment( aux_segment );
                    aux_segment = next_segment;
                }

                segment_type* pSegments = m_Segments;
                for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
                    table_entry* pEntry = pSegments[i].load( memory_model::memory_order_relaxed );
                    if ( pEntry != nullptr )
                        destroy_segment( pEntry );
                }

                destroy_table( pSegments );
            }

            /// Returns the head node of bucket \p nBucket
            aux_node_type * bucket( size_t nBucket ) const
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                table_entry* pSegment = m_Segments[ nSegment ].load( memory_model::memory_order_acquire );
                if ( pSegment == nullptr )
                    return nullptr; // uninitialized bucket
                return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load( memory_model::memory_order_acquire );
            }

            /// Sets \p pNode as the head of bucket \p nBucket
            void bucket( size_t nBucket, aux_node_type * pNode )
            {
                size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
                assert( nSegment < m_metrics.nSegmentCount );

                segment_type& segment = m_Segments[nSegment];
                if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
                    table_entry* pNewSegment = allocate_segment();
                    table_entry* pNull = nullptr;
                    if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                        destroy_segment( pNewSegment );
                }

                assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
                segment.load( memory_model::memory_order_acquire )[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
            }
            /// Allocates an auxiliary node; can return \p nullptr if the table is exhausted
            aux_node_type* alloc_aux_node()
            {
                aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );

                for ( ;; ) {
                    assert( aux_segment != nullptr );

                    // try to allocate from the current aux segment
                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
                        size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                        if ( idx < m_metrics.nSegmentSize )
                            return new( aux_segment->segment() + idx ) aux_node_type();
                    }

                    // try to allocate from the free-list
                    auto pFree = m_freeList.get();
                    if ( pFree )
                        return static_cast<aux_node_type*>( pFree );

                    // The free-list is empty and the current segment is full:
                    // try to allocate a new aux segment.
                    // We can allocate more aux segments than we need, but that is not a problem in this context
                    aux_node_segment* new_aux_segment = allocate_aux_segment();
                    new_aux_segment->next_segment = aux_segment;
                    new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
                    CDS_COMPILER_RW_BARRIER;

                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
                        return new( new_aux_segment->segment()) aux_node_type();

                    // another thread installed its own segment; retire ours and retry
                    free_aux_segment( new_aux_segment );
                }
            }

            /// Places an auxiliary node to the free-list
            void free_aux_node( aux_node_type* p )
            {
                m_freeList.put( static_cast<aux_node_type*>( p ));
            }

            /// Returns the capacity of the bucket table
            size_t capacity() const
            {
                return m_metrics.nCapacity;
            }

            /// Returns the load factor, i.e. average count of items per bucket
            size_t load_factor() const
            {
                return m_metrics.nLoadFactor;
            }
        protected:
            metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
            {
                metrics m;

                // Calculate nSegmentSize and nSegmentCount from nItemCount
                m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;

                size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
                if ( nBucketCount <= 2 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = 2;
                }
                else if ( nBucketCount <= 1024 ) {
                    m.nSegmentCount = 1;
                    m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
                }
                else {
                    nBucketCount = beans::log2ceil( nBucketCount );
                    m.nSegmentCount =
                        m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
                    if ( nBucketCount & 1 )
                        m.nSegmentSize *= 2;
                    if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
                        m.nSegmentSize *= 2;
                }
                m.nCapacity = m.nSegmentCount * m.nSegmentSize;
                m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
                assert( m.nSegmentSizeLog2 != 0 );
                return m;
            }
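
            // Worked example (illustration only): nItemCount = 1000000, nLoadFactor = 1
            // gives nBucketCount = 1000000; log2ceil( 1000000 ) = 20, so
            // nSegmentCount = nSegmentSize = size_t(1) << 10 = 1024, and
            // nCapacity = 1024 * 1024 = 1048576 buckets >= nItemCount.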
            segment_type* allocate_table()
            {
                return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
            }

            void destroy_table( segment_type* pTable )
            {
                bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
            }

            table_entry* allocate_segment()
            {
                return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
            }

            void destroy_segment( table_entry* pSegment )
            {
                segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
            }

            aux_node_segment* allocate_aux_segment()
            {
                char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
                return new(p) aux_node_segment();
            }
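
            // Layout note: allocate_aux_segment() carves the segment header and
            // its m_metrics.nSegmentSize aux nodes out of a single raw block;
            // aux_node_segment::segment() returns the address just past the
            // header, where the node array begins.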
            void free_aux_segment( aux_node_segment* p )
            {
                raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
            }

            void init()
            {
                // m_metrics.nSegmentSize must be 2**N
                assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
                assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );

                // m_metrics.nSegmentCount must be 2**K
                assert( cds::beans::is_power2( m_metrics.nSegmentCount ));

                m_Segments = allocate_table();
                m_auxNodeList = allocate_aux_segment();
            }

        protected:
            metrics const m_metrics;  ///< Dynamic bucket table metrics
            segment_type* m_Segments; ///< Bucket table - array of segments
            atomics::atomic<aux_node_segment*> m_auxNodeList; ///< List of aux node segments
            free_list     m_freeList; ///< List of free aux nodes
        };
        namespace details {
            template <bool Value, typename GC, typename Node, typename... Options>
            struct bucket_table_selector;

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< true, GC, Node, Options...>
            {
                typedef expandable_bucket_table<GC, Node, Options...> type;
            };

            template <typename GC, typename Node, typename... Options>
            struct bucket_table_selector< false, GC, Node, Options...>
            {
                typedef static_bucket_table<GC, Node, Options...> type;
            };

            template <typename Q>
            struct search_value_type
            {
                Q&     val;
                size_t nHash;

                search_value_type( Q& v, size_t h )
                    : val( v )
                    , nHash( h )
                {}
            };
            template <class OrderedList, class Traits, bool Iterable>
            class ordered_list_adapter;

            template <class OrderedList, class Traits>
            class ordered_list_adapter< OrderedList, Traits, false >
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::node_type      node_type;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::node_traits    native_node_traits;
                typedef typename native_ordered_list::disposer       native_disposer;

                typedef split_list::node<node_type> splitlist_node_type;
                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        splitlist_node_type const* n1 = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( v1 ));
                        splitlist_node_type const* n2 = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( v2 ));
                        if ( n1->m_nHash != n2->m_nHash )
                            return n1->m_nHash < n2->m_nHash ? -1 : 1;

                        if ( n1->is_dummy()) {
                            assert( n2->is_dummy());
                            return 0;
                        }

                        assert( !n1->is_dummy() && !n2->is_dummy());

                        return native_key_comparator()( v1, v2 );
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const* n = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return native_key_comparator()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };

                struct wrapped_disposer
                {
                    void operator()( value_type* v )
                    {
                        splitlist_node_type* p = static_cast<splitlist_node_type*>( native_node_traits::to_node_ptr( v ));
                        if ( !p->is_dummy())
                            native_disposer()( v );
                    }
                };

            public:
                typedef node_type           ordered_list_node_type;
                typedef splitlist_node_type aux_node;
                struct node_traits: private native_node_traits
                {
                    typedef native_node_traits base_class;                  ///< Base ordered-list node traits
                    typedef typename base_class::value_type value_type;    ///< Value type
                    typedef typename base_class::node_type base_node_type; ///< Ordered list node type
                    typedef node<base_node_type> node_type;                ///< Split-list node type

                    /// Convert value reference to node pointer
                    static node_type* to_node_ptr( value_type& v )
                    {
                        return static_cast<node_type*>( base_class::to_node_ptr( v ));
                    }

                    /// Convert value pointer to node pointer
                    static node_type* to_node_ptr( value_type* v )
                    {
                        return static_cast<node_type*>( base_class::to_node_ptr( v ));
                    }

                    /// Convert value reference to node pointer (const version)
                    static node_type const* to_node_ptr( value_type const& v )
                    {
                        return static_cast<node_type const*>( base_class::to_node_ptr( v ));
                    }

                    /// Convert value pointer to node pointer (const version)
                    static node_type const* to_node_ptr( value_type const* v )
                    {
                        return static_cast<node_type const*>( base_class::to_node_ptr( v ));
                    }

                    /// Convert node reference to value pointer
                    static value_type* to_value_ptr( node_type& n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type&>( n ));
                    }

                    /// Convert node pointer to value pointer
                    static value_type* to_value_ptr( node_type* n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type*>( n ));
                    }

                    /// Convert node reference to value pointer (const version)
                    static const value_type* to_value_ptr( node_type const& n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type const&>( n ));
                    }

                    /// Convert node pointer to value pointer (const version)
                    static const value_type* to_value_ptr( node_type const* n )
                    {
                        return base_class::to_value_ptr( static_cast<base_node_type const*>( n ));
                    }
                };
                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        splitlist_node_type const* n = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return n->m_nHash < q.nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return base_class()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        splitlist_node_type const* n = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( v ));
                        if ( n->m_nHash != q.nHash )
                            return q.nHash < n->m_nHash ? -1 : 1;

                        assert( !n->is_dummy());
                        return base_class()( q.val, v );
                    }

                    int operator()( value_type const& lhs, value_type const& rhs ) const
                    {
                        splitlist_node_type const* n1 = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( lhs ));
                        splitlist_node_type const* n2 = static_cast<splitlist_node_type const*>( native_node_traits::to_node_ptr( rhs ));
                        if ( n1->m_nHash != n2->m_nHash )
                            return n1->m_nHash < n2->m_nHash ? -1 : 1;

                        if ( n1->is_dummy()) {
                            assert( n2->is_dummy());
                            return 0;
                        }

                        assert( !n1->is_dummy() && !n2->is_dummy());

                        return native_key_comparator()( lhs, rhs );
                    }
                };

                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    , opt::disposer< wrapped_disposer >
                    , opt::boundary_node_type< splitlist_node_type >
                >::type result;
            };
            template <class OrderedList, class Traits>
            class ordered_list_adapter< OrderedList, Traits, true >
            {
                typedef OrderedList native_ordered_list;
                typedef Traits      traits;

                typedef typename native_ordered_list::gc             gc;
                typedef typename native_ordered_list::key_comparator native_key_comparator;
                typedef typename native_ordered_list::value_type     value_type;
                typedef typename native_ordered_list::disposer       native_disposer;

                struct key_compare {
                    int operator()( value_type const& v1, value_type const& v2 ) const
                    {
                        hash_node const& n1 = static_cast<hash_node const&>( v1 );
                        hash_node const& n2 = static_cast<hash_node const&>( v2 );
                        if ( n1.m_nHash != n2.m_nHash )
                            return n1.m_nHash < n2.m_nHash ? -1 : 1;

                        if ( n1.is_dummy()) {
                            assert( n2.is_dummy());
                            return 0;
                        }

                        assert( !n1.is_dummy() && !n2.is_dummy());

                        return native_key_comparator()( v1, v2 );
                    }

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return n.m_nHash < q.nHash ? -1 : 1;

                        assert( !n.is_dummy());
                        return native_key_comparator()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        return -operator()( v, q );
                    }
                };

                struct wrapped_disposer
                {
                    void operator()( value_type* v )
                    {
                        if ( !static_cast<hash_node*>( v )->is_dummy())
                            native_disposer()( v );
                    }
                };
            public:
                typedef void ordered_list_node_type;

                struct aux_node: public native_ordered_list::node_type, public hash_node
                {
                    aux_node()
                    {
                        typedef typename native_ordered_list::node_type list_node_type;

                        list_node_type::data.store(
                            typename list_node_type::marked_data_ptr(
                                static_cast<value_type*>( static_cast<hash_node*>( this ))),
                            atomics::memory_order_release );
                    }
                };

                struct node_traits
                {
                    static hash_node* to_node_ptr( value_type& v )
                    {
                        return static_cast<hash_node*>( &v );
                    }

                    static hash_node* to_node_ptr( value_type* v )
                    {
                        return static_cast<hash_node*>( v );
                    }

                    static hash_node const* to_node_ptr( value_type const& v )
                    {
                        return static_cast<hash_node const*>( &v );
                    }

                    static hash_node const* to_node_ptr( value_type const* v )
                    {
                        return static_cast<hash_node const*>( v );
                    }
                };
                template <typename Less>
                struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
                {
                    typedef cds::opt::details::make_comparator_from_less<Less> base_class;

                    template <typename Q>
                    int operator()( value_type const& v, search_value_type<Q> const& q ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return n.m_nHash < q.nHash ? -1 : 1;

                        assert( !n.is_dummy());
                        return base_class()( v, q.val );
                    }

                    template <typename Q>
                    int operator()( search_value_type<Q> const& q, value_type const& v ) const
                    {
                        hash_node const& n = static_cast<hash_node const&>( v );
                        if ( n.m_nHash != q.nHash )
                            return q.nHash < n.m_nHash ? -1 : 1;

                        assert( !n.is_dummy());
                        return base_class()( q.val, v );
                    }

                    int operator()( value_type const& lhs, value_type const& rhs ) const
                    {
                        hash_node const& n1 = static_cast<hash_node const&>( lhs );
                        hash_node const& n2 = static_cast<hash_node const&>( rhs );
                        if ( n1.m_nHash != n2.m_nHash )
                            return n1.m_nHash < n2.m_nHash ? -1 : 1;

                        if ( n1.is_dummy()) {
                            assert( n2.is_dummy());
                            return 0;
                        }

                        assert( !n1.is_dummy() && !n2.is_dummy());

                        return base_class()( lhs, rhs );
                    }
                };
                typedef typename native_ordered_list::template rebind_traits<
                    opt::compare< key_compare >
                    , opt::disposer< wrapped_disposer >
                    , opt::boundary_node_type< aux_node >
                >::type result;
            };

            template <class OrderedList, class Traits>
            using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
            template <typename OrderedList, bool IsConst>
            struct select_list_iterator;

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, false>
            {
                typedef typename OrderedList::iterator type;
            };

            template <typename OrderedList>
            struct select_list_iterator<OrderedList, true>
            {
                typedef typename OrderedList::const_iterator type;
            };
            template <typename NodeTraits, typename OrderedList, bool IsConst>
            class iterator_type
            {
                typedef OrderedList ordered_list_type;
                friend class iterator_type<NodeTraits, OrderedList, !IsConst>;

            protected:
                typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
                typedef NodeTraits node_traits;

            private:
                list_iterator m_itCur;
                list_iterator m_itEnd;

            public:
                typedef typename list_iterator::value_ptr value_ptr;
                typedef typename list_iterator::value_ref value_ref;

            public:
                iterator_type()
                {}

                iterator_type( iterator_type const& src )
                    : m_itCur( src.m_itCur )
                    , m_itEnd( src.m_itEnd )
                {}

                // This ctor should be protected...
                iterator_type( list_iterator itCur, list_iterator itEnd )
                    : m_itCur( itCur )
                    , m_itEnd( itEnd )
                {
                    // skip dummy nodes
                    while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
                        ++m_itCur;
                }

                value_ptr operator ->() const { return m_itCur.operator->(); }
                value_ref operator *() const  { return m_itCur.operator*(); }

                /// Pre-increment; skips dummy nodes
                iterator_type& operator ++()
                {
                    if ( m_itCur != m_itEnd ) {
                        do {
                            ++m_itCur;
                        } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
                    }
                    return *this;
                }

                iterator_type& operator =( iterator_type const& src )
                {
                    m_itCur = src.m_itCur;
                    m_itEnd = src.m_itEnd;
                    return *this;
                }

                template <bool C>
                bool operator ==( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur == i.m_itCur;
                }

                template <bool C>
                bool operator !=( iterator_type<node_traits, ordered_list_type, C> const& i ) const
                {
                    return m_itCur != i.m_itCur;
                }

            protected:
                list_iterator const& underlying_iterator() const
                {
                    return m_itCur;
                }
            };
        } // namespace details
        /// Computes the split-order hash of a regular node: reversed bits with the low bit set
        template <typename BitReversalAlgo>
        static inline size_t regular_hash( size_t nHash )
        {
            return BitReversalAlgo()( nHash ) | size_t(1);
        }

        /// Computes the split-order hash of a dummy (bucket head) node: reversed bits with the low bit clear
        template <typename BitReversalAlgo>
        static inline size_t dummy_hash( size_t nHash )
        {
            return BitReversalAlgo()( nHash ) & ~size_t(1);
        }
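
        // Illustration (assuming 8-bit hashes for readability, a simplification):
        // bit-reversing nHash = 0b00000110 gives 0b01100000; regular_hash() sets
        // the low bit -> 0b01100001 (odd, regular node), while dummy_hash()
        // clears it -> 0b01100000 (even, dummy node), which is exactly what
        // hash_node::is_dummy() tests.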
    } // namespace split_list

    // Forward declaration
    template <class GC, class OrderedList, class Traits = split_list::traits>
    class SplitListSet;

}} // namespace cds::intrusive

#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H