2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/algo/bit_reversal.h>
37 #include <cds/details/allocator.h>
38 #include <cds/algo/int_algo.h>
39 #include <cds/algo/bitop.h>
40 #include <cds/opt/hash.h>
41 #include <cds/intrusive/free_list_selector.h>
43 namespace cds { namespace intrusive {
45 /// Split-ordered list related definitions
46 /** @ingroup cds_intrusive_helper
48 namespace split_list {
52 size_t m_nHash; ///< Hash value for node
54 /// Default constructor
61 /// Initializes dummy node with \p nHash value
62 explicit hash_node( size_t nHash )
68 /// Checks if the node is dummy node
71 return (m_nHash & 1) == 0;
76 /// Split-ordered list node
79 - \p OrderedListNode - node type for underlying ordered list
81 template <typename OrderedListNode>
82 struct node: public OrderedListNode, public hash_node
85 typedef OrderedListNode base_class;
88 /// Default constructor
95 /// Initializes dummy node with \p nHash value
96 explicit node( size_t nHash )
102 /// Checks if the node is dummy node
103 bool is_dummy() const
105 return hash_node::is_dummy();
112 struct node<void>: public hash_node
121 /// Initializes dummy node with \p nHash value
122 explicit node( size_t nHash )
128 /// Checks if the node is dummy node
129 bool is_dummy() const
131 return hash_node::is_dummy();
136 /// \p SplitListSet internal statistics. May be used for debugging or profiling
138 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
140 template <typename Counter = cds::atomicity::event_counter >
143 typedef Counter counter_type; ///< Counter type
145 counter_type m_nInsertSuccess; ///< Count of success inserting
146 counter_type m_nInsertFailed; ///< Count of failed inserting
147 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
148 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
149 counter_type m_nEraseSuccess; ///< Count of success erasing of items
150 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
151 counter_type m_nExtractSuccess; ///< Count of success extracting of items
152 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
153 counter_type m_nFindSuccess; ///< Count of success finding
154 counter_type m_nFindFailed; ///< Count of failed finding
155 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
156 counter_type m_nHeadNodeFreed; ///< Count of freed head node
157 counter_type m_nBucketCount; ///< Current bucket count
158 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
159 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
160 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
161 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation
164 void onInsertSuccess() { ++m_nInsertSuccess; }
165 void onInsertFailed() { ++m_nInsertFailed; }
166 void onUpdateNew() { ++m_nUpdateNew; }
167 void onUpdateExist() { ++m_nUpdateExist; }
168 void onEraseSuccess() { ++m_nEraseSuccess; }
169 void onEraseFailed() { ++m_nEraseFailed; }
170 void onExtractSuccess() { ++m_nExtractSuccess; }
171 void onExtractFailed() { ++m_nExtractFailed; }
172 void onFindSuccess() { ++m_nFindSuccess; }
173 void onFindFailed() { ++m_nFindFailed; }
174 bool onFind(bool bSuccess)
182 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
183 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
184 void onNewBucket() { ++m_nBucketCount; }
185 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
186 void onBucketInitContenton() { ++m_nInitBucketContention; }
187 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
188 void onBucketsExhausted() { ++m_nBucketsExhausted; }
192 /// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p split_list::stat
195 void onInsertSuccess() const {}
196 void onInsertFailed() const {}
197 void onUpdateNew() const {}
198 void onUpdateExist() const {}
199 void onEraseSuccess() const {}
200 void onEraseFailed() const {}
201 void onExtractSuccess() const {}
202 void onExtractFailed() const {}
203 void onFindSuccess() const {}
204 void onFindFailed() const {}
205 bool onFind( bool bSuccess ) const { return bSuccess; }
206 void onHeadNodeAllocated() const {}
207 void onHeadNodeFreed() const {}
208 void onNewBucket() const {}
209 void onRecursiveInitBucket() const {}
210 void onBucketInitContenton() const {}
211 void onBusyWaitBucketInit() const {}
212 void onBucketsExhausted() const {}
216 /// Option to control bit reversal algorithm
218 Bit reversal is a significant part of split-list.
219 \p Type can be one of predefined algorithm in \p cds::algo::bit_reversal namespace.
221 template <typename Type>
222 struct bit_reversal {
224 template <typename Base>
225 struct pack: public Base
227 typedef Type bit_reversal;
232 /// SplitListSet traits
237 Hash function converts the key fields of struct \p T stored in the split list
238 into hash value of type \p size_t that is an index in hash table.
239 By default, \p std::hash is used.
241 typedef opt::none hash;
243 /// Bit reversal algorithm
245 Bit reversal is a significant part of split-list.
246 There are several predefined algorithm in \p cds::algo::bit_reversal namespace,
247 \p cds::algo::bit_reversal::lookup is the best general purpose one.
249 There are more efficient bit reversal algoritm for particular processor architecture,
250 for example, based on x86 SIMD/AVX instruction set, see <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>
252 typedef cds::algo::bit_reversal::lookup bit_reversal;
256 The item counting is an important part of \p SplitListSet algorithm:
257 the <tt>empty()</tt> member function depends on correct item counting.
258 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
260 Default is \p cds::atomicity::item_counter.
262 typedef cds::atomicity::item_counter item_counter;
264 /// Bucket table allocator
266 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
268 typedef CDS_DEFAULT_ALLOCATOR allocator;
270 /// Internal statistics (by default, disabled)
272 Possible statistics types are: \p split_list::stat (enable internal statistics),
273 \p split_list::empty_stat (the default, internal statistics disabled),
274 user-provided class that supports \p %split_list::stat interface.
276 typedef split_list::empty_stat stat;
279 /// C++ memory ordering model
281 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
282 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
284 typedef opt::v::relaxed_ordering memory_model;
286 /// What type of bucket table is used
288 \p true - use \p split_list::expandable_bucket_table that can be expanded
289 if the load factor of the set is exhausted.
290 \p false - use \p split_list::static_bucket_table that cannot be expanded
291 and is allocated in \p SplitListSet constructor.
295 static const bool dynamic_bucket_table = true;
297 /// Back-off strategy
298 typedef cds::backoff::Default back_off;
300 /// Padding; default is cache-line padding
302 padding = cds::opt::cache_line_padding
305 /// Free-list of auxiliary nodes
307 The split-list contains auxiliary nodes marked the start of buckets.
308 To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list
312 - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
313 - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
315 typedef FreeListImpl free_list;
318 /// [value-option] Split-list dynamic bucket table option
320 The option is used to select bucket table implementation.
321 Possible values of \p Value are:
322 - \p true - select \p expandable_bucket_table
323 - \p false - select \p static_bucket_table
325 template <bool Value>
326 struct dynamic_bucket_table
329 template <typename Base> struct pack: public Base
331 enum { dynamic_bucket_table = Value };
336 /// Metafunction converting option list to \p split_list::traits
338 Available \p Options:
339 - \p opt::hash - mandatory option, specifies hash functor.
340 - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation
341 default is \p cds::algo::bit_reversal::lookup
342 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
344 - \p opt::memory_model - C++ memory model for atomic operations.
345 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
346 or \p opt::v::sequential_consistent (sequentially consistent memory model).
347 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
348 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
349 Dynamic bucket table expands its size up to maximum bucket count when necessary
350 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
351 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
352 To enable internal statistics use \p split_list::stat.
353 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
354 - \p opt::free_list - a free-list implementation, see \p traits::free_list
356 template <typename... Options>
358 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
361 /// Static bucket table
363 Non-resizeable bucket table for \p SplitListSet class.
364 The capacity of table (max bucket count) is defined in the constructor call.
367 - \p GC - garbage collector
368 - \p Node - node type, must be a type based on \p split_list::node
369 - \p Options... - options
372 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
373 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
374 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
375 otherwise \p FreeList.
377 template <typename GC, typename Node, typename... Options>
378 class static_bucket_table
381 struct default_options
383 typedef CDS_DEFAULT_ALLOCATOR allocator;
384 typedef opt::v::relaxed_ordering memory_model;
385 typedef FreeListImpl free_list;
387 typedef typename opt::make_options< default_options, Options... >::type options;
391 typedef GC gc; ///< Garbage collector
392 typedef Node node_type; ///< Bucket node type
393 typedef typename options::allocator allocator; ///< allocator
394 typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
395 typedef typename options::free_list free_list; ///< Free-list
397 /// Auxiliary node type
398 struct aux_node_type: public node_type, public free_list::node
401 atomics::atomic<bool> m_busy;
405 m_busy.store( false, atomics::memory_order_release );
410 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
411 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
415 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
416 const size_t m_nCapacity; ///< Bucket table capacity
417 table_entry * m_Table; ///< Bucket table
419 typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;
421 aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
422 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary node allocated
423 free_list m_freeList; ///< Free list
428 void allocate_table()
430 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
431 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
436 m_freeList.clear( []( typename free_list::node* ) {} );
437 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
438 bucket_table_allocator().Delete( m_Table, m_nCapacity );
443 /// Constructs bucket table for 512K buckets. Load factor is 1.
444 static_bucket_table()
446 , m_nCapacity( 512 * 1024 )
447 , m_nAuxNodeAllocated( 0 )
452 /// Creates the table with specified size rounded up to nearest power-of-two
454 size_t nItemCount, ///< Max expected item count in split-ordered list
455 size_t nLoadFactor ///< Load factor
457 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
458 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
459 , m_nAuxNodeAllocated( 0 )
461 // m_nCapacity must be power of 2
462 assert( cds::beans::is_power2( m_nCapacity ));
466 /// Destroys bucket table
467 ~static_bucket_table()
472 /// Returns head node of bucket \p nBucket
473 aux_node_type * bucket( size_t nBucket ) const
475 assert( nBucket < capacity());
476 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
479 /// Set \p pNode as a head of bucket \p nBucket
480 void bucket( size_t nBucket, aux_node_type * pNode )
482 assert( nBucket < capacity());
483 assert( bucket( nBucket ) == nullptr );
485 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
488 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
489 aux_node_type* alloc_aux_node()
491 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
492 // alloc next free node from m_auxNode
493 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
494 if ( idx < capacity() ) {
495 CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
496 return new( &m_auxNode[idx] ) aux_node_type();
500 // get from free-list
501 auto pFree = m_freeList.get();
503 return static_cast<aux_node_type*>( pFree );
509 /// Places node type to free-list
510 void free_aux_node( aux_node_type* p )
512 m_freeList.put( static_cast<aux_node_type*>( p ));
515 /// Returns the capacity of the bucket table
516 size_t capacity() const
521 /// Returns the load factor, i.e. average count of items per bucket
522 size_t load_factor() const
524 return m_nLoadFactor;
528 /// Expandable bucket table
530 This bucket table can dynamically grow its capacity when necessary
531 up to maximum bucket count.
534 - \p GC - garbage collector
535 - \p Node - node type, must be derived from \p split_list::node
536 - \p Options... - options
539 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
540 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
541 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
542 otherwise \p FreeList.
544 template <typename GC, typename Node, typename... Options>
545 class expandable_bucket_table
548 struct default_options
550 typedef CDS_DEFAULT_ALLOCATOR allocator;
551 typedef opt::v::relaxed_ordering memory_model;
552 typedef FreeListImpl free_list;
554 typedef typename opt::make_options< default_options, Options... >::type options;
558 typedef GC gc; ///< Garbage collector
559 typedef Node node_type; ///< Bucket node type
560 typedef typename options::allocator allocator; ///< allocator
562 /// Memory model for atomic operations
563 typedef typename options::memory_model memory_model;
566 typedef typename options::free_list free_list;
568 /// Auxiliary node type
569 struct aux_node_type: public node_type, public free_list::node
572 atomics::atomic<bool> m_busy;
576 m_busy.store( false, atomics::memory_order_release );
583 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
584 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
586 struct aux_node_segment {
587 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
588 aux_node_segment* next_segment;
589 // aux_node_type nodes[];
592 : next_segment( nullptr )
594 aux_node_count.store( 0, atomics::memory_order_release );
597 aux_node_type* segment()
599 return reinterpret_cast<aux_node_type*>( this + 1 );
603 /// Bucket table metrics
605 size_t nSegmentCount; ///< max count of segments in bucket table
606 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
607 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
608 size_t nLoadFactor; ///< load factor
609 size_t nCapacity; ///< max capacity of bucket table
612 : nSegmentCount( 1024 )
613 , nSegmentSize( 512 )
614 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
616 , nCapacity( nSegmentCount * nSegmentSize )
620 /// Bucket table allocator
621 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
623 /// Bucket table segment allocator
624 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
626 // Aux node segment allocator
627 typedef typename allocator::template rebind<char>::other raw_allocator;
632 /// Constructs bucket table for 512K buckets. Load factor is 1.
633 expandable_bucket_table()
634 : m_metrics( calc_metrics( 512 * 1024, 1 ))
639 /// Creates the table with specified capacity rounded up to nearest power-of-two
640 expandable_bucket_table(
641 size_t nItemCount, ///< Max expected item count in split-ordered list
642 size_t nLoadFactor ///< Load factor
644 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
649 /// Destroys bucket table
650 ~expandable_bucket_table()
652 m_freeList.clear( []( typename free_list::node* ) {} );
654 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
655 auto next_segment = aux_segment->next_segment;
656 free_aux_segment( aux_segment );
657 aux_segment = next_segment;
660 segment_type * pSegments = m_Segments;
661 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
662 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
663 if ( pEntry != nullptr )
664 destroy_segment( pEntry );
667 destroy_table( pSegments );
670 /// Returns head node of the bucket \p nBucket
671 aux_node_type * bucket( size_t nBucket ) const
673 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
674 assert( nSegment < m_metrics.nSegmentCount );
676 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
677 if ( pSegment == nullptr )
678 return nullptr; // uninitialized bucket
679 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
682 /// Set \p pNode as a head of bucket \p nBucket
683 void bucket( size_t nBucket, aux_node_type * pNode )
685 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
686 assert( nSegment < m_metrics.nSegmentCount );
688 segment_type& segment = m_Segments[nSegment];
689 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
690 table_entry* pNewSegment = allocate_segment();
691 table_entry * pNull = nullptr;
692 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
693 destroy_segment( pNewSegment );
696 assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
697 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
700 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
701 aux_node_type* alloc_aux_node()
703 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
706 assert( aux_segment != nullptr );
708 // try to allocate from current aux segment
709 if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
710 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
711 if ( idx < m_metrics.nSegmentSize ) {
712 CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
713 return new( aux_segment->segment() + idx ) aux_node_type();
717 // try allocate from free-list
718 auto pFree = m_freeList.get();
720 return static_cast<aux_node_type*>( pFree );
722 // free-list is empty, current segment is full
723 // try to allocate new aux segment
724 // We can allocate more aux segments than we need but it is not a problem in this context
725 aux_node_segment* new_aux_segment = allocate_aux_segment();
726 new_aux_segment->next_segment = aux_segment;
727 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
729 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
730 CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
731 return new( new_aux_segment->segment() ) aux_node_type();
734 free_aux_segment( new_aux_segment );
738 /// Places auxiliary node type to free-list
739 void free_aux_node( aux_node_type* p )
741 m_freeList.put( static_cast<aux_node_type*>( p ));
744 /// Returns the capacity of the bucket table
745 size_t capacity() const
747 return m_metrics.nCapacity;
750 /// Returns the load factor, i.e. average count of items per bucket
751 size_t load_factor() const
753 return m_metrics.nLoadFactor;
758 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
762 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
763 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
765 size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
766 if ( nBucketCount <= 2 ) {
770 else if ( nBucketCount <= 1024 ) {
772 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
775 nBucketCount = beans::log2ceil( nBucketCount );
777 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
778 if ( nBucketCount & 1 )
780 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
783 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
784 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
785 assert( m.nSegmentSizeLog2 != 0 ); //
789 segment_type * allocate_table()
791 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
794 void destroy_table( segment_type * pTable )
796 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
799 table_entry* allocate_segment()
801 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
804 void destroy_segment( table_entry* pSegment )
806 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
809 aux_node_segment* allocate_aux_segment()
811 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
812 CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
813 return new(p) aux_node_segment();
816 void free_aux_segment( aux_node_segment* p )
818 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
823 // m_nSegmentSize must be 2**N
824 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
825 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
827 // m_nSegmentCount must be 2**K
828 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
830 m_Segments = allocate_table();
831 m_auxNodeList = allocate_aux_segment();
837 metrics const m_metrics; ///< Dynamic bucket table metrics
838 segment_type* m_Segments; ///< bucket table - array of segments
839 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
840 free_list m_freeList; ///< List of free aux nodes
847 template <bool Value, typename GC, typename Node, typename... Options>
848 struct bucket_table_selector;
850 template <typename GC, typename Node, typename... Options>
851 struct bucket_table_selector< true, GC, Node, Options...>
853 typedef expandable_bucket_table<GC, Node, Options...> type;
856 template <typename GC, typename Node, typename... Options>
857 struct bucket_table_selector< false, GC, Node, Options...>
859 typedef static_bucket_table<GC, Node, Options...> type;
862 template <typename Q>
863 struct search_value_type
868 search_value_type( Q& v, size_t h )
        /// Adapts the underlying \p OrderedList for split-list use; specialized below
        /// for node-based lists (\p Iterable = false) and iterable lists (\p Iterable = true)
        template <class OrderedList, class Traits, bool Iterable >
        class ordered_list_adapter;
877 template <class OrderedList, class Traits>
878 class ordered_list_adapter< OrderedList, Traits, false >
880 typedef OrderedList native_ordered_list;
881 typedef Traits traits;
883 typedef typename native_ordered_list::gc gc;
884 typedef typename native_ordered_list::key_comparator native_key_comparator;
885 typedef typename native_ordered_list::node_type node_type;
886 typedef typename native_ordered_list::value_type value_type;
887 typedef typename native_ordered_list::node_traits native_node_traits;
888 typedef typename native_ordered_list::disposer native_disposer;
890 typedef split_list::node<node_type> splitlist_node_type;
893 int operator()( value_type const& v1, value_type const& v2 ) const
895 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
896 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
897 if ( n1->m_nHash != n2->m_nHash )
898 return n1->m_nHash < n2->m_nHash ? -1 : 1;
900 if ( n1->is_dummy()) {
901 assert( n2->is_dummy());
905 assert( !n1->is_dummy() && !n2->is_dummy());
907 return native_key_comparator()(v1, v2);
910 template <typename Q>
911 int operator()( value_type const& v, search_value_type<Q> const& q ) const
913 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
914 if ( n->m_nHash != q.nHash )
915 return n->m_nHash < q.nHash ? -1 : 1;
917 assert( !n->is_dummy());
918 return native_key_comparator()(v, q.val);
921 template <typename Q>
922 int operator()( search_value_type<Q> const& q, value_type const& v ) const
924 return -operator()( v, q );
928 struct wrapped_disposer
930 void operator()( value_type * v )
932 splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
934 native_disposer()(v);
939 typedef node_type ordered_list_node_type;
940 typedef splitlist_node_type aux_node;
942 struct node_traits: private native_node_traits
944 typedef native_node_traits base_class; ///< Base ordered list node type
945 typedef typename base_class::value_type value_type; ///< Value type
946 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
947 typedef node<base_node_type> node_type; ///< Split-list node type
949 /// Convert value reference to node pointer
950 static node_type * to_node_ptr( value_type& v )
952 return static_cast<node_type *>(base_class::to_node_ptr( v ));
955 /// Convert value pointer to node pointer
956 static node_type * to_node_ptr( value_type * v )
958 return static_cast<node_type *>(base_class::to_node_ptr( v ));
961 /// Convert value reference to node pointer (const version)
962 static node_type const * to_node_ptr( value_type const& v )
964 return static_cast<node_type const*>(base_class::to_node_ptr( v ));
967 /// Convert value pointer to node pointer (const version)
968 static node_type const * to_node_ptr( value_type const * v )
970 return static_cast<node_type const *>(base_class::to_node_ptr( v ));
973 /// Convert node reference to value pointer
974 static value_type * to_value_ptr( node_type& n )
976 return base_class::to_value_ptr( static_cast<base_node_type &>(n));
979 /// Convert node pointer to value pointer
980 static value_type * to_value_ptr( node_type * n )
982 return base_class::to_value_ptr( static_cast<base_node_type *>(n));
985 /// Convert node reference to value pointer (const version)
986 static const value_type * to_value_ptr( node_type const & n )
988 return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
991 /// Convert node pointer to value pointer (const version)
992 static const value_type * to_value_ptr( node_type const * n )
994 return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
998 template <typename Less>
999 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1001 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1003 template <typename Q>
1004 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1006 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
1007 if ( n->m_nHash != q.nHash )
1008 return n->m_nHash < q.nHash ? -1 : 1;
1010 assert( !n->is_dummy());
1011 return base_class()(v, q.val);
1014 template <typename Q>
1015 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1017 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
1018 if ( n->m_nHash != q.nHash )
1019 return q.nHash < n->m_nHash ? -1 : 1;
1021 assert( !n->is_dummy());
1022 return base_class()(q.val, v);
1025 int operator()( value_type const& lhs, value_type const& rhs ) const
1027 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
1028 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
1029 if ( n1->m_nHash != n2->m_nHash )
1030 return n1->m_nHash < n2->m_nHash ? -1 : 1;
1032 if ( n1->is_dummy()) {
1033 assert( n2->is_dummy());
1037 assert( !n1->is_dummy() && !n2->is_dummy());
1039 return native_key_comparator()( lhs, rhs );
1043 typedef typename native_ordered_list::template rebind_traits<
1044 opt::compare< key_compare >
1045 , opt::disposer< wrapped_disposer >
1046 , opt::boundary_node_type< splitlist_node_type >
// Adapter for iterable ordered lists (the IsIterable = true specialization).
// An iterable list stores values directly and has no embedded node type,
// so auxiliary (dummy bucket) nodes derive from the list's node type and
// from split_list's hash_node.
template <class OrderedList, class Traits>
class ordered_list_adapter< OrderedList, Traits, true >
{
    typedef OrderedList native_ordered_list;    // underlying iterable ordered list
    typedef Traits      traits;                 // split-list traits

    typedef typename native_ordered_list::gc             gc;                     // garbage collector of the list
    typedef typename native_ordered_list::key_comparator native_key_comparator;  // list's own value comparator
    typedef typename native_ordered_list::value_type     value_type;             // stored value type
    typedef typename native_ordered_list::disposer       native_disposer;        // list's disposer

    // Three-way comparator: orders nodes by split-ordered hash first,
    // falling back to the native comparator for equal-hash regular nodes.
    struct key_compare {
        int operator()( value_type const& v1, value_type const& v2 ) const
        {
            hash_node const& n1 = static_cast<hash_node const&>( v1 );
            hash_node const& n2 = static_cast<hash_node const&>( v2 );
            if ( n1.m_nHash != n2.m_nHash )
                return n1.m_nHash < n2.m_nHash ? -1 : 1;

            if ( n1.is_dummy()) {
                // dummy nodes with equal hash denote the same bucket
                assert( n2.is_dummy());
                return 0;
            }

            assert( !n1.is_dummy() && !n2.is_dummy());

            return native_key_comparator()(v1, v2);
        }

        template <typename Q>
        int operator()( value_type const& v, search_value_type<Q> const& q ) const
        {
            hash_node const& n = static_cast<hash_node const&>( v );
            if ( n.m_nHash != q.nHash )
                return n.m_nHash < q.nHash ? -1 : 1;

            // a search key's regular hash never equals a dummy node's hash
            assert( !n.is_dummy());
            return native_key_comparator()(v, q.val);
        }

        template <typename Q>
        int operator()( search_value_type<Q> const& q, value_type const& v ) const
        {
            // mirror of the (value, key) overload
            return -operator()( v, q );
        }
    };

    // Disposer wrapper: auxiliary (dummy) nodes are owned by the split-list
    // itself and must never be passed to the list's native disposer.
    struct wrapped_disposer
    {
        void operator()( value_type * v )
        {
            if ( !static_cast<hash_node*>( v )->is_dummy())
                native_disposer()( v );
        }
    };

public:
    typedef void ordered_list_node_type; // iterable list has no embedded node type

    // Auxiliary (bucket dummy) node for the iterable list
    struct aux_node: public native_ordered_list::node_type, public hash_node
    {
        aux_node()
        {
            typedef typename native_ordered_list::node_type list_node_type;

            // publish 'this' as the node's data pointer so list traversal
            // can see the dummy node; release order makes the fully
            // constructed node visible to readers
            list_node_type::data.store( typename list_node_type::marked_data_ptr(
                static_cast<value_type*>( static_cast<hash_node *>( this ))),
                atomics::memory_order_release
            );
        }
    };

    // Node traits: value_type derives from hash_node, so the conversion
    // is a plain static_cast in both directions
    struct node_traits
    {
        static hash_node * to_node_ptr( value_type& v )
        {
            return static_cast<hash_node *>( &v );
        }

        static hash_node * to_node_ptr( value_type * v )
        {
            return static_cast<hash_node *>( v );
        }

        static hash_node const * to_node_ptr( value_type const& v )
        {
            return static_cast<hash_node const*>( &v );
        }

        static hash_node const * to_node_ptr( value_type const * v )
        {
            return static_cast<hash_node const *>( v );
        }
    };

    // Builds a hash-aware three-way comparator from a user-supplied Less predicate
    template <typename Less>
    struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
    {
        typedef cds::opt::details::make_comparator_from_less<Less> base_class;

        template <typename Q>
        int operator()( value_type const& v, search_value_type<Q> const& q ) const
        {
            hash_node const& n = static_cast<hash_node const&>( v );
            if ( n.m_nHash != q.nHash )
                return n.m_nHash < q.nHash ? -1 : 1;

            assert( !n.is_dummy());
            return base_class()(v, q.val);
        }

        template <typename Q>
        int operator()( search_value_type<Q> const& q, value_type const& v ) const
        {
            hash_node const& n = static_cast<hash_node const&>( v );
            if ( n.m_nHash != q.nHash )
                return q.nHash < n.m_nHash ? -1 : 1;

            assert( !n.is_dummy());
            return base_class()(q.val, v);
        }

        int operator()( value_type const& lhs, value_type const& rhs ) const
        {
            hash_node const& n1 = static_cast<hash_node const&>( lhs );
            hash_node const& n2 = static_cast<hash_node const&>( rhs );
            if ( n1.m_nHash != n2.m_nHash )
                return n1.m_nHash < n2.m_nHash ? -1 : 1;

            if ( n1.is_dummy()) {
                assert( n2.is_dummy());
                return 0;
            }

            assert( !n1.is_dummy() && !n2.is_dummy());

            return base_class()( lhs, rhs );
        }
    };

public:
    // The adapted list type: the original list rebound with the split-list
    // comparator, the dummy-aware disposer, and aux_node as boundary node.
    typedef typename native_ordered_list::template rebind_traits<
        opt::compare< key_compare >
        , opt::disposer< wrapped_disposer >
        , opt::boundary_node_type< aux_node >
    >::type result;
};
// Selects the proper ordered_list_adapter specialization: iterable lists
// (which store values directly, without an embedded node) take the
// 'true' variant; node-based lists take the 'false' variant.
template <class OrderedList, class Traits>
using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
// Metafunction: picks the ordered list's iterator or const_iterator type
// depending on the IsConst flag. Primary template is intentionally
// undefined — only the two boolean specializations are valid.
template <typename OrderedList, bool IsConst>
struct select_list_iterator;

// Mutable iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, false>
{
    typedef typename OrderedList::iterator type;
};

// Const iterator
template <typename OrderedList>
struct select_list_iterator<OrderedList, true>
{
    typedef typename OrderedList::const_iterator type;
};
// Forward iterator over a split-ordered list that transparently skips
// auxiliary (dummy bucket) nodes, exposing only real stored values.
template <typename NodeTraits, typename OrderedList, bool IsConst>
class iterator_type
{
    typedef OrderedList ordered_list_type;
    friend class iterator_type <NodeTraits, OrderedList, !IsConst >; // allow const/non-const cross comparison

protected:
    typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
    typedef NodeTraits node_traits;

private:
    list_iterator m_itCur;  // current position in the underlying list
    list_iterator m_itEnd;  // end of the underlying list (needed to stop dummy-skipping)

public:
    typedef typename list_iterator::value_ptr value_ptr;
    typedef typename list_iterator::value_ref value_ref;

public:
    // Default-constructed iterator is singular
    iterator_type()
    {}

    iterator_type( iterator_type const& src )
        : m_itCur( src.m_itCur )
        , m_itEnd( src.m_itEnd )
    {}

    // This ctor should be protected...
    iterator_type( list_iterator itCur, list_iterator itEnd )
        : m_itCur( itCur )
        , m_itEnd( itEnd )
    {
        // position on the first non-dummy node (or end)
        while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
            ++m_itCur;
    }

    value_ptr operator ->() const
    {
        return m_itCur.operator->();
    }

    value_ref operator *() const
    {
        return m_itCur.operator*();
    }

    /// Pre-increment: advances past any dummy nodes to the next real value
    iterator_type& operator ++()
    {
        if ( m_itCur != m_itEnd ) {
            do {
                ++m_itCur;
            } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
        }
        return *this;
    }

    iterator_type& operator = (iterator_type const& src)
    {
        m_itCur = src.m_itCur;
        m_itEnd = src.m_itEnd;
        return *this;
    }

    // Equality compares the current position only; works across const-ness
    template <bool C>
    bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
    {
        return m_itCur == i.m_itCur;
    }
    template <bool C>
    bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
    {
        return m_itCur != i.m_itCur;
    }

    // Access to the raw list iterator (dummy nodes are visible through it)
    list_iterator const& underlying_iterator() const
    {
        return m_itCur;
    }
};
1297 } // namespace details
/// Converts a hash value into a "regular" split-ordered key:
/// bit-reverses \p nHash (via \p BitReversalAlgo) and forces the least
/// significant bit to 1, which distinguishes regular nodes from dummy
/// (bucket) nodes in the split-ordered list.
template <typename BitReversalAlgo>
static inline size_t regular_hash( size_t nHash )
{
    size_t const reversed = BitReversalAlgo()( nHash );
    return reversed | static_cast<size_t>( 1 );
}
/// Converts a hash value into a "dummy" (bucket) split-ordered key:
/// bit-reverses \p nHash (via \p BitReversalAlgo) and clears the least
/// significant bit, marking the key as belonging to an auxiliary bucket
/// node rather than a regular value node.
template <typename BitReversalAlgo>
static inline size_t dummy_hash( size_t nHash )
{
    size_t const reversed = BitReversalAlgo()( nHash );
    return reversed & ~static_cast<size_t>( 1 );
}
1315 } // namespace split_list
1318 // Forward declaration
1319 template <class GC, class OrderedList, class Traits = split_list::traits>
1323 }} // namespace cds::intrusive
1325 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H