2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/algo/bit_reversal.h>
37 #include <cds/details/allocator.h>
38 #include <cds/algo/int_algo.h>
39 #include <cds/algo/bitop.h>
40 #include <cds/opt/hash.h>
41 #include <cds/intrusive/free_list_selector.h>
42 #include <cds/details/size_t_cast.h>
44 namespace cds { namespace intrusive {
46 /// Split-ordered list related definitions
47 /** @ingroup cds_intrusive_helper
49 namespace split_list {
53 size_t m_nHash; ///< Hash value for node
55 /// Default constructor
62 /// Initializes dummy node with \p nHash value
63 explicit hash_node( size_t nHash )
69 /// Checks whether the node is a dummy node
72 return (m_nHash & 1) == 0;
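            // Split-order convention: a regular (data) node stores a bit-reversed hash with the
            // least significant bit set to 1, while a dummy (bucket head) node stores a bit-reversed
            // hash with the LSB cleared; see regular_hash() / dummy_hash() at the end of this file.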
77 /// Split-ordered list node
80 - \p OrderedListNode - node type for underlying ordered list
82 template <typename OrderedListNode>
83 struct node: public OrderedListNode, public hash_node
86 typedef OrderedListNode base_class;
89 /// Default constructor
96 /// Initializes dummy node with \p nHash value
97 explicit node( size_t nHash )
103 /// Checks whether the node is a dummy node
104 bool is_dummy() const
106 return hash_node::is_dummy();
113 struct node<void>: public hash_node
122 /// Initializes dummy node with \p nHash value
123 explicit node( size_t nHash )
129 /// Checks whether the node is a dummy node
130 bool is_dummy() const
132 return hash_node::is_dummy();
137 /// \p SplitListSet internal statistics. May be used for debugging or profiling
139 Template argument \p Counter defines the counter type; the default is \p cds::atomicity::event_counter.
141 template <typename Counter = cds::atomicity::event_counter >
144 typedef Counter counter_type; ///< Counter type
146 counter_type m_nInsertSuccess; ///< Count of successful insert operations
147 counter_type m_nInsertFailed; ///< Count of failed insert operations
148 counter_type m_nUpdateNew; ///< Count of new items created by the \p ensure() member function
149 counter_type m_nUpdateExist; ///< Count of \p ensure() calls for an existing item
150 counter_type m_nEraseSuccess; ///< Count of successful erase operations
151 counter_type m_nEraseFailed; ///< Count of attempts to erase an unknown item
152 counter_type m_nExtractSuccess; ///< Count of successful extract operations
153 counter_type m_nExtractFailed; ///< Count of attempts to extract an unknown item
154 counter_type m_nFindSuccess; ///< Count of successful find operations
155 counter_type m_nFindFailed; ///< Count of failed find operations
156 counter_type m_nHeadNodeAllocated; ///< Count of allocated head nodes
157 counter_type m_nHeadNodeFreed; ///< Count of freed head nodes
158 counter_type m_nBucketCount; ///< Current bucket count
159 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initializations
160 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
161 counter_type m_nBusyWaitBucketInit; ///< Count of busy-wait cycles while a bucket is being initialized
162 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocations
165 void onInsertSuccess() { ++m_nInsertSuccess; }
166 void onInsertFailed() { ++m_nInsertFailed; }
167 void onUpdateNew() { ++m_nUpdateNew; }
168 void onUpdateExist() { ++m_nUpdateExist; }
169 void onEraseSuccess() { ++m_nEraseSuccess; }
170 void onEraseFailed() { ++m_nEraseFailed; }
171 void onExtractSuccess() { ++m_nExtractSuccess; }
172 void onExtractFailed() { ++m_nExtractFailed; }
173 void onFindSuccess() { ++m_nFindSuccess; }
174 void onFindFailed() { ++m_nFindFailed; }
175 bool onFind(bool bSuccess)
183 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
184 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
185 void onNewBucket() { ++m_nBucketCount; }
186 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
187 void onBucketInitContenton() { ++m_nInitBucketContention; }
188 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
189 void onBucketsExhausted() { ++m_nBucketsExhausted; }
193 /// Dummy statistics - no counting is performed, no overhead. Supports the same interface as \p split_list::stat
196 void onInsertSuccess() const {}
197 void onInsertFailed() const {}
198 void onUpdateNew() const {}
199 void onUpdateExist() const {}
200 void onEraseSuccess() const {}
201 void onEraseFailed() const {}
202 void onExtractSuccess() const {}
203 void onExtractFailed() const {}
204 void onFindSuccess() const {}
205 void onFindFailed() const {}
206 bool onFind( bool bSuccess ) const { return bSuccess; }
207 void onHeadNodeAllocated() const {}
208 void onHeadNodeFreed() const {}
209 void onNewBucket() const {}
210 void onRecursiveInitBucket() const {}
211 void onBucketInitContenton() const {}
212 void onBusyWaitBucketInit() const {}
213 void onBucketsExhausted() const {}
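    /*
        Both \p stat and \p empty_stat expose the same set of event handlers, so the
        statistics policy is resolved at compile time and costs nothing when disabled.
        A minimal sketch of enabling counting statistics (\p my_traits and \p my_set_type
        are illustrative; it assumes the split-list container exposes its statistics
        object through a \p statistics() accessor, as libcds containers usually do):

        \code
        struct my_traits: public cds::intrusive::split_list::traits
        {
            typedef cds::intrusive::split_list::stat<> stat;   // enable event counting
            // ... hash functor and other options go here ...
        };

        // my_set_type - a SplitListSet specialization that uses my_traits
        // my_set_type s( nMaxItemCount, nLoadFactor );
        // ... work with the set ...
        // std::cout << s.statistics().m_nInsertSuccess.get() << " successful inserts\n";
        \endcode
    */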
217 /// Option to control bit reversal algorithm
219 Bit reversal is a significant part of the split-list algorithm.
220 \p Type can be one of the predefined algorithms in the \p cds::algo::bit_reversal namespace.
222 template <typename Type>
223 struct bit_reversal {
225 template <typename Base>
226 struct pack: public Base
228 typedef Type bit_reversal;
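    /*
        A minimal usage sketch of this option (\p my_hash is an illustrative hash functor;
        any algorithm from \p cds::algo::bit_reversal may be substituted for \p lookup):

        \code
        typedef typename cds::intrusive::split_list::make_traits<
            cds::opt::hash< my_hash >,
            cds::intrusive::split_list::bit_reversal< cds::algo::bit_reversal::lookup >
        >::type my_traits;
        \endcode
    */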
233 /// SplitListSet traits
238 The hash functor converts the key fields of struct \p T stored in the split-list
239 into a hash value of type \p size_t that is used as an index into the hash table.
240 By default, \p std::hash is used.
242 typedef opt::none hash;
244 /// Bit reversal algorithm
246 Bit reversal is a significant part of the split-list algorithm.
247 There are several predefined algorithms in the \p cds::algo::bit_reversal namespace;
248 \p cds::algo::bit_reversal::lookup is the best general-purpose one.
250 There are more efficient bit reversal algorithms for particular processor architectures,
251 for example, those based on the x86 SIMD/AVX instruction set; see <a href="http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c">here</a>
253 typedef cds::algo::bit_reversal::lookup bit_reversal;
257 Item counting is an important part of the \p SplitListSet algorithm:
258 the <tt>empty()</tt> member function depends on correct item counting.
259 Therefore, \p cds::atomicity::empty_item_counter is not allowed as the type of this option.
261 The default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter
263 typedef cds::atomicity::item_counter item_counter;
265 /// Bucket table allocator
267 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
269 typedef CDS_DEFAULT_ALLOCATOR allocator;
271 /// Internal statistics (by default, disabled)
273 Possible statistics types are: \p split_list::stat (enables internal statistics),
274 \p split_list::empty_stat (the default, internal statistics disabled),
275 or any user-provided class that supports the \p %split_list::stat interface.
277 typedef split_list::empty_stat stat;
280 /// C++ memory ordering model
282 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
283 or \p opt::v::sequential_consistent (sequentially consistent memory model).
285 typedef opt::v::relaxed_ordering memory_model;
287 /// What type of bucket table is used
289 \p true - use \p split_list::expandable_bucket_table that can be expanded
290 when the load factor of the set is exceeded.
291 \p false - use \p split_list::static_bucket_table that cannot be expanded
292 and is allocated in the \p SplitListSet constructor.
296 static const bool dynamic_bucket_table = true;
298 /// Back-off strategy
299 typedef cds::backoff::Default back_off;
301 /// Padding; default is cache-line padding
303 padding = cds::opt::cache_line_padding
306 /// Free-list of auxiliary nodes
308 The split-list contains auxiliary nodes that mark the start of buckets.
309 To increase performance, there is a pool of preallocated aux nodes. Part of the pool is a free-list
313 - \p cds::intrusive::FreeList - if the architecture and/or compiler does not support the double-width CAS primitive
314 - \p cds::intrusive::TaggedFreeList - if the architecture and/or compiler supports the double-width CAS primitive
316 typedef FreeListImpl free_list;
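    /*
        Custom traits are usually declared by deriving from \p split_list::traits and
        overriding the required members; \p make_traits (below) is the option-list
        alternative. A minimal sketch, where \p my_hash is an illustrative hash functor:

        \code
        struct my_split_list_traits: public cds::intrusive::split_list::traits
        {
            typedef my_hash hash;   // the hash functor is mandatory
            typedef cds::atomicity::cache_friendly_item_counter item_counter;
            static const bool dynamic_bucket_table = false; // fixed-size bucket table
        };
        \endcode
    */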
319 /// [value-option] Split-list dynamic bucket table option
321 The option is used to select bucket table implementation.
322 Possible values of \p Value are:
323 - \p true - select \p expandable_bucket_table
324 - \p false - select \p static_bucket_table
326 template <bool Value>
327 struct dynamic_bucket_table
330 template <typename Base> struct pack: public Base
332 enum { dynamic_bucket_table = Value };
337 /// Metafunction converting option list to \p split_list::traits
339 Available \p Options:
340 - \p opt::hash - mandatory option, specifies hash functor.
341 - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation
342 default is \p cds::algo::bit_reversal::lookup
343 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
345 - \p opt::memory_model - C++ memory model for atomic operations.
346 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
347 or \p opt::v::sequential_consistent (sequentially consistent memory model).
348 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
349 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
350 Dynamic bucket table expands its size up to maximum bucket count when necessary
351 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
352 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
353 To enable internal statistics use \p split_list::stat.
354 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
355 - \p opt::free_list - a free-list implementation, see \p traits::free_list
357 template <typename... Options>
359 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
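    /*
        A sketch of the option-list style equivalent to deriving from \p split_list::traits
        (\p my_hash is an illustrative hash functor):

        \code
        typedef typename cds::intrusive::split_list::make_traits<
            cds::opt::hash< my_hash >,
            cds::opt::item_counter< cds::atomicity::cache_friendly_item_counter >,
            cds::opt::stat< cds::intrusive::split_list::stat<> >,
            cds::intrusive::split_list::dynamic_bucket_table< false >
        >::type my_traits;
        \endcode
    */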
362 /// Static bucket table
364 Non-resizeable bucket table for the \p SplitListSet class.
365 The capacity of the table (maximum bucket count) is defined at construction time.
368 - \p GC - garbage collector
369 - \p Node - node type, must be a type based on \p split_list::node
370 - \p Options... - options
373 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
374 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
375 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
376 otherwise \p FreeList.
378 template <typename GC, typename Node, typename... Options>
379 class static_bucket_table
382 struct default_options
384 typedef CDS_DEFAULT_ALLOCATOR allocator;
385 typedef opt::v::relaxed_ordering memory_model;
386 typedef FreeListImpl free_list;
388 typedef typename opt::make_options< default_options, Options... >::type options;
392 typedef GC gc; ///< Garbage collector
393 typedef Node node_type; ///< Bucket node type
394 typedef typename options::allocator allocator; ///< allocator
395 typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
396 typedef typename options::free_list free_list; ///< Free-list
398 /// Auxiliary node type
399 struct aux_node_type: public node_type, public free_list::node
402 atomics::atomic<bool> m_busy;
406 m_busy.store( false, atomics::memory_order_release );
411 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
412 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
416 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
417 const size_t m_nCapacity; ///< Bucket table capacity
418 table_entry * m_Table; ///< Bucket table
420 typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;
422 aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
423 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary nodes have been allocated
424 free_list m_freeList; ///< Free list
429 void allocate_table()
431 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
432 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
437 m_freeList.clear( []( typename free_list::node* ) {} );
438 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
439 bucket_table_allocator().Delete( m_Table, m_nCapacity );
444 /// Constructs bucket table for 512K buckets. Load factor is 1.
445 static_bucket_table()
447 , m_nCapacity( 512 * 1024 )
448 , m_nAuxNodeAllocated( 0 )
453 /// Creates the table with the specified size rounded up to the nearest power of two
455 size_t nItemCount, ///< Max expected item count in split-ordered list
456 size_t nLoadFactor ///< Load factor
458 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
459 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
460 , m_nAuxNodeAllocated( 0 )
462 // m_nCapacity must be power of 2
463 assert( cds::beans::is_power2( m_nCapacity ));
467 /// Destroys bucket table
468 ~static_bucket_table()
473 /// Returns head node of bucket \p nBucket
474 aux_node_type * bucket( size_t nBucket ) const
476 assert( nBucket < capacity());
477 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
480 /// Set \p pNode as a head of bucket \p nBucket
481 void bucket( size_t nBucket, aux_node_type * pNode )
483 assert( nBucket < capacity());
484 assert( bucket( nBucket ) == nullptr );
486 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
489 /// Allocates an auxiliary node; returns \p nullptr if the table is exhausted
490 aux_node_type* alloc_aux_node()
492 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
493 // alloc next free node from m_auxNode
494 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
495 if ( idx < capacity() ) {
496 CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
497 return new( &m_auxNode[idx] ) aux_node_type();
501 // get from free-list
502 auto pFree = m_freeList.get();
504 return static_cast<aux_node_type*>( pFree );
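            // Aux node allocation in the static table is two-tier: nodes are first carved from
            // the preallocated m_auxNode array (bump allocation via m_nAuxNodeAllocated), and when
            // that array is exhausted, recycled nodes are taken from m_freeList. nullptr means
            // both sources are empty, i.e. the table is exhausted.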
510 /// Places the auxiliary node into the free-list
511 void free_aux_node( aux_node_type* p )
513 m_freeList.put( static_cast<aux_node_type*>( p ));
516 /// Returns the capacity of the bucket table
517 size_t capacity() const
522 /// Returns the load factor, i.e. average count of items per bucket
523 size_t load_factor() const
525 return m_nLoadFactor;
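        /*
            Capacity example: constructing the table with nItemCount = 1000000 and
            nLoadFactor = 2 gives ceil2( 1000000 / 2 ) = 524288 buckets (the quotient
            500000 is rounded up to the nearest power of two), so at the declared maximum
            size the average bucket holds about two items.

            \code
            // sketch; my_table is an illustrative static_bucket_table specialization
            // my_table tbl( 1000000, 2 );
            // assert( tbl.capacity() == 524288 && tbl.load_factor() == 2 );
            \endcode
        */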
529 /// Expandable bucket table
531 This bucket table can dynamically grow its capacity when necessary
532 up to maximum bucket count.
535 - \p GC - garbage collector
536 - \p Node - node type, must be derived from \p split_list::node
537 - \p Options... - options
540 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
541 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
542 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-width CAS,
543 otherwise \p FreeList.
545 template <typename GC, typename Node, typename... Options>
546 class expandable_bucket_table
549 struct default_options
551 typedef CDS_DEFAULT_ALLOCATOR allocator;
552 typedef opt::v::relaxed_ordering memory_model;
553 typedef FreeListImpl free_list;
555 typedef typename opt::make_options< default_options, Options... >::type options;
559 typedef GC gc; ///< Garbage collector
560 typedef Node node_type; ///< Bucket node type
561 typedef typename options::allocator allocator; ///< allocator
563 /// Memory model for atomic operations
564 typedef typename options::memory_model memory_model;
567 typedef typename options::free_list free_list;
569 /// Auxiliary node type
570 struct aux_node_type: public node_type, public free_list::node
573 atomics::atomic<bool> m_busy;
577 m_busy.store( false, atomics::memory_order_release );
584 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
585 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
587 struct aux_node_segment {
588 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
589 aux_node_segment* next_segment;
590 // aux_node_type nodes[];
593 : next_segment( nullptr )
595 aux_node_count.store( 0, atomics::memory_order_release );
598 aux_node_type* segment()
600 return reinterpret_cast<aux_node_type*>( this + 1 );
604 /// Bucket table metrics
606 size_t nSegmentCount; ///< max count of segments in bucket table
607 size_t nSegmentSize; ///< the segment's capacity. The capacity must be a power of two.
608 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
609 size_t nLoadFactor; ///< load factor
610 size_t nCapacity; ///< max capacity of bucket table
613 : nSegmentCount( 1024 )
614 , nSegmentSize( 512 )
615 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
617 , nCapacity( nSegmentCount * nSegmentSize )
621 /// Bucket table allocator
622 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
624 /// Bucket table segment allocator
625 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
627 // Aux node segment allocator
628 typedef typename allocator::template rebind<char>::other raw_allocator;
633 /// Constructs bucket table for 512K buckets. Load factor is 1.
634 expandable_bucket_table()
635 : m_metrics( calc_metrics( 512 * 1024, 1 ))
640 /// Creates the table with the specified capacity rounded up to the nearest power of two
641 expandable_bucket_table(
642 size_t nItemCount, ///< Max expected item count in split-ordered list
643 size_t nLoadFactor ///< Load factor
645 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
650 /// Destroys bucket table
651 ~expandable_bucket_table()
653 m_freeList.clear( []( typename free_list::node* ) {} );
655 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
656 auto next_segment = aux_segment->next_segment;
657 free_aux_segment( aux_segment );
658 aux_segment = next_segment;
661 segment_type * pSegments = m_Segments;
662 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
663 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
664 if ( pEntry != nullptr )
665 destroy_segment( pEntry );
668 destroy_table( pSegments );
671 /// Returns head node of the bucket \p nBucket
672 aux_node_type * bucket( size_t nBucket ) const
674 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
675 assert( nSegment < m_metrics.nSegmentCount );
677 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
678 if ( pSegment == nullptr )
679 return nullptr; // uninitialized bucket
680 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
683 /// Set \p pNode as a head of bucket \p nBucket
684 void bucket( size_t nBucket, aux_node_type * pNode )
686 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
687 assert( nSegment < m_metrics.nSegmentCount );
689 segment_type& segment = m_Segments[nSegment];
690 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
691 table_entry* pNewSegment = allocate_segment();
692 table_entry * pNull = nullptr;
693 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
694 destroy_segment( pNewSegment );
697 assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
698 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
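            // Segments are allocated lazily: the first thread to install a bucket head in an
            // untouched segment allocates the segment and publishes it with compare_exchange_strong;
            // a thread that loses this race destroys its freshly allocated segment and stores the
            // bucket head into the segment that won.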
701 /// Allocates an auxiliary node; returns \p nullptr if the table is exhausted
702 aux_node_type* alloc_aux_node()
704 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
707 assert( aux_segment != nullptr );
709 // try to allocate from current aux segment
710 if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
711 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
712 if ( idx < m_metrics.nSegmentSize ) {
713 CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
714 return new( aux_segment->segment() + idx ) aux_node_type();
718 // try allocate from free-list
719 auto pFree = m_freeList.get();
721 return static_cast<aux_node_type*>( pFree );
723 // free-list is empty, current segment is full
724 // try to allocate new aux segment
725 // We can allocate more aux segments than we need but it is not a problem in this context
726 aux_node_segment* new_aux_segment = allocate_aux_segment();
727 new_aux_segment->next_segment = aux_segment;
728 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
730 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
731 CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
732 return new( new_aux_segment->segment() ) aux_node_type();
735 free_aux_segment( new_aux_segment );
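            // If the CAS on m_auxNodeList fails, another thread has already published its own new
            // aux segment; the losing thread releases the segment it allocated speculatively via
            // free_aux_segment() so that it is not leaked.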
739 /// Places the auxiliary node into the free-list
740 void free_aux_node( aux_node_type* p )
742 m_freeList.put( static_cast<aux_node_type*>( p ));
745 /// Returns the capacity of the bucket table
746 size_t capacity() const
748 return m_metrics.nCapacity;
751 /// Returns the load factor, i.e. average count of items per bucket
752 size_t load_factor() const
754 return m_metrics.nLoadFactor;
759 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
763 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
764 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
766 size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor;
767 if ( nBucketCount <= 2 ) {
771 else if ( nBucketCount <= 1024 ) {
773 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
776 nBucketCount = beans::log2ceil( nBucketCount );
778 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
779 if ( nBucketCount & 1 )
781 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
784 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
785 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
786 assert( m.nSegmentSizeLog2 != 0 ); //
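            /*
                Worked example (a sketch that assumes nSegmentCount is assigned the same power of
                two as nSegmentSize in the general branch above): nItemCount = 1000000 and
                nLoadFactor = 1 give nBucketCount = 1000000, log2ceil( 1000000 ) = 20, hence
                nSegmentSize = nSegmentCount = 1 << 10 = 1024, nCapacity = 1024 * 1024 = 1048576
                and nSegmentSizeLog2 = 10.
            */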
790 segment_type * allocate_table()
792 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
795 void destroy_table( segment_type * pTable )
797 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
800 table_entry* allocate_segment()
802 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
805 void destroy_segment( table_entry* pSegment )
807 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
810 aux_node_segment* allocate_aux_segment()
812 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
813 CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
814 return new(p) aux_node_segment();
817 void free_aux_segment( aux_node_segment* p )
819 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
824 // m_nSegmentSize must be 2**N
825 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
826 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
828 // m_nSegmentCount must be 2**K
829 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
831 m_Segments = allocate_table();
832 m_auxNodeList = allocate_aux_segment();
838 metrics const m_metrics; ///< Dynamic bucket table metrics
839 segment_type* m_Segments; ///< bucket table - array of segments
840 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
841 free_list m_freeList; ///< List of free aux nodes
848 template <bool Value, typename GC, typename Node, typename... Options>
849 struct bucket_table_selector;
851 template <typename GC, typename Node, typename... Options>
852 struct bucket_table_selector< true, GC, Node, Options...>
854 typedef expandable_bucket_table<GC, Node, Options...> type;
857 template <typename GC, typename Node, typename... Options>
858 struct bucket_table_selector< false, GC, Node, Options...>
860 typedef static_bucket_table<GC, Node, Options...> type;
863 template <typename Q>
864 struct search_value_type
869 search_value_type( Q& v, size_t h )
875 template <class OrderedList, class Traits, bool Iterable >
876 class ordered_list_adapter;
878 template <class OrderedList, class Traits>
879 class ordered_list_adapter< OrderedList, Traits, false >
881 typedef OrderedList native_ordered_list;
882 typedef Traits traits;
884 typedef typename native_ordered_list::gc gc;
885 typedef typename native_ordered_list::key_comparator native_key_comparator;
886 typedef typename native_ordered_list::node_type node_type;
887 typedef typename native_ordered_list::value_type value_type;
888 typedef typename native_ordered_list::node_traits native_node_traits;
889 typedef typename native_ordered_list::disposer native_disposer;
891 typedef split_list::node<node_type> splitlist_node_type;
894 int operator()( value_type const& v1, value_type const& v2 ) const
896 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
897 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
898 if ( n1->m_nHash != n2->m_nHash )
899 return n1->m_nHash < n2->m_nHash ? -1 : 1;
901 if ( n1->is_dummy()) {
902 assert( n2->is_dummy());
906 assert( !n1->is_dummy() && !n2->is_dummy());
908 return native_key_comparator()(v1, v2);
911 template <typename Q>
912 int operator()( value_type const& v, search_value_type<Q> const& q ) const
914 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
915 if ( n->m_nHash != q.nHash )
916 return n->m_nHash < q.nHash ? -1 : 1;
918 assert( !n->is_dummy());
919 return native_key_comparator()(v, q.val);
922 template <typename Q>
923 int operator()( search_value_type<Q> const& q, value_type const& v ) const
925 return -operator()( v, q );
929 struct wrapped_disposer
931 void operator()( value_type * v )
933 splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
935 native_disposer()(v);
940 typedef node_type ordered_list_node_type;
941 typedef splitlist_node_type aux_node;
943 struct node_traits: private native_node_traits
945 typedef native_node_traits base_class; ///< Base ordered list node type
946 typedef typename base_class::value_type value_type; ///< Value type
947 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
948 typedef node<base_node_type> node_type; ///< Split-list node type
950 /// Convert value reference to node pointer
951 static node_type * to_node_ptr( value_type& v )
953 return static_cast<node_type *>(base_class::to_node_ptr( v ));
956 /// Convert value pointer to node pointer
957 static node_type * to_node_ptr( value_type * v )
959 return static_cast<node_type *>(base_class::to_node_ptr( v ));
962 /// Convert value reference to node pointer (const version)
963 static node_type const * to_node_ptr( value_type const& v )
965 return static_cast<node_type const*>(base_class::to_node_ptr( v ));
968 /// Convert value pointer to node pointer (const version)
969 static node_type const * to_node_ptr( value_type const * v )
971 return static_cast<node_type const *>(base_class::to_node_ptr( v ));
974 /// Convert node reference to value pointer
975 static value_type * to_value_ptr( node_type& n )
977 return base_class::to_value_ptr( static_cast<base_node_type &>(n));
980 /// Convert node pointer to value pointer
981 static value_type * to_value_ptr( node_type * n )
983 return base_class::to_value_ptr( static_cast<base_node_type *>(n));
986 /// Convert node reference to value pointer (const version)
987 static const value_type * to_value_ptr( node_type const & n )
989 return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
992 /// Convert node pointer to value pointer (const version)
993 static const value_type * to_value_ptr( node_type const * n )
995 return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
999 template <typename Less>
1000 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1002 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1004 template <typename Q>
1005 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1007 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
1008 if ( n->m_nHash != q.nHash )
1009 return n->m_nHash < q.nHash ? -1 : 1;
1011 assert( !n->is_dummy());
1012 return base_class()(v, q.val);
1015 template <typename Q>
1016 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1018 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
1019 if ( n->m_nHash != q.nHash )
1020 return q.nHash < n->m_nHash ? -1 : 1;
1022 assert( !n->is_dummy());
1023 return base_class()(q.val, v);
1026 int operator()( value_type const& lhs, value_type const& rhs ) const
1028 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
1029 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
1030 if ( n1->m_nHash != n2->m_nHash )
1031 return n1->m_nHash < n2->m_nHash ? -1 : 1;
1033 if ( n1->is_dummy()) {
1034 assert( n2->is_dummy());
1038 assert( !n1->is_dummy() && !n2->is_dummy());
1040 return native_key_comparator()( lhs, rhs );
1044 typedef typename native_ordered_list::template rebind_traits<
1045 opt::compare< key_compare >
1046 , opt::disposer< wrapped_disposer >
1047 , opt::boundary_node_type< splitlist_node_type >
1051 template <class OrderedList, class Traits>
1052 class ordered_list_adapter< OrderedList, Traits, true >
1054 typedef OrderedList native_ordered_list;
1055 typedef Traits traits;
1057 typedef typename native_ordered_list::gc gc;
1058 typedef typename native_ordered_list::key_comparator native_key_comparator;
1059 typedef typename native_ordered_list::value_type value_type;
1060 typedef typename native_ordered_list::disposer native_disposer;
1062 struct key_compare {
1063 int operator()( value_type const& v1, value_type const& v2 ) const
1065 hash_node const& n1 = static_cast<hash_node const&>( v1 );
1066 hash_node const& n2 = static_cast<hash_node const&>( v2 );
1067 if ( n1.m_nHash != n2.m_nHash )
1068 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1070 if ( n1.is_dummy()) {
1071 assert( n2.is_dummy());
1075 assert( !n1.is_dummy() && !n2.is_dummy());
1077 return native_key_comparator()(v1, v2);
1080 template <typename Q>
1081 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1083 hash_node const& n = static_cast<hash_node const&>( v );
1084 if ( n.m_nHash != q.nHash )
1085 return n.m_nHash < q.nHash ? -1 : 1;
1087 assert( !n.is_dummy());
1088 return native_key_comparator()(v, q.val);
1091 template <typename Q>
1092 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1094 return -operator()( v, q );
1098 struct wrapped_disposer
1100 void operator()( value_type * v )
1102 if ( !static_cast<hash_node*>( v )->is_dummy())
1103 native_disposer()( v );
1108 typedef void ordered_list_node_type;
1110 struct aux_node: public native_ordered_list::node_type, public hash_node
1114 typedef typename native_ordered_list::node_type list_node_type;
1116 list_node_type::data.store( typename list_node_type::marked_data_ptr(
1117 static_cast<value_type*>( static_cast<hash_node *>( this ))),
1118 atomics::memory_order_release
1125 static hash_node * to_node_ptr( value_type& v )
1127 return static_cast<hash_node *>( &v );
1130 static hash_node * to_node_ptr( value_type * v )
1132 return static_cast<hash_node *>( v );
1135 static hash_node const * to_node_ptr( value_type const& v )
1137 return static_cast<hash_node const*>( &v );
1140 static hash_node const * to_node_ptr( value_type const * v )
1142 return static_cast<hash_node const *>( v );
1146 template <typename Less>
1147 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1149 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1151 template <typename Q>
1152 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1154 hash_node const& n = static_cast<hash_node const&>( v );
1155 if ( n.m_nHash != q.nHash )
1156 return n.m_nHash < q.nHash ? -1 : 1;
1158 assert( !n.is_dummy());
1159 return base_class()(v, q.val);
1162 template <typename Q>
1163 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1165 hash_node const& n = static_cast<hash_node const&>( v );
1166 if ( n.m_nHash != q.nHash )
1167 return q.nHash < n.m_nHash ? -1 : 1;
1169 assert( !n.is_dummy());
1170 return base_class()(q.val, v);
1173 int operator()( value_type const& lhs, value_type const& rhs ) const
1175 hash_node const& n1 = static_cast<hash_node const&>( lhs );
1176 hash_node const& n2 = static_cast<hash_node const&>( rhs );
1177 if ( n1.m_nHash != n2.m_nHash )
1178 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1180 if ( n1.is_dummy()) {
1181 assert( n2.is_dummy());
1185 assert( !n1.is_dummy() && !n2.is_dummy());
1187 return base_class()( lhs, rhs );
1191 typedef typename native_ordered_list::template rebind_traits<
1192 opt::compare< key_compare >
1193 , opt::disposer< wrapped_disposer >
1194 , opt::boundary_node_type< aux_node >
1198 template <class OrderedList, class Traits>
1199 using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
1201 template <typename OrderedList, bool IsConst>
1202 struct select_list_iterator;
1204 template <typename OrderedList>
1205 struct select_list_iterator<OrderedList, false>
1207 typedef typename OrderedList::iterator type;
1210 template <typename OrderedList>
1211 struct select_list_iterator<OrderedList, true>
1213 typedef typename OrderedList::const_iterator type;
1216 template <typename NodeTraits, typename OrderedList, bool IsConst>
1219 typedef OrderedList ordered_list_type;
1220 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
1223 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
1224 typedef NodeTraits node_traits;
1227 list_iterator m_itCur;
1228 list_iterator m_itEnd;
1231 typedef typename list_iterator::value_ptr value_ptr;
1232 typedef typename list_iterator::value_ref value_ref;
1238 iterator_type( iterator_type const& src )
1239 : m_itCur( src.m_itCur )
1240 , m_itEnd( src.m_itEnd )
1243 // This ctor should be protected...
1244 iterator_type( list_iterator itCur, list_iterator itEnd )
1249 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
1253 value_ptr operator ->() const
1255 return m_itCur.operator->();
1258 value_ref operator *() const
1260 return m_itCur.operator*();
1264 iterator_type& operator ++()
1266 if ( m_itCur != m_itEnd ) {
1269 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
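                // Both the constructor above and operator++ skip auxiliary (dummy) bucket-head
                // nodes, so the split-list iterator yields only the user data items stored in the
                // underlying ordered list.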
1274 iterator_type& operator = (iterator_type const& src)
1276 m_itCur = src.m_itCur;
1277 m_itEnd = src.m_itEnd;
1282 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1284 return m_itCur == i.m_itCur;
1287 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1289 return m_itCur != i.m_itCur;
1293 list_iterator const& underlying_iterator() const
1298 } // namespace details
1303 template <typename BitReversalAlgo>
1304 static inline size_t regular_hash( size_t nHash )
1306 return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1);
1309 template <typename BitReversalAlgo>
1310 static inline size_t dummy_hash( size_t nHash )
1312 return static_cast<size_t>( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1);
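        /*
            Illustration of the split-order hash encoding (an 8-bit sketch; the real functions
            operate on size_t): for a raw hash 0b00000110 the bit-reversed value is 0b01100000,
            so regular_hash() yields 0b01100001 (LSB forced to 1, marking a data node) and
            dummy_hash() yields 0b01100000 (LSB cleared, marking a bucket-head dummy node).
            Keeping data hashes odd and dummy hashes even ensures that a bucket's dummy node
            orders before every data node belonging to that bucket.

            \code
            // reverse8 is an illustrative 8-bit reversal, not the library's size_t version
            // reverse8( 0b00000110 ) == 0b01100000
            // regular:  0b01100000 | 1  == 0b01100001
            // dummy:    0b01100000 & ~1 == 0b01100000
            \endcode
        */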
1316 } // namespace split_list
1319 // Forward declaration
1320 template <class GC, class OrderedList, class Traits = split_list::traits>
1324 }} // namespace cds::intrusive
1326 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H