2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
32 #define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H
34 #include <cds/intrusive/details/base.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/details/allocator.h>
37 #include <cds/algo/int_algo.h>
38 #include <cds/algo/bitop.h>
39 #include <cds/opt/hash.h>
40 #include <cds/intrusive/free_list_selector.h>
42 namespace cds { namespace intrusive {
44 /// Split-ordered list related definitions
45 /** @ingroup cds_intrusive_helper
47 namespace split_list {
// Base part of every split-list node: carries the hash key used for
// split-ordering. (NOTE: several lines of this struct are elided in this view.)
51 size_t m_nHash; ///< Hash value for node
53 /// Default constructor
60 /// Initializes dummy node with \p nHash value
61 hash_node( size_t nHash )
67 /// Checks if the node is dummy node
// NOTE(review): a node is "dummy" (bucket head) iff the low bit of m_nHash is
// clear; regular data nodes have the bit set — presumably the hash stored here
// is the bit-reversed split-order key. TODO confirm against SplitListSet.
70 return (m_nHash & 1) == 0;
75 /// Split-ordered list node
// Combines the underlying ordered-list node type with the hash_node part,
// so one object participates in both the ordered list and split-ordering.
78 - \p OrderedListNode - node type for underlying ordered list
80 template <typename OrderedListNode>
81 struct node: public OrderedListNode, public hash_node
84 typedef OrderedListNode base_class;
87 /// Default constructor
94 /// Initializes dummy node with \p nHash value
101 /// Checks if the node is dummy node
102 bool is_dummy() const
// Delegates to hash_node: dummy-ness is encoded in the hash's low bit.
104 return hash_node::is_dummy();
// Specialization used when the underlying ordered list has no dedicated node
// type (void): only the hash_node part is stored.
111 struct node<void>: public hash_node
120 /// Initializes dummy node with \p nHash value
127 /// Checks if the node is dummy node
128 bool is_dummy() const
130 return hash_node::is_dummy();
135 /// \p SplitListSet internal statistics. May be used for debugging or profiling
137 Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter.
139 template <typename Counter = cds::atomicity::event_counter >
142 typedef Counter counter_type; ///< Counter type
144 counter_type m_nInsertSuccess; ///< Count of success inserting
145 counter_type m_nInsertFailed; ///< Count of failed inserting
146 counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function
147 counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item
148 counter_type m_nEraseSuccess; ///< Count of success erasing of items
149 counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item
150 counter_type m_nExtractSuccess; ///< Count of success extracting of items
151 counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item
152 counter_type m_nFindSuccess; ///< Count of success finding
153 counter_type m_nFindFailed; ///< Count of failed finding
154 counter_type m_nHeadNodeAllocated; ///< Count of allocated head node
155 counter_type m_nHeadNodeFreed; ///< Count of freed head node
156 counter_type m_nBucketCount; ///< Current bucket count
157 counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization
158 counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered
159 counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized
160 counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation
// Event hooks invoked by the set implementation; each bumps one counter.
163 void onInsertSuccess() { ++m_nInsertSuccess; }
164 void onInsertFailed() { ++m_nInsertFailed; }
165 void onUpdateNew() { ++m_nUpdateNew; }
166 void onUpdateExist() { ++m_nUpdateExist; }
167 void onEraseSuccess() { ++m_nEraseSuccess; }
168 void onEraseFailed() { ++m_nEraseFailed; }
169 void onExtractSuccess() { ++m_nExtractSuccess; }
170 void onExtractFailed() { ++m_nExtractFailed; }
171 void onFindSuccess() { ++m_nFindSuccess; }
172 void onFindFailed() { ++m_nFindFailed; }
// Dispatches to onFindSuccess/onFindFailed and returns bSuccess
// (body elided in this view — presumably mirrors empty_stat::onFind; confirm).
173 bool onFind(bool bSuccess)
181 void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; }
182 void onHeadNodeFreed() { ++m_nHeadNodeFreed; }
183 void onNewBucket() { ++m_nBucketCount; }
184 void onRecursiveInitBucket() { ++m_nInitBucketRecursive; }
185 void onBucketInitContenton() { ++m_nInitBucketContention; }
186 void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; }
187 void onBucketsExhausted() { ++m_nBucketsExhausted; }
191 /// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p split_list::stat
// Every hook is an empty const member so a statistics-disabled build is
// intended to compile to no code at all; only onFind() has an effect
// (it forwards its argument unchanged).
194 void onInsertSuccess() const {}
195 void onInsertFailed() const {}
196 void onUpdateNew() const {}
197 void onUpdateExist() const {}
198 void onEraseSuccess() const {}
199 void onEraseFailed() const {}
200 void onExtractSuccess() const {}
201 void onExtractFailed() const {}
202 void onFindSuccess() const {}
203 void onFindFailed() const {}
204 bool onFind( bool bSuccess ) const { return bSuccess; }
205 void onHeadNodeAllocated() const {}
206 void onHeadNodeFreed() const {}
207 void onNewBucket() const {}
208 void onRecursiveInitBucket() const {}
209 void onBucketInitContenton() const {}
210 void onBusyWaitBucketInit() const {}
211 void onBucketsExhausted() const {}
215 /// SplitListSet traits
// Default policy bundle for SplitListSet; every member can be overridden
// via make_traits<...> / option packing.
220 Hash function converts the key fields of struct \p T stored in the split list
221 into hash value of type \p size_t that is an index in hash table.
222 By default, \p std::hash is used.
224 typedef opt::none hash;
228 The item counting is an important part of \p SplitListSet algorithm:
229 the <tt>empty()</tt> member function depends on correct item counting.
230 Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option.
232 Default is \p cds::atomicity::item_counter.
234 typedef cds::atomicity::item_counter item_counter;
236 /// Bucket table allocator
238 Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
240 typedef CDS_DEFAULT_ALLOCATOR allocator;
242 /// Internal statistics (by default, disabled)
244 Possible statistics types are: \p split_list::stat (enable internal statistics),
245 \p split_list::empty_stat (the default, internal statistics disabled),
246 user-provided class that supports \p %split_list::stat interface.
248 typedef split_list::empty_stat stat;
251 /// C++ memory ordering model
253 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
254 or \p opt::v::sequential_consistent (sequentially consisnent memory model).
256 typedef opt::v::relaxed_ordering memory_model;
258 /// What type of bucket table is used
260 \p true - use \p split_list::expandable_bucket_table that can be expanded
261 if the load factor of the set is exhausted.
262 \p false - use \p split_list::static_bucket_table that cannot be expanded
263 and is allocated in \p SplitListSet constructor.
267 static const bool dynamic_bucket_table = true;
269 /// Back-off strategy
270 typedef cds::backoff::Default back_off;
272 /// Padding; default is cache-line padding
274 padding = cds::opt::cache_line_padding
277 /// Free-list of auxiliary nodes
279 The split-list contains auxiliary nodes marked the start of buckets.
280 To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list
// FreeListImpl comes from <cds/intrusive/free_list_selector.h> (included above).
284 - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive
285 - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive
287 typedef FreeListImpl free_list;
290 /// [value-option] Split-list dynamic bucket table option
292 The option is used to select bucket table implementation.
293 Possible values of \p Value are:
294 - \p true - select \p expandable_bucket_table
295 - \p false - select \p static_bucket_table
// Standard cds::opt option pack: overrides traits::dynamic_bucket_table.
297 template <bool Value>
298 struct dynamic_bucket_table
301 template <typename Base> struct pack: public Base
303 enum { dynamic_bucket_table = Value };
308 /// Metafunction converting option list to \p split_list::traits
310 Available \p Options:
311 - \p opt::hash - mandatory option, specifies hash functor.
312 - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter
314 - \p opt::memory_model - C++ memory model for atomic operations.
315 Can be \p opt::v::relaxed_ordering (relaxed memory model, the default)
316 or \p opt::v::sequential_consistent (sequentially consistent memory model).
317 - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR.
318 - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation.
319 Dynamic bucket table expands its size up to maximum bucket count when necessary
320 - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default.
321 - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled).
322 To enable internal statistics use \p split_list::stat.
323 - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding
324 - \p opt::free_list - a free-list implementation, see \p traits::free_list
// Applies the option pack on top of the default traits.
326 template <typename... Options>
328 typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction
331 /// Static bucket table
333 Non-resizeable bucket table for \p SplitListSet class.
334 The capacity of table (max bucket count) is defined in the constructor call.
337 - \p GC - garbage collector
338 - \p Node - node type, must be a type based on \p split_list::node
339 - \p Options... - options
342 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
343 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
344 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
345 otherwise \p FreeList.
347 template <typename GC, typename Node, typename... Options>
348 class static_bucket_table
351 struct default_options
353 typedef CDS_DEFAULT_ALLOCATOR allocator;
354 typedef opt::v::relaxed_ordering memory_model;
355 typedef FreeListImpl free_list;
357 typedef typename opt::make_options< default_options, Options... >::type options;
361 typedef GC gc; ///< Garbage collector
362 typedef Node node_type; ///< Bucket node type
363 typedef typename options::allocator allocator; ///< allocator
364 typedef typename options::memory_model memory_model; ///< Memory model for atomic operations
365 typedef typename options::free_list free_list; ///< Free-list
367 /// Auxiliary node type
// An aux node is both a list node and a free-list node so it can be recycled.
368 struct aux_node_type: public node_type, public free_list::node
371 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
372 typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
376 const size_t m_nLoadFactor; ///< load factor (average count of items per bucket)
377 const size_t m_nCapacity; ///< Bucket table capacity
378 table_entry * m_Table; ///< Bucket table
380 typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator;
382 aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes
383 atomics::atomic<size_t> m_nAuxNodeAllocated; ///< how many auxiliary node allocated
384 free_list m_freeList; ///< Free list
// Allocates the bucket-entry table (nullptr-filled) plus one pre-allocated
// aux node slot per bucket. Aux nodes are bump-allocated from m_auxNode.
389 void allocate_table()
391 m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr );
392 m_auxNode = aux_node_allocator().allocate( m_nCapacity );
// Teardown: drain the free-list first (its nodes live inside m_auxNode),
// then release the aux-node array and the bucket table.
397 m_freeList.clear( []( typename free_list::node* ) {} );
398 aux_node_allocator().deallocate( m_auxNode, m_nCapacity );
399 bucket_table_allocator().Delete( m_Table, m_nCapacity );
404 /// Constructs bucket table for 512K buckets. Load factor is 1.
405 static_bucket_table()
407 , m_nCapacity( 512 * 1024 )
408 , m_nAuxNodeAllocated( 0 )
413 /// Creates the table with specified size rounded up to nearest power-of-two
415 size_t nItemCount, ///< Max expected item count in split-ordered list
416 size_t nLoadFactor ///< Load factor
418 : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 )
419 , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor ))
420 , m_nAuxNodeAllocated( 0 )
422 // m_nCapacity must be power of 2
423 assert( cds::beans::is_power2( m_nCapacity ));
427 /// Destroys bucket table
428 ~static_bucket_table()
433 /// Returns head node of bucket \p nBucket
434 aux_node_type * bucket( size_t nBucket ) const
436 assert( nBucket < capacity());
437 return m_Table[ nBucket ].load(memory_model::memory_order_acquire);
440 /// Set \p pNode as a head of bucket \p nBucket
441 void bucket( size_t nBucket, aux_node_type * pNode )
443 assert( nBucket < capacity());
// A bucket head is published exactly once; release pairs with the acquire load above.
444 assert( bucket( nBucket ) == nullptr );
446 m_Table[ nBucket ].store( pNode, memory_model::memory_order_release );
449 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
450 aux_node_type* alloc_aux_node()
// Fast path: bump-allocate from the pre-allocated array. The relaxed
// pre-check limits runaway fetch_add; the idx re-check guards the race
// where several threads pass the pre-check simultaneously.
452 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
453 // alloc next free node from m_auxNode
454 size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
455 if ( idx < capacity())
456 return new( &m_auxNode[idx] ) aux_node_type();
459 // get from free-list
460 auto pFree = m_freeList.get();
// NOTE(review): if the free-list is empty pFree is presumably null and the
// caller receives nullptr, matching the contract above — elided lines hidden here.
462 return static_cast<aux_node_type*>( pFree );
468 /// Places node type to free-list
469 void free_aux_node( aux_node_type* p )
471 m_freeList.put( static_cast<aux_node_type*>( p ));
474 /// Returns the capacity of the bucket table
475 size_t capacity() const
480 /// Returns the load factor, i.e. average count of items per bucket
481 size_t load_factor() const
483 return m_nLoadFactor;
487 /// Expandable bucket table
489 This bucket table can dynamically grow its capacity when necessary
490 up to maximum bucket count.
493 - \p GC - garbage collector
494 - \p Node - node type, must be derived from \p split_list::node
495 - \p Options... - options
498 - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR
499 - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering
500 - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS
501 otherwise \p FreeList.
503 template <typename GC, typename Node, typename... Options>
504 class expandable_bucket_table
507 struct default_options
509 typedef CDS_DEFAULT_ALLOCATOR allocator;
510 typedef opt::v::relaxed_ordering memory_model;
511 typedef FreeListImpl free_list;
513 typedef typename opt::make_options< default_options, Options... >::type options;
517 typedef GC gc; ///< Garbage collector
518 typedef Node node_type; ///< Bucket node type
519 typedef typename options::allocator allocator; ///< allocator
521 /// Memory model for atomic operations
522 typedef typename options::memory_model memory_model;
525 typedef typename options::free_list free_list;
527 /// Auxiliary node type
528 class aux_node_type: public node_type, public free_list::node
533 typedef atomics::atomic<aux_node_type *> table_entry; ///< Table entry type
534 typedef atomics::atomic<table_entry *> segment_type; ///< Bucket table segment type
// Header of one aux-node segment; the aux nodes themselves are stored in the
// same raw allocation immediately after this header (see segment()).
536 struct aux_node_segment {
537 atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment
538 aux_node_segment* next_segment;
539 // aux_node_type nodes[];
543 , next_segment( nullptr )
546 aux_node_type* segment()
// this + 1 points just past the header, i.e. at the first aux node slot.
548 return reinterpret_cast<aux_node_type*>( this + 1 );
552 /// Bucket table metrics
554 size_t nSegmentCount; ///< max count of segments in bucket table
555 size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two.
556 size_t nSegmentSizeLog2; ///< <tt> log2( m_nSegmentSize )</tt>
557 size_t nLoadFactor; ///< load factor
558 size_t nCapacity; ///< max capacity of bucket table
561 : nSegmentCount( 1024 )
562 , nSegmentSize( 512 )
563 , nSegmentSizeLog2( cds::beans::log2( nSegmentSize ))
565 , nCapacity( nSegmentCount * nSegmentSize )
569 /// Bucket table allocator
570 typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator;
572 /// Bucket table segment allocator
573 typedef cds::details::Allocator< table_entry, allocator > segment_allocator;
575 // Aux node segment allocator
576 typedef typename allocator::template rebind<char>::other raw_allocator;
581 /// Constructs bucket table for 512K buckets. Load factor is 1.
582 expandable_bucket_table()
583 : m_metrics( calc_metrics( 512 * 1024, 1 ))
588 /// Creates the table with specified capacity rounded up to nearest power-of-two
589 expandable_bucket_table(
590 size_t nItemCount, ///< Max expected item count in split-ordered list
591 size_t nLoadFactor ///< Load factor
593 : m_metrics( calc_metrics( nItemCount, nLoadFactor ))
598 /// Destroys bucket table
599 ~expandable_bucket_table()
// Teardown order matters: drain the free-list (its nodes live inside the aux
// segments), then free the aux-segment chain, then each bucket segment, then
// the segment-pointer table itself.
601 m_freeList.clear( []( typename free_list::node* ) {} );
603 for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) {
604 auto next_segment = aux_segment->next_segment;
605 free_aux_segment( aux_segment );
606 aux_segment = next_segment;
609 segment_type * pSegments = m_Segments;
610 for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) {
611 table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed);
612 if ( pEntry != nullptr )
613 destroy_segment( pEntry );
616 destroy_table( pSegments );
619 /// Returns head node of the bucket \p nBucket
620 aux_node_type * bucket( size_t nBucket ) const
// Two-level lookup: high bits select the segment, low bits index inside it.
622 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
623 assert( nSegment < m_metrics.nSegmentCount );
625 table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire);
626 if ( pSegment == nullptr )
627 return nullptr; // uninitialized bucket
628 return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire);
631 /// Set \p pNode as a head of bucket \p nBucket
632 void bucket( size_t nBucket, aux_node_type * pNode )
634 size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2;
635 assert( nSegment < m_metrics.nSegmentCount );
637 segment_type& segment = m_Segments[nSegment];
// Lazily allocate the segment on first use. If a concurrent thread wins the
// CAS, discard our freshly allocated segment and use the winner's.
638 if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
639 table_entry* pNewSegment = allocate_segment();
640 table_entry * pNull = nullptr;
641 if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed ))
642 destroy_segment( pNewSegment );
645 assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr );
646 segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release );
649 /// Allocates auxiliary node; can return \p nullptr if the table exhausted
650 aux_node_type* alloc_aux_node()
// Three-stage allocation (retry loop elided in this view): bump-allocate from
// the current aux segment, then the free-list, then grow a new aux segment.
652 aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire );
655 assert( aux_segment != nullptr );
657 // try to allocate from current aux segment
658 if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
659 size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
660 if ( idx < m_metrics.nSegmentSize )
661 return new( aux_segment->segment() + idx ) aux_node_type();
664 // try allocate from free-list
665 auto pFree = m_freeList.get();
667 return static_cast<aux_node_type*>( pFree );
669 // free-list is empty, current segment is full
670 // try to allocate new aux segment
671 // We can allocate more aux segments than we need but it is not a problem in this context
672 aux_node_segment* new_aux_segment = allocate_aux_segment();
673 new_aux_segment->next_segment = aux_segment;
// Reserve slot 0 of the new segment for ourselves before publishing it.
674 new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
675 CDS_COMPILER_RW_BARRIER;
677 if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
678 return new( new_aux_segment->segment()) aux_node_type();
// Lost the publish race — release our segment and (presumably) retry the loop.
680 free_aux_segment( new_aux_segment );
684 /// Places auxiliary node type to free-list
685 void free_aux_node( aux_node_type* p )
687 m_freeList.put( static_cast<aux_node_type*>( p ));
690 /// Returns the capacity of the bucket table
691 size_t capacity() const
693 return m_metrics.nCapacity;
696 /// Returns the load factor, i.e. average count of items per bucket
697 size_t load_factor() const
699 return m_metrics.nLoadFactor;
// Derives segment count/size (both powers of two) from the expected item
// count and load factor; several branches are elided in this view.
704 metrics calc_metrics( size_t nItemCount, size_t nLoadFactor )
708 // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount
709 m.nLoadFactor = nLoadFactor > 0 ? nLoadFactor : 1;
711 size_t nBucketCount = (size_t)(((float)nItemCount) / m.nLoadFactor);
712 if ( nBucketCount <= 2 ) {
716 else if ( nBucketCount <= 1024 ) {
718 m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount );
721 nBucketCount = beans::log2ceil( nBucketCount );
723 m.nSegmentSize = ((size_t)1) << (nBucketCount / 2);
724 if ( nBucketCount & 1 )
726 if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount )
729 m.nCapacity = m.nSegmentCount * m.nSegmentSize;
730 m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize );
731 assert( m.nSegmentSizeLog2 != 0 ); //
735 segment_type * allocate_table()
737 return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr );
740 void destroy_table( segment_type * pTable )
742 bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount );
745 table_entry* allocate_segment()
747 return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr );
750 void destroy_segment( table_entry* pSegment )
752 segment_allocator().Delete( pSegment, m_metrics.nSegmentSize );
// One raw allocation holds the segment header followed by nSegmentSize aux
// node slots; placement-new constructs only the header here.
755 aux_node_segment* allocate_aux_segment()
757 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
758 return new(p) aux_node_segment();
761 void free_aux_segment( aux_node_segment* p )
763 raw_allocator().deallocate( reinterpret_cast<char*>( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
768 // m_nSegmentSize must be 2**N
769 assert( cds::beans::is_power2( m_metrics.nSegmentSize ));
770 assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize );
772 // m_nSegmentCount must be 2**K
773 assert( cds::beans::is_power2( m_metrics.nSegmentCount ));
775 m_Segments = allocate_table();
776 m_auxNodeList = allocate_aux_segment();
782 metrics const m_metrics; ///< Dynamic bucket table metrics
783 segment_type* m_Segments; ///< bucket table - array of segments
784 atomics::atomic<aux_node_segment*> m_auxNodeList; ///< segment list of aux nodes
785 free_list m_freeList; ///< List of free aux nodes
// Compile-time selector mapping traits::dynamic_bucket_table to the concrete
// bucket table type: true -> expandable, false -> static.
792 template <bool Value, typename GC, typename Node, typename... Options>
793 struct bucket_table_selector;
795 template <typename GC, typename Node, typename... Options>
796 struct bucket_table_selector< true, GC, Node, Options...>
798 typedef expandable_bucket_table<GC, Node, Options...> type;
801 template <typename GC, typename Node, typename... Options>
802 struct bucket_table_selector< false, GC, Node, Options...>
804 typedef static_bucket_table<GC, Node, Options...> type;
// Pairs a user search key with its precomputed split-order hash so comparators
// below can order by hash first without rehashing.
807 template <typename Q>
808 struct search_value_type
813 search_value_type( Q& v, size_t h )
// Adapts a non-iterable ordered list (Iterable = false) to split-list use:
// wraps its comparator to order by hash first, and rebinds its traits.
819 template <class OrderedList, class Traits, bool Iterable >
820 class ordered_list_adapter;
822 template <class OrderedList, class Traits>
823 class ordered_list_adapter< OrderedList, Traits, false >
825 typedef OrderedList native_ordered_list;
826 typedef Traits traits;
828 typedef typename native_ordered_list::gc gc;
829 typedef typename native_ordered_list::key_comparator native_key_comparator;
830 typedef typename native_ordered_list::node_type node_type;
831 typedef typename native_ordered_list::value_type value_type;
832 typedef typename native_ordered_list::node_traits native_node_traits;
833 typedef typename native_ordered_list::disposer native_disposer;
835 typedef split_list::node<node_type> splitlist_node_type;
// Comparator: hash decides first; equal-hash dummies compare equal
// (elided branch); otherwise fall back to the user's key comparator.
838 int operator()( value_type const& v1, value_type const& v2 ) const
840 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v1 ));
841 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v2 ));
842 if ( n1->m_nHash != n2->m_nHash )
843 return n1->m_nHash < n2->m_nHash ? -1 : 1;
845 if ( n1->is_dummy()) {
846 assert( n2->is_dummy());
850 assert( !n1->is_dummy() && !n2->is_dummy());
852 return native_key_comparator()(v1, v2);
855 template <typename Q>
856 int operator()( value_type const& v, search_value_type<Q> const& q ) const
858 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
859 if ( n->m_nHash != q.nHash )
860 return n->m_nHash < q.nHash ? -1 : 1;
862 assert( !n->is_dummy());
863 return native_key_comparator()(v, q.val);
866 template <typename Q>
867 int operator()( search_value_type<Q> const& q, value_type const& v ) const
// Symmetric case: negate the (value, query) comparison.
869 return -operator()( v, q );
// Disposer wrapper: elided lines presumably skip dummy nodes before invoking
// the native disposer — confirm against full source.
873 struct wrapped_disposer
875 void operator()( value_type * v )
877 splitlist_node_type * p = static_cast<splitlist_node_type *>(native_node_traits::to_node_ptr( v ));
879 native_disposer()(v);
884 typedef node_type ordered_list_node_type;
885 typedef splitlist_node_type aux_node;
// Node-traits shim translating between user values and split-list nodes by
// delegating to the native ordered-list node traits.
887 struct node_traits: private native_node_traits
889 typedef native_node_traits base_class; ///< Base ordered list node type
890 typedef typename base_class::value_type value_type; ///< Value type
891 typedef typename base_class::node_type base_node_type; ///< Ordered list node type
892 typedef node<base_node_type> node_type; ///< Split-list node type
894 /// Convert value reference to node pointer
895 static node_type * to_node_ptr( value_type& v )
897 return static_cast<node_type *>(base_class::to_node_ptr( v ));
900 /// Convert value pointer to node pointer
901 static node_type * to_node_ptr( value_type * v )
903 return static_cast<node_type *>(base_class::to_node_ptr( v ));
906 /// Convert value reference to node pointer (const version)
907 static node_type const * to_node_ptr( value_type const& v )
909 return static_cast<node_type const*>(base_class::to_node_ptr( v ));
912 /// Convert value pointer to node pointer (const version)
913 static node_type const * to_node_ptr( value_type const * v )
915 return static_cast<node_type const *>(base_class::to_node_ptr( v ));
918 /// Convert node reference to value pointer
919 static value_type * to_value_ptr( node_type& n )
921 return base_class::to_value_ptr( static_cast<base_node_type &>(n));
924 /// Convert node pointer to value pointer
925 static value_type * to_value_ptr( node_type * n )
927 return base_class::to_value_ptr( static_cast<base_node_type *>(n));
930 /// Convert node reference to value pointer (const version)
931 static const value_type * to_value_ptr( node_type const & n )
933 return base_class::to_value_ptr( static_cast<base_node_type const &>(n));
936 /// Convert node pointer to value pointer (const version)
937 static const value_type * to_value_ptr( node_type const * n )
939 return base_class::to_value_ptr( static_cast<base_node_type const *>(n));
// Builds a three-way comparator out of a user Less functor, with the same
// hash-first ordering as key_compare above.
943 template <typename Less>
944 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
946 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
948 template <typename Q>
949 int operator()( value_type const& v, search_value_type<Q> const& q ) const
951 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
952 if ( n->m_nHash != q.nHash )
953 return n->m_nHash < q.nHash ? -1 : 1;
955 assert( !n->is_dummy());
956 return base_class()(v, q.val);
959 template <typename Q>
960 int operator()( search_value_type<Q> const& q, value_type const& v ) const
962 splitlist_node_type const * n = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( v ));
963 if ( n->m_nHash != q.nHash )
964 return q.nHash < n->m_nHash ? -1 : 1;
966 assert( !n->is_dummy());
967 return base_class()(q.val, v);
970 int operator()( value_type const& lhs, value_type const& rhs ) const
972 splitlist_node_type const * n1 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( lhs ));
973 splitlist_node_type const * n2 = static_cast<splitlist_node_type const *>(native_node_traits::to_node_ptr( rhs ));
974 if ( n1->m_nHash != n2->m_nHash )
975 return n1->m_nHash < n2->m_nHash ? -1 : 1;
977 if ( n1->is_dummy()) {
978 assert( n2->is_dummy());
982 assert( !n1->is_dummy() && !n2->is_dummy());
984 return native_key_comparator()( lhs, rhs );
// Rebind the native list's traits with split-list comparator, disposer and
// boundary (dummy) node type.
988 typedef typename native_ordered_list::template rebind_traits<
989 opt::compare< key_compare >
990 , opt::disposer< wrapped_disposer >
991 , opt::boundary_node_type< splitlist_node_type >
// Adapter specialization for iterable ordered lists (Iterable = true): the
// list stores values directly, so hash_node is a base of value_type itself.
995 template <class OrderedList, class Traits>
996 class ordered_list_adapter< OrderedList, Traits, true >
998 typedef OrderedList native_ordered_list;
999 typedef Traits traits;
1001 typedef typename native_ordered_list::gc gc;
1002 typedef typename native_ordered_list::key_comparator native_key_comparator;
1003 typedef typename native_ordered_list::value_type value_type;
1004 typedef typename native_ordered_list::disposer native_disposer;
// Hash-first comparator; dummies with equal hash compare equal (elided branch).
1006 struct key_compare {
1007 int operator()( value_type const& v1, value_type const& v2 ) const
1009 hash_node const& n1 = static_cast<hash_node const&>( v1 );
1010 hash_node const& n2 = static_cast<hash_node const&>( v2 );
1011 if ( n1.m_nHash != n2.m_nHash )
1012 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1014 if ( n1.is_dummy()) {
1015 assert( n2.is_dummy());
1019 assert( !n1.is_dummy() && !n2.is_dummy());
1021 return native_key_comparator()(v1, v2);
1024 template <typename Q>
1025 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1027 hash_node const& n = static_cast<hash_node const&>( v );
1028 if ( n.m_nHash != q.nHash )
1029 return n.m_nHash < q.nHash ? -1 : 1;
1031 assert( !n.is_dummy());
1032 return native_key_comparator()(v, q.val);
1035 template <typename Q>
1036 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1038 return -operator()( v, q );
// The native disposer must never see a dummy (bucket-head) node: dummies are
// owned by the bucket table, not by the user.
1042 struct wrapped_disposer
1044 void operator()( value_type * v )
1046 if ( !static_cast<hash_node*>( v )->is_dummy())
1047 native_disposer()( v );
1052 typedef void ordered_list_node_type;
// Aux (bucket-head) node: its list-node data pointer is set to point at its
// own hash_node part so the iterable list treats it as a live value.
1054 struct aux_node: public native_ordered_list::node_type, public hash_node
1058 typedef typename native_ordered_list::node_type list_node_type;
1060 list_node_type::data.store( typename list_node_type::marked_data_ptr(
1061 static_cast<value_type*>( static_cast<hash_node *>( this ))),
1062 atomics::memory_order_release
// node_traits for the iterable case: values ARE nodes, so conversions are
// plain static_casts through the hash_node base.
1069 static hash_node * to_node_ptr( value_type& v )
1071 return static_cast<hash_node *>( &v );
1074 static hash_node * to_node_ptr( value_type * v )
1076 return static_cast<hash_node *>( v );
1079 static hash_node const * to_node_ptr( value_type const& v )
1081 return static_cast<hash_node const*>( &v );
1084 static hash_node const * to_node_ptr( value_type const * v )
1086 return static_cast<hash_node const *>( v );
// Less-functor adapter, mirroring make_compare_from_less of the non-iterable case.
1090 template <typename Less>
1091 struct make_compare_from_less: public cds::opt::details::make_comparator_from_less<Less>
1093 typedef cds::opt::details::make_comparator_from_less<Less> base_class;
1095 template <typename Q>
1096 int operator()( value_type const& v, search_value_type<Q> const& q ) const
1098 hash_node const& n = static_cast<hash_node const&>( v );
1099 if ( n.m_nHash != q.nHash )
1100 return n.m_nHash < q.nHash ? -1 : 1;
1102 assert( !n.is_dummy());
1103 return base_class()(v, q.val);
1106 template <typename Q>
1107 int operator()( search_value_type<Q> const& q, value_type const& v ) const
1109 hash_node const& n = static_cast<hash_node const&>( v );
1110 if ( n.m_nHash != q.nHash )
1111 return q.nHash < n.m_nHash ? -1 : 1;
1113 assert( !n.is_dummy());
1114 return base_class()(q.val, v);
1117 int operator()( value_type const& lhs, value_type const& rhs ) const
1119 hash_node const& n1 = static_cast<hash_node const&>( lhs );
1120 hash_node const& n2 = static_cast<hash_node const&>( rhs );
1121 if ( n1.m_nHash != n2.m_nHash )
1122 return n1.m_nHash < n2.m_nHash ? -1 : 1;
1124 if ( n1.is_dummy()) {
1125 assert( n2.is_dummy());
1129 assert( !n1.is_dummy() && !n2.is_dummy());
1131 return base_class()( lhs, rhs );
// Rebind the native list's traits; the boundary node here is aux_node.
1135 typedef typename native_ordered_list::template rebind_traits<
1136 opt::compare< key_compare >
1137 , opt::disposer< wrapped_disposer >
1138 , opt::boundary_node_type< aux_node >
// Selects the proper ordered_list_adapter specialization for OrderedList:
// the third template argument dispatches on whether the list is iterable
1142 template <class OrderedList, class Traits>
1143 using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list<OrderedList>::value >;
// Metafunction: picks the underlying list's iterator type according to
// constness. The primary template is declared only; the specializations
// below cover IsConst == false and IsConst == true.
1145 template <typename OrderedList, bool IsConst>
1146 struct select_list_iterator;
// Mutable iteration: use the list's iterator
1148 template <typename OrderedList>
1149 struct select_list_iterator<OrderedList, false>
1151 typedef typename OrderedList::iterator type;
// Const iteration: use the list's const_iterator
1154 template <typename OrderedList>
1155 struct select_list_iterator<OrderedList, true>
1157 typedef typename OrderedList::const_iterator type;
// Iterator over the underlying ordered list that skips dummy (bucket)
// nodes, so dereferencing always yields a real user item.
// NOTE(review): the class head and several statement lines are omitted
// from this excerpt; comments describe the visible members only.
1160 template <typename NodeTraits, typename OrderedList, bool IsConst>
1163 typedef OrderedList ordered_list_type;
// Const and non-const instantiations befriend each other so a mutable
// iterator can interoperate with a const one (see operator== below)
1164 friend class iterator_type <NodeTraits, OrderedList, !IsConst >;
1167 typedef typename select_list_iterator<ordered_list_type, IsConst>::type list_iterator;
1168 typedef NodeTraits node_traits;
// Current position and end sentinel of the underlying list
1171 list_iterator m_itCur;
1172 list_iterator m_itEnd;
1175 typedef typename list_iterator::value_ptr value_ptr;
1176 typedef typename list_iterator::value_ref value_ref;
// Copy constructor: copies both position and end sentinel
1182 iterator_type( iterator_type const& src )
1183 : m_itCur( src.m_itCur )
1184 , m_itEnd( src.m_itEnd )
1187 // This ctor should be protected...
// Positioning ctor: advance past any leading dummy nodes immediately
1188 iterator_type( list_iterator itCur, list_iterator itEnd )
1193 while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy())
// Dereference forwards to the underlying list iterator
1197 value_ptr operator ->() const
1199 return m_itCur.operator->();
1202 value_ref operator *() const
1204 return m_itCur.operator*();
// Pre-increment: step the underlying iterator, then keep stepping while
// the current node is a dummy bucket node
1208 iterator_type& operator ++()
1210 if ( m_itCur != m_itEnd ) {
1213 } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy());
1218 iterator_type& operator = (iterator_type const& src)
1220 m_itCur = src.m_itCur;
1221 m_itEnd = src.m_itEnd;
// Equality compares current position only; the C parameter (declared on
// an omitted template line) allows mixed const/non-const comparison
1226 bool operator ==(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1228 return m_itCur == i.m_itCur;
1231 bool operator !=(iterator_type<node_traits, ordered_list_type, C> const& i ) const
1233 return m_itCur != i.m_itCur;
1236 } // namespace details
1242 /// Reverses bit order in \p nHash
1243 static inline size_t reverse_bits( size_t nHash )
1245 return bitop::RBO( nHash );
1248 static inline size_t regular_hash( size_t nHash )
1250 return reverse_bits( nHash ) | size_t(1);
1253 static inline size_t dummy_hash( size_t nHash )
1255 return reverse_bits( nHash ) & ~size_t(1);
1259 } // namespace split_list
1262 // Forward declaration
1263 template <class GC, class OrderedList, class Traits = split_list::traits>
1267 }} // namespace cds::intrusive
1269 #endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H