diff --git a/cds/intrusive/split_list_nogc.h b/cds/intrusive/split_list_nogc.h
index 41066182..966f5da3 100644
--- a/cds/intrusive/split_list_nogc.h
+++ b/cds/intrusive/split_list_nogc.h
@@ -1,11 +1,11 @@
 /*
     This file is a part of libcds - Concurrent Data Structures library
 
-    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017
 
     Source code repo: http://github.com/khizmax/libcds/
     Download: http://sourceforge.net/projects/libcds/files/
-    
+
     Redistribution and use in source and binary forms, with or without
     modification, are permitted provided that the following conditions are met:
 
@@ -25,7 +25,7 @@
     SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
     CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
     OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.    
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
 #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H
@@ -35,6 +35,7 @@
 #include <cds/intrusive/details/split_list_base.h>
 #include <cds/gc/nogc.h>
+#include <cds/details/type_padding.h>
 
 namespace cds { namespace intrusive {
 
@@ -69,46 +70,56 @@
     protected:
         //@cond
-        typedef split_list::details::rebind_list_traits<OrderedList, traits> wrapped_ordered_list;
+        typedef split_list::details::rebind_list_traits<OrderedList, traits> ordered_list_adapter;
         //@endcond
 
     public:
 #   ifdef CDS_DOXYGEN_INVOKED
         typedef OrderedList ordered_list;   ///< type of ordered list used as base for split-list
 #   else
-        typedef typename wrapped_ordered_list::result ordered_list;
+        typedef typename ordered_list_adapter::result ordered_list;
 #   endif
 
         typedef typename ordered_list::value_type     value_type;     ///< type of value stored in the split-list
         typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor
         typedef typename ordered_list::disposer       disposer;       ///< Node disposer functor
 
+        typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal
         typedef typename traits::item_counter item_counter; ///< Item counter type
         typedef typename traits::back_off     back_off;     ///< back-off strategy
         typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option
         typedef typename traits::stat         stat;         ///< Internal statistics, see \p split_list::stat
 
+        // GC and OrderedList::gc must be the same
+        static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same" );
+
+        // atomicity::empty_item_counter is not allowed as an item counter
+        static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
+            "cds::atomicity::empty_item_counter is not allowed as a item counter" );
+
     protected:
+        //@cond
         typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list
         typedef split_list::node<list_node_type> node_type;      ///< split-list node type
-        typedef node_type                        dummy_node_type; ///< dummy node type
 
         /// Split-list node traits
        /**
            This traits is intended for converting between underlying ordered list node type \ref list_node_type
            and split-list node type \ref node_type
        */
-        typedef split_list::node_traits<typename ordered_list::node_traits> node_traits;
+        typedef typename ordered_list_adapter::node_traits node_traits;
 
-        //@cond
        /// Bucket table implementation
        typedef typename split_list::details::bucket_table_selector<
            traits::dynamic_bucket_table
            , gc
-            , dummy_node_type
+            , typename ordered_list_adapter::aux_node
            , opt::allocator< typename traits::allocator >
            , opt::memory_model< memory_model >
+            , opt::free_list< typename traits::free_list >
        >::type bucket_table;
 
+        typedef typename bucket_table::aux_node_type aux_node_type; ///< dummy node type
+
        typedef typename ordered_list::iterator       list_iterator;
        typedef typename ordered_list::const_iterator list_const_iterator;
        //@endcond
 
@@ -122,7 +133,7 @@
            typedef typename base_class::auxiliary_head bucket_head_type;
 
        public:
-            list_iterator insert_at_( dummy_node_type * pHead, value_type& val )
+            list_iterator insert_at_( aux_node_type * pHead, value_type& val )
            {
                assert( pHead != nullptr );
                bucket_head_type h( static_cast<list_node_type *>( pHead ));
@@ -130,7 +141,7 @@
            }
 
            template <typename Func>
-            std::pair<list_iterator, bool> update_at_( dummy_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
+            std::pair<list_iterator, bool> update_at_( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert )
            {
                assert( pHead != nullptr );
                bucket_head_type h( static_cast<list_node_type *>( pHead ));
@@ -138,7 +149,7 @@
            }
 
            template <typename Q, typename Compare, typename Func>
-            bool find_at( dummy_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
+            bool find_at( aux_node_type * pHead, split_list::details::search_value_type<Q>& val, Compare cmp, Func f )
            {
                assert( pHead != nullptr );
                bucket_head_type h( static_cast<list_node_type *>( pHead ));
@@ -146,18 +157,18 @@
            }
 
            template <typename Q, typename Compare>
-            list_iterator find_at_( dummy_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
+            list_iterator find_at_( aux_node_type * pHead, split_list::details::search_value_type<Q> const& val, Compare cmp )
            {
                assert( pHead != nullptr );
                bucket_head_type h( static_cast<list_node_type *>( pHead ));
                return base_class::find_at_( h, val, cmp );
            }
 
-            bool insert_aux_node( dummy_node_type * pNode )
+            bool insert_aux_node( aux_node_type * pNode )
            {
                return base_class::insert_aux_node( pNode );
            }
-            bool insert_aux_node( dummy_node_type * pHead, dummy_node_type * pNode )
+            bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode )
            {
                bucket_head_type h( static_cast<list_node_type *>( pHead ));
                return base_class::insert_aux_node( h, pNode );
@@ -169,150 +180,6 @@
                return base_class::erase_for( pred );
            }
        };
-
-        //@endcond
-
-    protected:
-        ordered_list_wrapper    m_List;             ///< Ordered list containing split-list items
-        bucket_table            m_Buckets;          ///< bucket table
-        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
-        atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items container can hold, before we have to resize
-        item_counter            m_ItemCounter;      ///< Item counter
-        hash                    m_HashFunctor;      ///< Hash functor
-        stat                    m_Stat;             ///< Internal statistics
-
-    protected:
-        //@cond
-        typedef cds::details::Allocator< dummy_node_type, typename traits::allocator > dummy_node_allocator;
-
-        dummy_node_type * alloc_dummy_node( size_t nHash )
-        {
-            m_Stat.onHeadNodeAllocated();
-            return dummy_node_allocator().New( nHash );
-        }
-        void free_dummy_node( dummy_node_type * p )
-        {
-            dummy_node_allocator().Delete( p );
-            m_Stat.onHeadNodeFreed();
-        }
-
-        /// Calculates hash value of \p key
-        template <typename Q>
-        size_t hash_value( Q const& key ) const
-        {
-            return m_HashFunctor( key );
-        }
-
-        size_t bucket_no( size_t nHash ) const
-        {
-            return nHash & ( (1 << m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 );
-        }
-
-        static size_t parent_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            return nBucket & ~( 1 << bitop::MSBnz( nBucket ) );
-        }
-
-        dummy_node_type * init_bucket( size_t nBucket )
-        {
-            assert( nBucket > 0 );
-            size_t nParent = parent_bucket( nBucket );
-
-            dummy_node_type * pParentBucket = m_Buckets.bucket( nParent );
-            if ( pParentBucket == nullptr ) {
-                pParentBucket = init_bucket( nParent );
-                m_Stat.onRecursiveInitBucket();
-            }
-
-            assert( pParentBucket != nullptr );
-
-            // Allocate a dummy node for new bucket
-            {
-                dummy_node_type * pBucket = alloc_dummy_node( split_list::dummy_hash( nBucket ) );
-                if ( m_List.insert_aux_node( pParentBucket, pBucket ) ) {
-                    m_Buckets.bucket( nBucket, pBucket );
-                    m_Stat.onNewBucket();
-                    return pBucket;
-                }
-                free_dummy_node( pBucket );
-            }
-
-            // Another thread set the bucket. Wait while it done
-
-            // In this point, we must wait while nBucket is empty.
-            // The compiler can decide that waiting loop can be "optimized" (stripped)
-            // To prevent this situation, we use waiting on volatile bucket_head_ptr pointer.
-            //
-            m_Stat.onBucketInitContenton();
-            back_off bkoff;
-            while ( true ) {
-                dummy_node_type volatile * p = m_Buckets.bucket( nBucket );
-                if ( p && p != nullptr )
-                    return const_cast<dummy_node_type *>( p );
-                bkoff();
-                m_Stat.onBusyWaitBucketInit();
-            }
-        }
-
-        dummy_node_type * get_bucket( size_t nHash )
-        {
-            size_t nBucket = bucket_no( nHash );
-
-            dummy_node_type * pHead = m_Buckets.bucket( nBucket );
-            if ( pHead == nullptr )
-                pHead = init_bucket( nBucket );
-
-            assert( pHead->is_dummy() );
-
-            return pHead;
-        }
-
-        void init()
-        {
-            // GC and OrderedList::gc must be the same
-            static_assert( std::is_same<gc, typename ordered_list::gc>::value, "GC and OrderedList::gc must be the same");
-
-            // atomicity::empty_item_counter is not allowed as an item counter
-            static_assert( !std::is_same<item_counter, atomicity::empty_item_counter>::value,
-                "cds::atomicity::empty_item_counter is not allowed as a item counter");
-
-            // Initialize bucket 0
-            dummy_node_type * pNode = alloc_dummy_node( 0 /*split_list::dummy_hash(0)*/ );
-
-            // insert_aux_node cannot return false for empty list
-            CDS_VERIFY( m_List.insert_aux_node( pNode ));
-
-            m_Buckets.bucket( 0, pNode );
-        }
-
-        static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
-        {
-            return nBucketCount * nLoadFactor;
-        }
-
-        void inc_item_count()
-        {
-            size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed);
-            if ( ++m_ItemCounter <= nMaxCount )
-                return;
-
-            size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed);
-            const size_t nBucketCount = static_cast<size_t>(1) << sz;
-            if ( nBucketCount < m_Buckets.capacity() ) {
-                // we may grow the bucket table
-                const size_t nLoadFactor = m_Buckets.load_factor();
-                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
-                    return; // someone already have updated m_nBucketCountLog2, so stop here
-
-                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
-                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
-                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
-            }
-            else
-                m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
-        }
-        //@endcond
 
     public:
 
@@ -324,7 +191,7 @@
        */
        SplitListSet()
            : m_nBucketCountLog2(1)
-            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
+            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
        {
            init();
        }
 
@@ -336,11 +203,16 @@
            )
            : m_Buckets( nItemCount, nLoadFactor )
            , m_nBucketCountLog2(1)
-            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()) )
+            , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor()))
        {
            init();
        }
 
+        /// Destroys split-list
+        ~SplitListSet()
+        {
+            m_List.clear();
+        }
+
    public:
        /// Inserts new node
        /**
@@ -374,7 +246,7 @@
            The functor may change non-key fields of the \p item.
 
-            Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successfull,
+            Returns <tt> std::pair<bool, bool> </tt> where \p first is \p true if operation is successful,
            \p second is \p true if new item has been added or \p false if the item with \p key
            already is in the list.
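
[Reviewer note: a hedged sketch of the update() contract documented above.
`my_set` is a hypothetical SplitListSet<cds::gc::nogc, ...> instance and
`my_item` its hypothetical value type; the functor signature assumed here is
the usual nogc update() convention of the underlying ordered list.]

    my_item v;
    v.key = 42;

    std::pair<bool, bool> res = my_set.update( v,
        []( bool bNew, my_item& item, my_item& arg ) {
            // bNew == true : `item` has just been inserted (&item == &arg)
            // bNew == false: `item` is the element already in the set,
            //                `arg` is the argument passed to update()
            // Only non-key fields of `item` may be changed here
            if ( !bNew )
                item.payload = arg.payload;
        },
        true );  // bAllowInsert = true: insert the item if the key is not found

    // res.first  - the operation succeeded (the item was found or inserted)
    // res.second - true: a new item was inserted; false: an existing item was updated
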
@@ -410,7 +282,7 @@
        value_type * contains( Q const& key )
        {
            iterator it = find_( key );
-            if ( it == end() )
+            if ( it == end())
                return nullptr;
            return &*it;
        }
@@ -433,7 +305,7 @@
        value_type * contains( Q const& key, Less pred )
        {
            iterator it = find_with_( key, pred );
-            if ( it == end() )
+            if ( it == end())
                return nullptr;
            return &*it;
        }
@@ -490,14 +362,14 @@
        bool find_with( Q& key, Less pred, Func f )
        {
            CDS_UNUSED( pred );
-            return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+            return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
        }
        //@cond
        template <typename Q, typename Less, typename Func>
        bool find_with( Q const& key, Less pred, Func f )
        {
            CDS_UNUSED( pred );
-            return find_( key, typename wrapped_ordered_list::template make_compare_from_less<Less>(), f );
+            return find_( key, typename ordered_list_adapter::template make_compare_from_less<Less>(), f );
        }
        //@endcond
 
@@ -540,6 +412,12 @@
            return m_Stat;
        }
 
+        /// Returns internal statistics for \p OrderedList
+        typename OrderedList::stat const& list_statistics() const
+        {
+            return m_List.statistics();
+        }
+
    protected:
        //@cond
        template <bool IsConst>
@@ -587,7 +465,7 @@
        */
        iterator begin()
        {
-            return iterator( m_List.begin(), m_List.end() );
+            return iterator( m_List.begin(), m_List.end());
        }
 
        /// Returns an iterator that addresses the location succeeding the last element in a split-list
@@ -599,31 +477,31 @@
        */
        iterator end()
        {
-            return iterator( m_List.end(), m_List.end() );
+            return iterator( m_List.end(), m_List.end());
        }
 
        /// Returns a forward const iterator addressing the first element in a split-list
        const_iterator begin() const
        {
-            return const_iterator( m_List.begin(), m_List.end() );
+            return const_iterator( m_List.begin(), m_List.end());
        }
 
        /// Returns a forward const iterator addressing the first element in a split-list
        const_iterator cbegin() const
        {
-            return const_iterator( m_List.cbegin(), m_List.cend() );
+            return const_iterator( m_List.cbegin(), m_List.cend());
        }
 
        /// Returns a const iterator that addresses the location succeeding the last element in a split-list
        const_iterator end() const
        {
-            return const_iterator( m_List.end(), m_List.end() );
+            return const_iterator( m_List.end(), m_List.end());
        }
 
        /// Returns a const iterator that addresses the location succeeding the last element in a split-list
        const_iterator cend() const
        {
-            return const_iterator( m_List.cend(), m_List.cend() );
+            return const_iterator( m_List.cend(), m_List.cend());
        }
        //@}
 
@@ -632,16 +510,16 @@
        iterator insert_( value_type& val )
        {
            size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
 
-            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
 
            list_iterator it = m_List.insert_at_( pHead, val );
-            if ( it != m_List.end() ) {
+            if ( it != m_List.end()) {
                inc_item_count();
                m_Stat.onInsertSuccess();
-                return iterator( it, m_List.end() );
+                return iterator( it, m_List.end());
            }
            m_Stat.onInsertFailed();
            return end();
@@ -651,13 +529,13 @@
        std::pair<bool, bool> update_( value_type& val, Func func, bool bAllowInsert )
        {
            size_t nHash = hash_value( val );
-            dummy_node_type * pHead = get_bucket( nHash );
+            aux_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
 
-            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash );
+            node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash<bit_reversal>( nHash );
 
            std::pair<list_iterator, bool> ret = m_List.update_at_( pHead, val, func, bAllowInsert );
-            if ( ret.first != m_List.end() ) {
+            if ( ret.first != m_List.end()) {
                if ( ret.second ) {
                    inc_item_count();
                    m_Stat.onUpdateNew();
@@ -674,39 +552,189 @@
        {
            CDS_UNUSED( pred );
            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
+            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
 
-            auto it = m_List.find_at_( pHead, sv, typename wrapped_ordered_list::template make_compare_from_less<Less>() );
-            m_Stat.onFind( it != m_List.end() );
-            return iterator( it, m_List.end() );
+            auto it = m_List.find_at_( pHead, sv, typename ordered_list_adapter::template make_compare_from_less<Less>());
+            m_Stat.onFind( it != m_List.end());
+            return iterator( it, m_List.end());
        }
 
        template <typename Q>
        iterator find_( Q const& val )
        {
            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
+            split_list::details::search_value_type<Q const> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
 
-            auto it = m_List.find_at_( pHead, sv, key_comparator() );
-            m_Stat.onFind( it != m_List.end() );
-            return iterator( it, m_List.end() );
+            auto it = m_List.find_at_( pHead, sv, key_comparator());
+            m_Stat.onFind( it != m_List.end());
+            return iterator( it, m_List.end());
        }
 
        template <typename Q, typename Compare, typename Func>
        bool find_( Q& val, Compare cmp, Func f )
        {
            size_t nHash = hash_value( val );
-            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash( nHash ));
-            dummy_node_type * pHead = get_bucket( nHash );
+            split_list::details::search_value_type<Q> sv( val, split_list::regular_hash<bit_reversal>( nHash ));
+            aux_node_type * pHead = get_bucket( nHash );
            assert( pHead != nullptr );
 
            return m_Stat.onFind( m_List.find_at( pHead, sv, cmp,
-                [&f]( value_type& item, split_list::details::search_value_type<Q>& val ) { f( item, val.val ); } ));
+                [&f]( value_type& item, split_list::details::search_value_type<Q>& v ) { f( item, v.val ); } ));
+        }
+
+        aux_node_type * alloc_aux_node( size_t nHash )
+        {
+            m_Stat.onHeadNodeAllocated();
+            aux_node_type * p = m_Buckets.alloc_aux_node();
+            if ( p )
+                p->m_nHash = nHash;
+            return p;
+        }
+
+        void free_aux_node( aux_node_type * p )
+        {
+            m_Buckets.free_aux_node( p );
+            m_Stat.onHeadNodeFreed();
+        }
+
+        /// Calculates hash value of \p key
+        template <typename Q>
+        size_t hash_value( Q const& key ) const
+        {
+            return m_HashFunctor( key );
+        }
+
+        size_t bucket_no( size_t nHash ) const
+        {
+            return nHash & ((1 << m_nBucketCountLog2.load( memory_model::memory_order_relaxed )) - 1);
+        }
+
+        static size_t parent_bucket( size_t nBucket )
+        {
+            assert( nBucket > 0 );
+            return nBucket & ~(1 << bitop::MSBnz( nBucket ));
+        }
+
+        aux_node_type * init_bucket( size_t const nBucket )
+        {
+            assert( nBucket > 0 );
+            size_t nParent = parent_bucket( nBucket );
+
+            aux_node_type * pParentBucket = m_Buckets.bucket( nParent );
+            if ( pParentBucket == nullptr ) {
+                pParentBucket = init_bucket( nParent );
+                m_Stat.onRecursiveInitBucket();
+            }
+
+            assert( pParentBucket != nullptr );
+
+            // Allocate an aux node for the new bucket
+            aux_node_type * pBucket = m_Buckets.bucket( nBucket );
+
+            back_off bkoff;
+            for ( ;; pBucket = m_Buckets.bucket( nBucket )) {
+                if ( pBucket )
+                    return pBucket;
+
+                pBucket = alloc_aux_node( split_list::dummy_hash<bit_reversal>( nBucket ));
+                if ( pBucket ) {
+                    if ( m_List.insert_aux_node( pParentBucket, pBucket )) {
+                        m_Buckets.bucket( nBucket, pBucket );
+                        m_Stat.onNewBucket();
+                        return pBucket;
+                    }
+
+                    // Another thread has set the bucket; free our node and wait until it is done
+                    free_aux_node( pBucket );
+                    m_Stat.onBucketInitContenton();
+                    break;
+                }
+
+                // There are no free aux nodes: the bucket table is full.
+                // Wait until another thread sets the bucket or a free node becomes available
+                m_Stat.onBucketsExhausted();
+                bkoff();
+            }
+
+            // Another thread has set the bucket. Wait until it is done
+            for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) {
+                bkoff();
+                m_Stat.onBusyWaitBucketInit();
+            }
+
+            return pBucket;
+        }
+
+        aux_node_type * get_bucket( size_t nHash )
+        {
+            size_t nBucket = bucket_no( nHash );
+
+            aux_node_type * pHead = m_Buckets.bucket( nBucket );
+            if ( pHead == nullptr )
+                pHead = init_bucket( nBucket );
+
+            assert( pHead->is_dummy());
+
+            return pHead;
+        }
+
+        void init()
+        {
+            // Initialize bucket 0
+            aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash<bit_reversal>(0)*/ );
+
+            // insert_aux_node cannot return false for empty list
+            CDS_VERIFY( m_List.insert_aux_node( pNode ));
+
+            m_Buckets.bucket( 0, pNode );
+        }
+
+        static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor )
+        {
+            return nBucketCount * nLoadFactor;
+        }
+
+        void inc_item_count()
+        {
+            size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed );
+            if ( ++m_ItemCounter <= nMaxCount )
+                return;
+
+            size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed );
+            const size_t nBucketCount = static_cast<size_t>(1) << sz;
+            if ( nBucketCount < m_Buckets.capacity()) {
+                // we may grow the bucket table
+                const size_t nLoadFactor = m_Buckets.load_factor();
+                if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor ))
+                    return; // someone else has already updated m_nBucketCountLog2, so stop here
+
+                m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ),
+                    memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+                m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed );
+            }
+            else
+                m_nMaxItemCount.store( std::numeric_limits<size_t>::max(), memory_model::memory_order_relaxed );
+        }
+        //@endcond
+
+    protected:
+        //@cond
+        static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value;
+
+        typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table;
+        padded_bucket_table     m_Buckets;          ///< bucket table
+
+        typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list;
+        padded_ordered_list     m_List;             ///< Ordered list containing split-list items
+
+        atomics::atomic<size_t> m_nBucketCountLog2; ///< log2( current bucket count )
+        atomics::atomic<size_t> m_nMaxItemCount;    ///< number of items container can hold, before we have to resize
+        hash                    m_HashFunctor;      ///< Hash functor
+        item_counter            m_ItemCounter;      ///< Item counter
+        stat                    m_Stat;             ///< Internal statistics
+        //@endcond
    };
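
[Reviewer note: a minimal end-to-end sketch of declaring and using the patched
container, assuming the usual libcds intrusive conventions. The names item,
item_hash, item_less, bucket_list and item_set are hypothetical and not part of
this patch; cds::Initialize()/thread-attach boilerplate is omitted.]

    #include <functional>
    #include <cds/intrusive/michael_list_nogc.h>
    #include <cds/intrusive/split_list_nogc.h>

    namespace ci = cds::intrusive;

    // The intrusive item embeds the ordered-list node via the split-list node wrapper
    struct item: public ci::split_list::node< ci::michael_list::node< cds::gc::nogc >>
    {
        int key;
        int payload;
    };

    struct item_hash {
        size_t operator()( int k ) const { return std::hash<int>()( k ); }
        size_t operator()( item const& i ) const { return operator()( i.key ); }
    };

    struct item_less {
        bool operator()( item const& l, item const& r ) const { return l.key < r.key; }
        bool operator()( item const& l, int k ) const { return l.key < k; }
        bool operator()( int k, item const& r ) const { return k < r.key; }
    };

    // Underlying ordered list used for the buckets
    typedef ci::MichaelList< cds::gc::nogc, item,
        typename ci::michael_list::make_traits<
            ci::opt::hook< ci::michael_list::base_hook< cds::opt::gc< cds::gc::nogc >>>
            , cds::opt::less< item_less >
        >::type
    > bucket_list;

    // The split-list set; its GC must match the list's GC (see the static_assert above)
    typedef ci::SplitListSet< cds::gc::nogc, bucket_list,
        typename ci::split_list::make_traits<
            cds::opt::hash< item_hash >
        >::type
    > item_set;

    void example()
    {
        item_set s( 100, 4 );           // expected item count and load factor

        static item i1;                 // nogc never reclaims items: they must outlive the set
        i1.key     = 42;
        i1.payload = 100;

        s.insert( i1 );                 // true: the key was not present before
        item* p = s.contains( 42 );     // &i1, or nullptr if the key is not found
        (void) p;
    }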