From 4b17ef88f9661cc73dafe892ae091298edefb076 Mon Sep 17 00:00:00 2001 From: khizmax Date: Sat, 6 May 2017 19:38:33 +0300 Subject: [PATCH] [SplitList] Fixed TSan warnings --- cds/details/defs.h | 8 ++++ cds/intrusive/details/split_list_base.h | 55 ++++++++++++++++++------- cds/intrusive/free_list.h | 5 ++- cds/intrusive/free_list_tagged.h | 9 ++-- cds/intrusive/segmented_queue.h | 2 +- cds/intrusive/split_list.h | 15 ++++++- 6 files changed, 72 insertions(+), 22 deletions(-) diff --git a/cds/details/defs.h b/cds/details/defs.h index b1f0331c..4dec0281 100644 --- a/cds/details/defs.h +++ b/cds/details/defs.h @@ -354,6 +354,14 @@ namespace cds {} # define CDS_STRICT_DO( _expr ) #endif +#ifdef CDS_DEBUG +# define cds_assert( expr ) assert( expr ) +#else + static inline void cds_assert( bool expr ) { + if ( !expr ) + abort(); + } +#endif // Compiler-specific defines #include diff --git a/cds/intrusive/details/split_list_base.h b/cds/intrusive/details/split_list_base.h index fb0e2305..71d47744 100644 --- a/cds/intrusive/details/split_list_base.h +++ b/cds/intrusive/details/split_list_base.h @@ -59,7 +59,7 @@ namespace cds { namespace intrusive { } /// Initializes dummy node with \p nHash value - hash_node( size_t nHash ) + explicit hash_node( size_t nHash ) : m_nHash( nHash ) { assert( is_dummy()); @@ -93,7 +93,7 @@ namespace cds { namespace intrusive { } /// Initializes dummy node with \p nHash value - node( size_t nHash ) + explicit node( size_t nHash ) : hash_node( nHash ) { assert( is_dummy()); @@ -119,7 +119,7 @@ namespace cds { namespace intrusive { } /// Initializes dummy node with \p nHash value - node( size_t nHash ) + explicit node( size_t nHash ) : hash_node( nHash ) { assert( is_dummy()); @@ -396,7 +396,16 @@ namespace cds { namespace intrusive { /// Auxiliary node type struct aux_node_type: public node_type, public free_list::node - {}; + { +# ifdef CDS_DEBUG + atomics::atomic m_busy; + + aux_node_type() + { + m_busy.store( false, 
atomics::memory_order_release ); + } +# endif + }; typedef atomics::atomic table_entry; ///< Table entry type typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator @@ -482,8 +491,10 @@ namespace cds { namespace intrusive { if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) { // alloc next free node from m_auxNode size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed ); - if ( idx < capacity()) + if ( idx < capacity() ) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) ); return new( &m_auxNode[idx] ) aux_node_type(); + } } // get from free-list @@ -555,8 +566,17 @@ namespace cds { namespace intrusive { typedef typename options::free_list free_list; /// Auxiliary node type - class aux_node_type: public node_type, public free_list::node - {}; + struct aux_node_type: public node_type, public free_list::node + { +# ifdef CDS_DEBUG + atomics::atomic m_busy; + + aux_node_type() + { + m_busy.store( false, atomics::memory_order_release ); + } +# endif + }; protected: //@cond @@ -569,9 +589,10 @@ namespace cds { namespace intrusive { // aux_node_type nodes[]; aux_node_segment() - : aux_node_count(0) - , next_segment( nullptr ) - {} + : next_segment( nullptr ) + { + aux_node_count.store( 0, atomics::memory_order_release ); + } aux_node_type* segment() { @@ -685,10 +706,12 @@ namespace cds { namespace intrusive { assert( aux_segment != nullptr ); // try to allocate from current aux segment - if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) { + if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) { size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); - if ( idx < m_metrics.nSegmentSize ) + if ( idx < m_metrics.nSegmentSize ) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, 
sizeof( aux_node_type ) ); return new( aux_segment->segment() + idx ) aux_node_type(); + } } // try allocate from free-list @@ -702,10 +725,11 @@ namespace cds { namespace intrusive { aux_node_segment* new_aux_segment = allocate_aux_segment(); new_aux_segment->next_segment = aux_segment; new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); - CDS_COMPILER_RW_BARRIER; - if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) - return new( new_aux_segment->segment()) aux_node_type(); + if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) ); + return new( new_aux_segment->segment() ) aux_node_type(); + } free_aux_segment( new_aux_segment ); } @@ -785,6 +809,7 @@ namespace cds { namespace intrusive { aux_node_segment* allocate_aux_segment() { char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize ); + CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) ); return new(p) aux_node_segment(); } diff --git a/cds/intrusive/free_list.h b/cds/intrusive/free_list.h index 7a5ca45d..b3a490bb 100644 --- a/cds/intrusive/free_list.h +++ b/cds/intrusive/free_list.h @@ -97,8 +97,9 @@ namespace cds { namespace intrusive { node() : m_freeListRefs( 0 ) - , m_freeListNext( nullptr ) - {} + { + m_freeListNext.store( nullptr, atomics::memory_order_release ); + } //@endcond }; diff --git a/cds/intrusive/free_list_tagged.h b/cds/intrusive/free_list_tagged.h index 3704b6e0..b9ecd4e0 100644 --- a/cds/intrusive/free_list_tagged.h +++ b/cds/intrusive/free_list_tagged.h @@ -88,8 +88,9 @@ namespace cds { namespace intrusive { atomics::atomic m_freeListNext; node() - : m_freeListNext( nullptr ) - {} + { + m_freeListNext.store( nullptr, 
atomics::memory_order_release ); + } //@endcond }; @@ -143,7 +144,8 @@ namespace cds { namespace intrusive { do { newHead.tag = currentHead.tag + 1; pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed ); - } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_relaxed ))); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_freeListNext ); + } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ))); } /// Gets a node from the free list. If the list is empty, returns \p nullptr @@ -152,6 +154,7 @@ tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire ); tagged_ptr newHead; while ( currentHead.ptr != nullptr ) { + CDS_TSAN_ANNOTATE_HAPPENS_AFTER( &currentHead.ptr->m_freeListNext ); newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed ); newHead.tag = currentHead.tag + 1; if ( cds_likely( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ))) diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h index d9204143..31247df6 100644 --- a/cds/intrusive/segmented_queue.h +++ b/cds/intrusive/segmented_queue.h @@ -256,7 +256,7 @@ namespace cds { namespace intrusive { // cell array is placed here in one continuous memory block // Initializes the segment - segment( size_t nCellCount ) + explicit segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list : cells( reinterpret_cast< cell *>( this + 1 )) , version( 0 ) diff --git a/cds/intrusive/split_list.h b/cds/intrusive/split_list.h index 7bbbd78d..fe792e06 100644 --- a/cds/intrusive/split_list.h +++ b/cds/intrusive/split_list.h @@ -1164,13 +1164,26 @@ namespace cds { namespace intrusive { { m_Stat.onHeadNodeAllocated(); aux_node_type* p = 
m_Buckets.alloc_aux_node(); - if ( p ) + if ( p ) { + CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; + // p->m_nHash is read-only data member p->m_nHash = nHash; + CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; +# ifdef CDS_DEBUG + cds_assert( !p->m_busy.load( atomics::memory_order_acquire ) ); + p->m_busy.store( true, atomics::memory_order_release ); +# endif + } return p; } void free_aux_node( aux_node_type * p ) { +# ifdef CDS_DEBUG + cds_assert( p->m_busy.load( atomics::memory_order_acquire ) ); + p->m_busy.store( false, atomics::memory_order_release ); +# endif + m_Buckets.free_aux_node( p ); m_Stat.onHeadNodeFreed(); } -- 2.34.1