[SplitList] Fixed TSan warnings
author: khizmax <libcds.dev@gmail.com>
Sat, 6 May 2017 16:38:33 +0000 (19:38 +0300)
committer: khizmax <libcds.dev@gmail.com>
Sat, 6 May 2017 16:38:33 +0000 (19:38 +0300)
cds/details/defs.h
cds/intrusive/details/split_list_base.h
cds/intrusive/free_list.h
cds/intrusive/free_list_tagged.h
cds/intrusive/segmented_queue.h
cds/intrusive/split_list.h

index b1f0331c7c4c84c54ffe1a1b66693e440fae8822..4dec0281a2f6d05eda4143a9a593e76d8af14f1d 100644 (file)
@@ -354,6 +354,14 @@ namespace cds {}
 #   define CDS_STRICT_DO( _expr )
 #endif
 
+#ifdef CDS_DEBUG
+#   define cds_assert( expr )       assert( expr )
+#else
+    static inline void cds_assert( bool expr ) {
+        if ( !expr )
+            abort();
+    }
+#endif
 
 // Compiler-specific defines
 #include <cds/compiler/defs.h>
index fb0e230510f77a4191fa132ee4598a6a1549535f..71d47744a16ee6dc9f71979e3c23a31fe6519f59 100644 (file)
@@ -59,7 +59,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            hash_node( size_t nHash )
+            explicit hash_node( size_t nHash )
                 : m_nHash( nHash )
             {
                 assert( is_dummy());
@@ -93,7 +93,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -119,7 +119,7 @@ namespace cds { namespace intrusive {
             }
 
             /// Initializes dummy node with \p nHash value
-            node( size_t nHash )
+            explicit node( size_t nHash )
                 : hash_node( nHash )
             {
                 assert( is_dummy());
@@ -396,7 +396,16 @@ namespace cds { namespace intrusive {
 
             /// Auxiliary node type
             struct aux_node_type: public node_type, public free_list::node
-            {};
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
             typedef atomics::atomic<aux_node_type *> table_entry;  ///< Table entry type
             typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator
@@ -482,8 +491,10 @@ namespace cds { namespace intrusive {
                 if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) {
                     // alloc next free node from m_auxNode
                     size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed );
-                    if ( idx < capacity())
+                    if ( idx < capacity() ) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type ) );
                         return new( &m_auxNode[idx] ) aux_node_type();
+                    }
                 }
 
                 // get from free-list
@@ -555,8 +566,17 @@ namespace cds { namespace intrusive {
             typedef typename options::free_list free_list;
 
             /// Auxiliary node type
-            class aux_node_type: public node_type, public free_list::node
-            {};
+            struct aux_node_type: public node_type, public free_list::node
+            {
+#           ifdef CDS_DEBUG
+                atomics::atomic<bool> m_busy;
+
+                aux_node_type()
+                {
+                    m_busy.store( false, atomics::memory_order_release );
+                }
+#           endif
+            };
 
         protected:
             //@cond
@@ -569,9 +589,10 @@ namespace cds { namespace intrusive {
                 // aux_node_type     nodes[];
 
                 aux_node_segment()
-                    : aux_node_count(0)
-                    , next_segment( nullptr )
-                {}
+                    : next_segment( nullptr )
+                {
+                    aux_node_count.store( 0, atomics::memory_order_release );
+                }
 
                 aux_node_type* segment()
                 {
@@ -685,10 +706,12 @@ namespace cds { namespace intrusive {
                     assert( aux_segment != nullptr );
 
                     // try to allocate from current aux segment
-                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_relaxed ) < m_metrics.nSegmentSize ) {
+                    if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) {
                         size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                        if ( idx < m_metrics.nSegmentSize )
+                        if ( idx < m_metrics.nSegmentSize ) {
+                            CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type ) );
                             return new( aux_segment->segment() + idx ) aux_node_type();
+                        }
                     }
 
                     // try allocate from free-list
@@ -702,10 +725,11 @@ namespace cds { namespace intrusive {
                     aux_node_segment* new_aux_segment = allocate_aux_segment();
                     new_aux_segment->next_segment = aux_segment;
                     new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed );
-                    CDS_COMPILER_RW_BARRIER;
 
-                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ))
-                        return new( new_aux_segment->segment()) aux_node_type();
+                    if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire ) ) {
+                        CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type ) );
+                        return new( new_aux_segment->segment() ) aux_node_type();
+                    }
 
                     free_aux_segment( new_aux_segment );
                 }
@@ -785,6 +809,7 @@ namespace cds { namespace intrusive {
             aux_node_segment* allocate_aux_segment()
             {
                 char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize );
+                CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment ) );
                 return new(p) aux_node_segment();
             }
 
index 7a5ca45dd39874139b3bd9cc7d4a75918685e108..b3a490bbf65d57471069df8968b23479c9910cb0 100644 (file)
@@ -97,8 +97,9 @@ namespace cds { namespace intrusive {
 
             node()
                 : m_freeListRefs( 0 )
-                , m_freeListNext( nullptr )
-            {}
+            {
+                m_freeListNext.store( nullptr, atomics::memory_order_release );
+            }
             //@endcond
         };
 
index 3704b6e0e6ecb9882da72786aa7779e9f006335d..b9ecd4e085fe641a50033cafb021c28cf8534043 100644 (file)
@@ -88,8 +88,9 @@ namespace cds { namespace intrusive {
             atomics::atomic<node *> m_freeListNext;
 
             node()
-                : m_freeListNext( nullptr )
-            {}
+            {
+                m_freeListNext.store( nullptr, atomics::memory_order_release );
+            }
             //@endcond
         };
 
@@ -143,7 +144,8 @@ namespace cds { namespace intrusive {
             do {
                 newHead.tag = currentHead.tag + 1;
                 pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed );
-            } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_relaxed )));
+                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_freeListNext );
+            } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire )));
         }
 
         /// Gets a node from the free list. If the list is empty, returns \p nullptr
@@ -152,6 +154,7 @@ namespace cds { namespace intrusive {
             tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire );
             tagged_ptr newHead;
             while ( currentHead.ptr != nullptr ) {
+                CDS_TSAN_ANNOTATE_HAPPENS_AFTER( &currentHead.ptr->m_freeListNext );
                 newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed );
                 newHead.tag = currentHead.tag + 1;
                 if ( cds_likely( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire )))
index d9204143a001e6799422dea3c751d0ec5e7fd3e0..31247df61c56811a20940324dc7225bad2bb2215 100644 (file)
@@ -256,7 +256,7 @@ namespace cds { namespace intrusive {
             // cell array is placed here in one continuous memory block
 
             // Initializes the segment
-            segment( size_t nCellCount )
+            explicit segment( size_t nCellCount )
                 // MSVC warning C4355: 'this': used in base member initializer list
                 : cells( reinterpret_cast< cell *>( this + 1 ))
                 , version( 0 )
index 7bbbd78da6f280c0aa071af132c82149bc24848e..fe792e06a36827b6a9b77662e6fdd97ed4bccf86 100644 (file)
@@ -1164,13 +1164,26 @@ namespace cds { namespace intrusive {
         {
             m_Stat.onHeadNodeAllocated();
             aux_node_type* p = m_Buckets.alloc_aux_node();
-            if ( p )
+            if ( p ) {
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                // p->m_nHash is read-only data member
                 p->m_nHash = nHash;
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+#       ifdef CDS_DEBUG
+                cds_assert( !p->m_busy.load( atomics::memory_order_acquire ) );
+                p->m_busy.store( true, atomics::memory_order_release );
+#       endif
+            }
             return p;
         }
 
         void free_aux_node( aux_node_type * p )
         {
+#       ifdef CDS_DEBUG
+            cds_assert( p->m_busy.load( atomics::memory_order_acquire ) );
+            p->m_busy.store( false, atomics::memory_order_release );
+#       endif
+
             m_Buckets.free_aux_node( p );
             m_Stat.onHeadNodeFreed();
         }