TSan exam:
author: khizmax <libcds.dev@gmail.com>
Wed, 6 May 2015 19:12:34 +0000 (22:12 +0300)
committer: khizmax <libcds.dev@gmail.com>
Wed, 6 May 2015 19:12:34 +0000 (22:12 +0300)
- annotations have been moved to the allocator wrapper
- fixed data races for some containers
- improved Treiber's stack elimination algo

20 files changed:
cds/compiler/feature_tsan.h
cds/container/fcqueue.h
cds/container/fcstack.h
cds/container/lazy_kvlist_rcu.h
cds/container/msqueue.h
cds/container/split_list_set_rcu.h
cds/details/allocator.h
cds/intrusive/basket_queue.h
cds/intrusive/details/ellen_bintree_base.h
cds/intrusive/fcqueue.h
cds/intrusive/fcstack.h
cds/intrusive/impl/ellen_bintree.h
cds/intrusive/msqueue.h
cds/intrusive/optimistic_queue.h
cds/intrusive/segmented_queue.h
cds/intrusive/split_list_rcu.h
cds/intrusive/treiber_stack.h
cds/memory/michael/allocator.h
cds/opt/buffer.h
tests/unit/pqueue/push_pop.cpp

index cecbadc519bc0f7e8d746f219128edf64ed86589..1a05a17d851fa838bf6eb70f9990068bde428831 100644 (file)
@@ -6,6 +6,8 @@
 // Thread Sanitizer annotations.
 // From https://groups.google.com/d/msg/thread-sanitizer/SsrHB7FTnTk/mNTGNLQj-9cJ
 
+//@cond
+
 #ifdef CDS_THREAD_SANITIZER_ENABLED
 #   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)   AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast<void*>(addr))\r
 #   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)    AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<void*>(addr))\r
@@ -31,7 +33,9 @@
         void AnnotateIgnoreWritesBegin(const char *f, int l);\r
         void AnnotateIgnoreWritesEnd(const char *f, int l);\r
     }\r
-#else\r
+\r
+#else // CDS_THREAD_SANITIZER_ENABLED\r
+\r
 #   define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)\r
 #   define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)
 
@@ -41,6 +45,8 @@
 #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END\r
 #   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN\r
 #   define CDS_TSAN_ANNOTATE_IGNORE_RW_END\r
+\r
 #endif
 
+//@endcond
 #endif  // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H
index bc7b684b22e70e0b8e958a583ace41e7fa1f96aa..fedf97adc7f75e1ec7253169312a3b1d611e75ac 100644 (file)
@@ -290,6 +290,9 @@ namespace cds { namespace container {
         {
             assert( pRec );
 
+            // this function is called under FC mutex, so switch TSan off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             switch ( pRec->op() ) {
             case op_enq:
                 assert( pRec->pValEnq );
@@ -315,12 +318,17 @@ namespace cds { namespace container {
                 assert(false);
                 break;
             }
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
 
         /// Batch-processing flat combining
         void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd )
         {
             typedef typename fc_kernel::iterator fc_iterator;
+
+            // this function is called under FC mutex, so switch TSan off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
                 switch ( it->op() ) {
                 case op_enq:
@@ -335,6 +343,7 @@ namespace cds { namespace container {
                     break;
                 }
             }
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
         //@endcond
 
index 0f6aeb4d7c62b2a3f8d3b371e66513b443c67049..7ef2164ea6706559e2341b9351a5d0fff297ff46 100644 (file)
@@ -304,6 +304,7 @@ namespace cds { namespace container {
         {
             // this function is called under FC mutex, so switch TSan off
             CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             typedef typename fc_kernel::iterator fc_iterator;
             for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
                 switch ( it->op() ) {
index 1ead8ccc1327f9c622aba9eccf2cd13bd84402f8..de24d1312dc0124e0009786aff83085b80238fd9 100644 (file)
@@ -130,28 +130,19 @@ namespace cds { namespace container {
         template <typename K>
         static node_type * alloc_node(const K& key)
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().New( key );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().New( key );
         }
 
         template <typename K, typename V>
         static node_type * alloc_node( const K& key, const V& val )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().New( key, val );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().New( key, val );
         }
 
         template <typename... Args>
         static node_type * alloc_node( Args&&... args )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_allocator().MoveNew( std::forward<Args>(args)... );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_allocator().MoveNew( std::forward<Args>(args)... );
         }
 
         static void free_node( node_type * pNode )
index 3292ff916c674d52d07e6b532470ec0da090aacb..1d355e97fc218f8d619a26beaf380fc8f04cf67b 100644 (file)
@@ -117,10 +117,7 @@ namespace cds { namespace container {
             {
                 void operator ()( node_type * pNode )
                 {
-                    // TSan false positive possible
-                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                     cxx_allocator().Delete( pNode );
-                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
                 }
             };
 
index 7ad24be8f32ecca1bf38c086b86e2941ea6ccc45..f6caeb73e052357101bd7605af7f044eb87acdb1 100644 (file)
@@ -226,26 +226,18 @@ namespace cds { namespace container {
         template <typename Q>
         static node_type * alloc_node( Q const& v )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_node_allocator().New( v );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_node_allocator().New( v );
         }
 
         template <typename... Args>
         static node_type * alloc_node( Args&&... args )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            node_type * p = cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
         }
 
         static void free_node( node_type * pNode )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             cxx_node_allocator().Delete( pNode );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
 
         struct node_disposer {
index e37a16a2e4be970af95e5e63f40bd443b8d4ec49..cbc634c08deb0f8ee5b73e091fafacce6a35546f 100644 (file)
@@ -32,6 +32,9 @@ namespace cds {
                 , typename Alloc::template rebind<T>::other
             >::type allocator_type;
 
+            /// \p true if underlined allocator is \p std::allocator, \p false otherwise
+            static CDS_CONSTEXPR bool const c_bStdAllocator = std::is_same< allocator_type, std::allocator<T>>::value;
+
             /// Element type
             typedef T   value_type;
 
@@ -39,20 +42,52 @@ namespace cds {
             template <typename... S>
             value_type *  New( S const&... src )
             {
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                }
+                value_type * pv = Construct( allocator_type::allocate(1), src... );
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                }
+                return pv;
+#           else
                 return Construct( allocator_type::allocate(1), src... );
+#           endif
             }
 
             /// Analogue of <tt>operator new T( std::forward<Args>(args)... )</tt> (move semantics)
             template <typename... Args>
             value_type * MoveNew( Args&&... args )
             {
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                }
+                value_type * pv = MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                }
+                return pv;
+#           else
                 return MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+#           endif
             }
 
             /// Analogue of operator new T[\p nCount ]
             value_type * NewArray( size_t nCount )
             {
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                }
+#           endif
                 value_type * p = allocator_type::allocate( nCount );
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                }
+#           endif
                 for ( size_t i = 0; i < nCount; ++i )
                     Construct( p + i );
                 return p;
@@ -65,7 +100,17 @@ namespace cds {
             template <typename S>
             value_type * NewArray( size_t nCount, S const& src )
             {
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                }
+#           endif
                 value_type * p = allocator_type::allocate( nCount );
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                }
+#           endif
                 for ( size_t i = 0; i < nCount; ++i )
                     Construct( p + i, src );
                 return p;
@@ -95,16 +140,22 @@ namespace cds {
             /// Analogue of operator delete
             void Delete( value_type * p )
             {
+                // TSan false positive possible
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 allocator_type::destroy( p );
                 allocator_type::deallocate( p, 1 );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
 
             /// Analogue of operator delete []
             void Delete( value_type * p, size_t nCount )
             {
+                // TSan false positive possible
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                  for ( size_t i = 0; i < nCount; ++i )
                      allocator_type::destroy( p + i );
                 allocator_type::deallocate( p, nCount );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
 
 #       if CDS_COMPILER == CDS_COMPILER_INTEL
@@ -119,14 +170,22 @@ namespace cds {
             template <typename... S>
             value_type * Construct( void * p, S const&... src )
             {
-                return new( p ) value_type( src... );
+                // TSan false positive possible
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                value_type * pv = new( p ) value_type( src... );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                return pv;
             }
 
             /// Analogue of placement <tt>operator new( p ) T( std::forward<Args>(args)... )</tt>
             template <typename... Args>
             value_type * MoveConstruct( void * p, Args&&... args )
             {
-                return new( p ) value_type( std::forward<Args>(args)... );
+                // TSan false positive possible
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                value_type * pv = new( p ) value_type( std::forward<Args>(args)... );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                return pv;
             }
 
             /// Rebinds allocator to other type \p Q instead of \p T
@@ -143,7 +202,18 @@ namespace cds {
 
                 size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *);
                 typedef typename allocator_type::template rebind< void * >::other void_allocator;
-                return void_allocator().allocate( nPtrSize );
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                }
+#           endif
+                void * p = void_allocator().allocate( nPtrSize );
+#           if CDS_THREAD_SANITIZER_ENABLED
+                if ( c_bStdAllocator ) {
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                }
+#           endif
+                return p;
             }
             //@endcond
         };
@@ -163,7 +233,7 @@ namespace cds {
             */
             static void free( T * p )
             {
-                Allocator<T, Alloc> a;
+                Allocator<type, allocator_type> a;
                 a.Delete( p );
             }
         };
index 4c26872b8bc924912e42a02bc107a26fedf5eae9..f5d129a95274693655bb68c9211973a63ca02423 100644 (file)
@@ -514,7 +514,7 @@ namespace cds { namespace intrusive {
                             }
                         }
 
-                        m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed );
+                        m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed );
                     }
                     else {
                         marked_ptr iter( h );
@@ -537,7 +537,7 @@ namespace cds { namespace intrusive {
                         else if ( bDeque ) {
                             res.pNext = pNext.ptr();
 
-                            if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
+                            if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
                                 if ( hops >= m_nMaxHops )
                                     free_chain( h, pNext );
                                 break;
@@ -565,7 +565,7 @@ namespace cds { namespace intrusive {
         {
             // "head" and "newHead" are guarded
 
-            if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+            if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed ))
             {
                 typename gc::template GuardArray<2> guards;
                 guards.assign( 0, node_traits::to_value_ptr(head.ptr()) );
@@ -659,8 +659,8 @@ namespace cds { namespace intrusive {
 
                 if ( pNext.ptr() == nullptr ) {
                     pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
-                    if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
-                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+                    if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
+                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                             m_Stat.onAdvanceTailFailed();
                         break;
                     }
@@ -681,7 +681,7 @@ namespace cds { namespace intrusive {
                     {
                         bkoff();
                         pNew->m_pNext.store( pNext, memory_model::memory_order_release );
-                        if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) {
+                        if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
                             m_Stat.onAddBasket();
                             break;
                         }
@@ -713,7 +713,7 @@ namespace cds { namespace intrusive {
                         pNext = p;
                         g.assign( 0, g.template get<value_type>( 1 ) );
                     }
-                    if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+                    if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
                         m_Stat.onAdvanceTailFailed();
 
                     m_Stat.onBadTail();
index 54e928b768f13fdc537eb2d5e5ef039cf4469e26..1bb08e1b011b4d1e7eaebacfa7d6b037b55a9f09 100644 (file)
@@ -89,7 +89,7 @@ namespace cds { namespace intrusive {
                 key_infinite = key_infinite1 | key_infinite2    ///< Cumulative infinite flags
             };
 
-            unsigned int    m_nFlags    ;   ///< Internal flags
+            atomics::atomic<unsigned int>  m_nFlags    ;   ///< Internal flags
 
             /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node
             explicit basic_node( bool bInternal )
@@ -105,25 +105,26 @@ namespace cds { namespace intrusive {
             /// Checks if the node is internal
             bool is_internal() const
             {
-                return (m_nFlags & internal) != 0;
+                return (m_nFlags.load( atomics::memory_order_relaxed ) & internal) != 0;
             }
 
             /// Returns infinite key, 0 if the node is not infinite
             unsigned int infinite_key() const
             {
-                return m_nFlags & key_infinite;
+                return m_nFlags.load( atomics::memory_order_relaxed ) & key_infinite;
             }
 
             /// Sets infinite key for the node (for internal use only!!!)
             void infinite_key( int nInf )
             {
-                m_nFlags &= ~key_infinite;
+                const unsigned int nFlags = m_nFlags.load( atomics::memory_order_relaxed ) & ~key_infinite;
+                m_nFlags.store( nFlags, atomics::memory_order_relaxed );
                 switch ( nInf ) {
                 case 1:
-                    m_nFlags |= key_infinite1;
+                    m_nFlags.store( nFlags | key_infinite1, atomics::memory_order_relaxed );
                     break;
                 case 2:
-                    m_nFlags |= key_infinite2;
+                    m_nFlags.store( nFlags | key_infinite2, atomics::memory_order_relaxed );
                     break;
                 case 0:
                     break;
index 8628736529d3f4875b39e64f679a1416ac0bc958..630b2ae3a89e2d10a59469f9907ed8ee50cba127 100644 (file)
@@ -308,7 +308,6 @@ namespace cds { namespace intrusive {
                     break;
                 }
             }
-
             CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
         //@endcond
index 3bdf12423adcbdc702597ed01b8dfbcfd07526a5..57f9c293a5ed8d2a3680be200ff1d8d672142266 100644 (file)
@@ -292,7 +292,6 @@ namespace cds { namespace intrusive {
                     break;
                 }
             }
-
             CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
         //@endcond
index a6fcd1a216099fd8e5d4903efe8d5547be436e0b..a3ae991bb8e92ae3078ff81ba9ea8a3e4f6f8afd 100644 (file)
@@ -1236,7 +1236,10 @@ namespace cds { namespace intrusive {
                     if ( res.pGrandParent ) {
                         assert( !res.pLeaf->infinite_key() );
                         pNewInternal->infinite_key( 0 );
+                        // TSan false positive: there is the release fence below, pNewInternal is not linked yet
+                        CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                         key_extractor()(pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf ));
+                        CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
                     }
                     else {
                         assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 );
@@ -1249,7 +1252,10 @@ namespace cds { namespace intrusive {
                     assert( !res.pLeaf->is_internal() );
                     pNewInternal->infinite_key( 0 );
 
+                    // TSan false positive: there is the release fence below, pNewInternal is not linked yet
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                     key_extractor()(pNewInternal->m_Key, val);
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
                     pNewInternal->m_pLeft.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_relaxed );
                     pNewInternal->m_pRight.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_release );
                     assert( !res.pLeaf->infinite_key() );
index c56d4b4593ee2872e8025b20446488413fa330c2..404cd1893c0c3d6ecad255522b086711c8ca8ed7 100644 (file)
@@ -372,7 +372,7 @@ namespace cds { namespace intrusive {
             node_type * h;
             while ( true ) {
                 h = res.guards.protect( 0, m_pHead, node_to_value() );
-                pNext = h->m_pNext.load( memory_model::memory_order_relaxed );
+                pNext = h->m_pNext.load( memory_model::memory_order_acquire );
                 res.guards.assign( 1, node_to_value()( pNext ));
                 if ( m_pHead.load(memory_model::memory_order_acquire) != h )
                     continue;
@@ -390,7 +390,7 @@ namespace cds { namespace intrusive {
                     continue;
                 }
 
-                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
+                if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                     break;
 
                 m_Stat.onDequeueRace();
@@ -499,7 +499,7 @@ namespace cds { namespace intrusive {
             ++m_ItemCounter;
             m_Stat.onEnqueue();
 
-            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
+            if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
                 m_Stat.onAdvanceTailFailed();
             return true;
         }
index b7b6601e2ed9ba5bf80416683691a6fed1c49b36..0f98f75f7988faceb9136e05257dca042703bfc7 100644 (file)
@@ -473,7 +473,7 @@ namespace cds { namespace intrusive {
                             fix_list( pTail, pHead );
                             continue;
                         }
-                        if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+                        if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
                             // dequeue success
                             break;
                         }
index b3f244c4ee91900a67c44c58798deeab0690c64b..a84948a90312b1206441025476b5eccc9deccec5 100644 (file)
@@ -411,21 +411,12 @@ namespace cds { namespace intrusive {
 
             segment * allocate_segment()
             {
-                // TSan: release barrier will be issued when the segment will link to the list of segments
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-                segment * p = segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
-                    quasi_factor() );
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-                return p;
+                return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor() );
             }
 
             static void free_segment( segment * pSegment )
             {
-                // TSan: deallocating is called inside SMR reclamation cycle
-                // so necessary barriers have been already issued
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 segment_allocator().Delete( pSegment );
-                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
 
             static void retire_segment( segment * pSegment )
index 0ef8a765b18614e5a666fe5c56771db9f8de5892..3ba404927bdfff5b99850752ebfbed13e295c2e7 100644 (file)
@@ -255,16 +255,11 @@ namespace cds { namespace intrusive {
         dummy_node_type * alloc_dummy_node( size_t nHash )
         {
             m_Stat.onHeadNodeAllocated();
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
-            dummy_node_type * p = dummy_node_allocator().New( nHash );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
-            return p;
+            return dummy_node_allocator().New( nHash );
         }
         void free_dummy_node( dummy_node_type * p )
         {
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             dummy_node_allocator().Delete( p );
-            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             m_Stat.onHeadNodeFreed();
         }
 
index b2347f4552290f7eafa8e6291758d37f289571eb..74ca175cd168e6a6d07059b290efebaf1069dd27 100644 (file)
@@ -77,8 +77,9 @@ namespace cds { namespace intrusive {
 
             operation()
                 : pVal( nullptr )
-                , nStatus(0)
-            {}
+            {
+                nStatus.store( 0 /*op_free*/, atomics::memory_order_release );
+            }
         };
         //@endcond
 
@@ -329,8 +330,8 @@ namespace cds { namespace intrusive {
 
                 /// Elimination back-off data
                 struct elimination_data {
-                    elimination_random_engine   randEngine; ///< random engine
-                    collision_array             collisions; ///< collision array
+                    mutable elimination_random_engine randEngine; ///< random engine
+                    collision_array                   collisions; ///< collision array
 
                     elimination_data()
                     {
@@ -351,6 +352,18 @@ namespace cds { namespace intrusive {
 
                 typedef std::unique_lock< elimination_lock_type > slot_scoped_lock;
 
+                template <bool Exp2 = collision_array::c_bExp2>
+                typename std::enable_if< Exp2, size_t >::type slot_index() const
+                {
+                    return m_Elimination.randEngine() & (m_Elimination.collisions.capacity() - 1);
+                }
+
+                template <bool Exp2 = collision_array::c_bExp2>
+                typename std::enable_if< !Exp2, size_t >::type slot_index() const
+                {
+                    return m_Elimination.randEngine() % m_Elimination.collisions.capacity();
+                }
+
             public:
                 elimination_backoff()
                 {
@@ -377,11 +390,11 @@ namespace cds { namespace intrusive {
                 bool backoff( operation_desc& op, Stat& stat )
                 {
                     elimination_backoff_type bkoff;
-                    op.nStatus.store( op_busy, atomics::memory_order_relaxed );
+                    op.nStatus.store( op_busy, atomics::memory_order_release );
 
                     elimination_rec * myRec = cds::algo::elimination::init_record( op );
 
-                    collision_array_record& slot = m_Elimination.collisions[m_Elimination.randEngine() % m_Elimination.collisions.capacity()];
+                    collision_array_record& slot = m_Elimination.collisions[ slot_index() ];
                     {
                         slot.lock.lock();
                         elimination_rec * himRec = slot.pRec;
@@ -394,9 +407,9 @@ namespace cds { namespace intrusive {
                                 else
                                     op.pVal = himOp->pVal;
                                 slot.pRec = nullptr;
+                                himOp->nStatus.store( op_collided, atomics::memory_order_release );
                                 slot.lock.unlock();
 
-                                himOp->nStatus.store( op_collided, atomics::memory_order_release );
                                 cds::algo::elimination::clear_record();
                                 stat.onActiveCollision( op.idOp );
                                 return true;
index d82d10cd81405df022285d3d09f1872bafa2f7e0..c4336d7ac3ca196b42570cad061c62d224ebcdc2 100644 (file)
@@ -60,12 +60,17 @@ namespace michael {
         /// Allocates memory block of \p nSize bytes (\p malloc wrapper)
         static void * alloc( size_t nSize )
         {
-            return ::malloc( nSize );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+            void * p = ::malloc( nSize );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+            return p;
         }
         /// Returning memory block to the system (\p free wrapper)
         static void free( void * p )
         {
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             ::free( p );
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
         }
     };
 
@@ -1221,8 +1226,10 @@ namespace michael {
                 newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 newAnchor.tag += 1;
 
                 if ( oldActive.credits() == 0 ) {
@@ -1303,8 +1310,10 @@ namespace michael {
                 newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
 
                 assert( oldAnchor.avail < pDesc->nCapacity );
+                CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
                 pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
                 newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+                CDS_TSAN_ANNOTATE_IGNORE_READS_END;
                 ++newAnchor.tag;
             } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
 
@@ -1341,11 +1350,13 @@ namespace michael {
             byte * pEnd = pDesc->pSB + pDesc->nCapacity * pDesc->nBlockSize;
             unsigned int nNext = 0;
             const unsigned int nBlockSize = pDesc->nBlockSize;
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
             for ( byte * p = pDesc->pSB; p < pEnd; p += nBlockSize ) {
                 reinterpret_cast<block_header *>( p )->set( pDesc, 0 );
                 reinterpret_cast<free_block_header *>( p )->nNextFree = ++nNext;
             }
             reinterpret_cast<free_block_header *>( pEnd - nBlockSize )->nNextFree = 0;
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
 
             active_tag newActive;
             newActive.set( pDesc, ( (pDesc->nCapacity - 1 < active_tag::c_nMaxCredits) ? pDesc->nCapacity - 1 : active_tag::c_nMaxCredits ) - 1 );
index b081f93efa9bade9ddbb29a91d7dc3f20bd2cca5..9dca22ff3ed17783d4f30dd3d2f4e8848648af74 100644 (file)
@@ -52,8 +52,8 @@ namespace cds { namespace opt {
         {
         public:
             typedef T   value_type  ;   ///< value type
-            static const size_t c_nCapacity = Capacity ;    ///< Capacity
-            static const bool c_bExp2 = Exp2; ///< \p Exp2 flag
+            static CDS_CONSTEXPR const size_t c_nCapacity = Capacity ;    ///< Capacity
+            static CDS_CONSTEXPR const bool c_bExp2 = Exp2; ///< \p Exp2 flag
 
             /// Rebind buffer for other template parameters
             template <typename Q, size_t Capacity2 = c_nCapacity, bool Exp22 = c_bExp2>
index d53e8596eaa29198e06bd35c1e4286f16a7df1e0..7a1f47cf814f69aef2fe86e0a9e1f8884647735a 100644 (file)
@@ -137,14 +137,14 @@ namespace pqueue {
             }
         };
 
-        size_t  m_nPusherCount;
+        atomics::atomic<size_t>  m_nPusherCount;
         void end_pusher()
         {
-            --m_nPusherCount;
+            m_nPusherCount.fetch_sub( 1, atomics::memory_order_relaxed );
         }
         bool pushing() const
         {
-            return m_nPusherCount != 0;
+            return m_nPusherCount.load( atomics::memory_order_relaxed ) != 0;
         }
 
     protected:
@@ -179,7 +179,7 @@ namespace pqueue {
 
             pool.add( new Popper<PQueue>( pool, testQueue ), s_nPopThreadCount );
 
-            m_nPusherCount = s_nPushThreadCount;
+            m_nPusherCount.store( s_nPushThreadCount, atomics::memory_order_release );
             CPPUNIT_MSG( "   push thread count=" << s_nPushThreadCount << " pop thread count=" << s_nPopThreadCount
                 << ", item count=" << nThreadItemCount * s_nPushThreadCount << " ..." );
             pool.run();