TSan examination: fixed data races and false positives in queues
author    khizmax <libcds.dev@gmail.com>
          Thu, 30 Apr 2015 20:08:09 +0000 (23:08 +0300)
committer khizmax <libcds.dev@gmail.com>
          Thu, 30 Apr 2015 20:08:09 +0000 (23:08 +0300)
cds/container/msqueue.h
cds/intrusive/fcqueue.h
cds/intrusive/segmented_queue.h
cds/memory/michael/allocator.h
tests/unit/queue/intrusive_queue_reader_writer.cpp
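
Note on the annotations used throughout this commit: the CDS_TSAN_ANNOTATE_* macros wrap ThreadSanitizer's dynamic-annotation entry points so that known-benign accesses can be excluded from race detection. The sketch below is a plausible definition on top of TSan's AnnotateIgnore* interface, not the exact libcds source; CDS_THREAD_SANITIZER_ENABLED is assumed here as the configuration guard.

    #if defined(CDS_THREAD_SANITIZER_ENABLED)
        // TSan's dynamic-annotation entry points (exported by the sanitizer runtime)
        extern "C" void AnnotateIgnoreReadsBegin( const char * file, int line );
        extern "C" void AnnotateIgnoreReadsEnd( const char * file, int line );
        extern "C" void AnnotateIgnoreWritesBegin( const char * file, int line );
        extern "C" void AnnotateIgnoreWritesEnd( const char * file, int line );

    #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN AnnotateIgnoreWritesBegin( __FILE__, __LINE__ )
    #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END   AnnotateIgnoreWritesEnd( __FILE__, __LINE__ )
    #   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN \
            do { AnnotateIgnoreReadsBegin( __FILE__, __LINE__ ); AnnotateIgnoreWritesBegin( __FILE__, __LINE__ ); } while (0)
    #   define CDS_TSAN_ANNOTATE_IGNORE_RW_END \
            do { AnnotateIgnoreWritesEnd( __FILE__, __LINE__ ); AnnotateIgnoreReadsEnd( __FILE__, __LINE__ ); } while (0)
    #else
        // expand to nothing when TSan is not in use
    #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
    #   define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END
    #   define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN
    #   define CDS_TSAN_ANNOTATE_IGNORE_RW_END
    #endif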

diff --git a/cds/container/msqueue.h b/cds/container/msqueue.h
index 1d355e97fc218f8d619a26beaf380fc8f04cf67b..3292ff916c674d52d07e6b532470ec0da090aacb 100644
--- a/cds/container/msqueue.h
+++ b/cds/container/msqueue.h
@@ -117,7 +117,10 @@ namespace cds { namespace container {
             {
                 void operator ()( node_type * pNode )
                 {
+                    // possible TSan false positive: the node is deleted inside the GC reclamation cycle
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                     cxx_allocator().Delete( pNode );
+                    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
                 }
             };
 
diff --git a/cds/intrusive/fcqueue.h b/cds/intrusive/fcqueue.h
index 642e4fc00136d1b517074558722917b2e73942f6..8628736529d3f4875b39e64f679a1416ac0bc958 100644
--- a/cds/intrusive/fcqueue.h
+++ b/cds/intrusive/fcqueue.h
@@ -260,6 +260,9 @@ namespace cds { namespace intrusive {
         {
             assert( pRec );
 
+            // this function is called under the FC mutex, so TSan checks are switched off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             switch ( pRec->op() ) {
             case op_enq:
                 assert( pRec->pVal );
@@ -282,11 +285,15 @@ namespace cds { namespace intrusive {
                 assert(false);
                 break;
             }
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
 
         /// Batch-processing flat combining
         void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd )
         {
+            // this function is called under the FC mutex, so TSan checks are switched off
+            CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
             typedef typename fc_kernel::iterator fc_iterator;
             for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
                 switch ( it->op() ) {
@@ -301,6 +308,8 @@ namespace cds { namespace intrusive {
                     break;
                 }
             }
+
+            CDS_TSAN_ANNOTATE_IGNORE_RW_END;
         }
         //@endcond
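
Why ignoring reads and writes is sound in fc_apply()/fc_process(): in flat combining, only the thread that currently holds the combiner lock executes the publication records of the waiting threads, so every access inside these functions is serialized by that mutex. TSan cannot always see this happens-before edge through the kernel's hand-off protocol, hence the annotations. A minimal sketch of the pattern, with hypothetical types (pub_record, fc_kernel_sketch) that are not part of libcds:

    #include <mutex>
    #include <vector>

    struct pub_record { int nOp; int nArg; };

    class fc_kernel_sketch {
        std::mutex m_CombinerLock;                // the "FC mutex" from the comments above
        std::vector<pub_record *> m_Published;    // records published by waiting threads
    public:
        template <typename Func>
        void combine( Func process )
        {
            std::lock_guard<std::mutex> lock( m_CombinerLock );
            for ( pub_record * p : m_Published )
                process( *p );    // serialized by the lock: plain accesses cannot race
        }
    };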
 
diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h
index c8e1430dbe08f678f0bb4c4c32f967e58a3e6f57..b3f244c4ee91900a67c44c58798deeab0690c64b 100644
--- a/cds/intrusive/segmented_queue.h
+++ b/cds/intrusive/segmented_queue.h
@@ -352,7 +352,7 @@ namespace cds { namespace intrusive {
                 m_Stat.onSegmentCreated();
 
                 if ( m_List.empty() )
-                    m_pHead.store( pNew, memory_model::memory_order_relaxed );
+                    m_pHead.store( pNew, memory_model::memory_order_release );
                 m_List.push_back( *pNew );
                 m_pTail.store( pNew, memory_model::memory_order_release );
                 return guard.assign( pNew );
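
The relaxed-to-release change above is the data-race fix in this file: a consumer that observes m_pHead must also observe the fully constructed segment, which only a release store paired with an acquire load guarantees. A standalone sketch of the pairing, assuming the reader side uses an acquire load as is usual (segment_sketch, g_head are illustrative names, not libcds code):

    #include <atomic>

    struct segment_sketch { int cells[4]; };

    std::atomic<segment_sketch *> g_head{ nullptr };

    void producer()
    {
        segment_sketch * p = new segment_sketch{};   // plain writes during construction
        // release: everything written above happens-before any acquire load that sees p
        g_head.store( p, std::memory_order_release );
    }

    void consumer()
    {
        segment_sketch * p = g_head.load( std::memory_order_acquire );
        if ( p ) {
            int v = p->cells[0];    // safe: pairs with the producer's release store
            (void) v;
        }
    }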
@@ -411,13 +411,21 @@ namespace cds { namespace intrusive {
 
             segment * allocate_segment()
             {
-                return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
+                // TSan: a release barrier will be issued when the segment is linked into the segment list
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+                segment * p = segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
                     quasi_factor() );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+                return p;
             }
 
             static void free_segment( segment * pSegment )
             {
+                // TSan: deallocation is called inside the SMR reclamation cycle,
+                // so the necessary barriers have already been issued
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
                 segment_allocator().Delete( pSegment );
+                CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             }
 
             static void retire_segment( segment * pSegment )
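
The free_segment() annotation relies on the safe-memory-reclamation (SMR) invariant: a segment is deallocated only after the reclamation scan has proven that no thread still guards it, and the scan itself performs the synchronizing fences. A simplified hazard-pointer-style sketch of that argument (g_Guards and reclaim() are illustrative, not the libcds API):

    #include <atomic>
    #include <vector>

    struct node { int value; };

    std::atomic<node *> g_Guards[ 4 ];    // one hazard slot per thread (sketch)

    void reclaim( std::vector<node *> & retired )
    {
        std::vector<node *> stillGuarded;
        for ( node * p : retired ) {
            bool guarded = false;
            for ( auto & g : g_Guards )
                if ( g.load( std::memory_order_seq_cst ) == p )
                    guarded = true;
            if ( guarded )
                stillGuarded.push_back( p );
            else
                delete p;    // unreachable by any thread: the destructor's writes cannot race
        }
        retired.swap( stillGuarded );
    }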
diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h
index 5b14d48c2ff7eb54514b05c183f725ba26842c84..d82d10cd81405df022285d3d09f1872bafa2f7e0 100644
--- a/cds/memory/michael/allocator.h
+++ b/cds/memory/michael/allocator.h
@@ -1450,6 +1450,9 @@ namespace michael {
 
             static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" );
 
+            // TSan false positive: the new descriptor will be linked into the list later, under a release fence
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+
             pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
 
             pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
@@ -1474,6 +1477,7 @@ namespace michael {
                 else
                     pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx;
             }
+            CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
             return pDesc;
         }
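
The writes ignored above initialize a processor descriptor that is still private to the allocating thread; as the comment says, it becomes shared only when it is linked into the descriptor list under a release fence. A sketch of that publish-after-initialize pattern (proc_desc_sketch, g_DescList are illustrative names):

    #include <atomic>

    struct proc_desc_sketch {
        int nPageIdx;
        proc_desc_sketch * pNext;
    };

    std::atomic<proc_desc_sketch *> g_DescList{ nullptr };

    void publish( proc_desc_sketch * pDesc )
    {
        pDesc->nPageIdx = 0;    // plain write: pDesc is still thread-private here
        proc_desc_sketch * pHead = g_DescList.load( std::memory_order_relaxed );
        do {
            pDesc->pNext = pHead;
        } while ( !g_DescList.compare_exchange_weak( pHead, pDesc,
                    std::memory_order_release, std::memory_order_relaxed ));
    }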
 
diff --git a/tests/unit/queue/intrusive_queue_reader_writer.cpp b/tests/unit/queue/intrusive_queue_reader_writer.cpp
index b5ad38c48f5e01b86e3f1dca14ad25c5f3ac1438..a4f0e734cc5e740f1d3d59dd00f47dc5a79e1602 100644
--- a/tests/unit/queue/intrusive_queue_reader_writer.cpp
+++ b/tests/unit/queue/intrusive_queue_reader_writer.cpp
@@ -168,12 +168,14 @@ namespace queue {
                 while ( true ) {
                     typename Queue::value_type * p = m_Queue.pop();
                     if ( p ) {
+                        CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
                         p->nConsumer = m_nThreadNo;
                         ++m_nPopped;
                         if ( p->nWriterNo < nTotalWriters )
                             m_WriterData[ p->nWriterNo ].push_back( p->nNo );
                         else
                             ++m_nBadWriter;
+                        CDS_TSAN_ANNOTATE_IGNORE_RW_END;
                     }
                     else {
                         ++m_nPopEmpty;
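
In the test above, the annotations cover accesses to an element that has just been popped: the queue hands each element to exactly one consumer, so until the element is recycled the fields of *p are effectively thread-private. TSan may still report them because the queues' internal synchronization is partially hidden by the annotations added elsewhere in this commit. A sketch of the ownership-transfer argument with a lock-based stand-in queue (handoff_queue_sketch, value_sketch are illustrative, not the tested containers):

    #include <mutex>
    #include <queue>

    struct value_sketch { size_t nWriterNo; size_t nConsumer; };

    class handoff_queue_sketch {
        std::mutex m_Lock;
        std::queue<value_sketch *> m_Queue;
    public:
        void push( value_sketch * p )
        {
            std::lock_guard<std::mutex> lock( m_Lock );
            m_Queue.push( p );
        }
        value_sketch * pop()
        {
            std::lock_guard<std::mutex> lock( m_Lock );
            if ( m_Queue.empty())
                return nullptr;
            value_sketch * p = m_Queue.front();
            m_Queue.pop();
            return p;    // exclusive ownership transfers to the calling consumer
        }
    };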