{
void operator ()( node_type * pNode )
{
+ // TSan false positive possible
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
cxx_allocator().Delete( pNode );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
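// For context: a minimal sketch of how TSan ignore-writes macros such as
// CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN/END could be wired to ThreadSanitizer's
// dynamic-annotation entry points. The definitions below are an illustrative
// assumption, not the library's actual ones.
#ifndef __has_feature
#   define __has_feature(x) 0
#endif
#if defined(__SANITIZE_THREAD__) || __has_feature(thread_sanitizer)
    // provided by the TSan runtime (dynamic annotations interface)
    extern "C" void AnnotateIgnoreWritesBegin( const char * file, int line );
    extern "C" void AnnotateIgnoreWritesEnd( const char * file, int line );
#   define EXAMPLE_TSAN_IGNORE_WRITES_BEGIN AnnotateIgnoreWritesBegin( __FILE__, __LINE__ )
#   define EXAMPLE_TSAN_IGNORE_WRITES_END   AnnotateIgnoreWritesEnd( __FILE__, __LINE__ )
#else
    // without TSan the annotations compile away to nothing
#   define EXAMPLE_TSAN_IGNORE_WRITES_BEGIN
#   define EXAMPLE_TSAN_IGNORE_WRITES_END
#endif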
{
assert( pRec );
+ // this function is called under the FC mutex, so suppress TSan checks here
+ CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
switch ( pRec->op() ) {
case op_enq:
    assert( pRec->pVal );
    // ...
    break;
default:
    assert( false );
    break;
}
+ CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
/// Batch-processing flat combining
void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd )
{
+ // this function is called under the FC mutex, so suppress TSan checks here
+ CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
typedef typename fc_kernel::iterator fc_iterator;
for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
switch ( it->op() ) {
// ...
default:
    break;
}
}
+
+ CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
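// The operation handlers above, including fc_process(), run with the flat-combining
// kernel's lock held, which is why the paired begin/end annotations are safe here.
// A hypothetical RAII wrapper (not part of libcds) would keep the ignore scope
// balanced even on an early return or exception:
struct tsan_ignore_rw_scope {
    tsan_ignore_rw_scope()  { CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; }
    ~tsan_ignore_rw_scope() { CDS_TSAN_ANNOTATE_IGNORE_RW_END;   }
};
// Usage sketch: declare `tsan_ignore_rw_scope scope;` at the top of the handler
// instead of writing the explicit BEGIN/END pair.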
//@endcond
m_Stat.onSegmentCreated();
if ( m_List.empty() )
- m_pHead.store( pNew, memory_model::memory_order_relaxed );
+ m_pHead.store( pNew, memory_model::memory_order_release );
m_List.push_back( *pNew );
m_pTail.store( pNew, memory_model::memory_order_release );
return guard.assign( pNew );
segment * allocate_segment()
{
- return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
+ // TSan: a release barrier will be issued when the segment is linked to the list of segments
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ segment * p = segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
quasi_factor() );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ return p;
}
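// The comment in allocate_segment() relies on the standard release/acquire publication
// rule: plain writes made before a release store are visible to any thread that observes
// the pointer through an acquire load. A small self-contained illustration (not libcds code):
#include <atomic>
#include <cassert>
#include <thread>

struct segment_stub { int cell; };                            // stand-in for a real segment

std::atomic<segment_stub *> g_tail{ nullptr };

int main()
{
    std::thread producer( [] {
        segment_stub * p = new segment_stub;
        p->cell = 42;                                         // plain (non-atomic) write
        g_tail.store( p, std::memory_order_release );         // publish with a release store
    });
    std::thread consumer( [] {
        segment_stub * p;
        while ( !( p = g_tail.load( std::memory_order_acquire ))) {} // pairs with the release store
        assert( p->cell == 42 );                              // write is guaranteed visible here
        delete p;
    });
    producer.join();
    consumer.join();
}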
static void free_segment( segment * pSegment )
{
+ // TSan: deallocation happens inside the SMR reclamation cycle,
+ // so the necessary barriers have already been issued
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
segment_allocator().Delete( pSegment );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
static void retire_segment( segment * pSegment )
static_assert( (sizeof(processor_heap) % c_nAlignment) == 0, "sizeof(processor_heap) error" );
+ // TSan false positive: the new descriptor will be linked later with a release fence
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+
pDesc = new( m_AlignedHeap.alloc( szTotal, c_nAlignment ) ) processor_desc;
pDesc->pageHeaps = reinterpret_cast<page_heap *>( pDesc + 1 );
else
pProcHeap->nPageIdx = pProcHeap->pSizeClass->nSBSizeIdx;
}
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
return pDesc;
}
while ( true ) {
typename Queue::value_type * p = m_Queue.pop();
if ( p ) {
+ CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
p->nConsumer = m_nThreadNo;
++m_nPopped;
if ( p->nWriterNo < nTotalWriters )
m_WriterData[ p->nWriterNo ].push_back( p->nNo );
else
++m_nBadWriter;
+ CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
else {
++m_nPopEmpty;