// Thread Sanitizer annotations.
// From https://groups.google.com/d/msg/thread-sanitizer/SsrHB7FTnTk/mNTGNLQj-9cJ
+//@cond
+
#ifdef CDS_THREAD_SANITIZER_ENABLED
# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr) AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast<void*>(addr))
# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr) AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast<void*>(addr))
void AnnotateIgnoreWritesBegin(const char *f, int l);
void AnnotateIgnoreWritesEnd(const char *f, int l);
}
-#else
+
+#else // CDS_THREAD_SANITIZER_ENABLED
+
# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr)
# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr)
# define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_READS_END
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END
# define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN
# define CDS_TSAN_ANNOTATE_IGNORE_RW_END
+
#endif
+//@endcond
#endif // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H
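
// Illustrative sketch, not part of the patch: a typical use of the
// ignore-writes annotation pair used throughout this change set. A node that
// is still private to the creating thread cannot be involved in a race, but
// TSan may flag its initializing stores when the underlying memory is being
// reused; bracketing the construction suppresses that report. The type
// tsan_example_node and the function make_node are hypothetical.
struct tsan_example_node {
    int                 value;
    tsan_example_node * next;

    explicit tsan_example_node( int v )
        : value( v )
        , next( nullptr )
    {}
};

inline tsan_example_node * make_node( int v )
{
    CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
    tsan_example_node * p = new tsan_example_node( v );   // still private to this thread
    CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
    return p;   // published later with a release store/CAS
}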
{
assert( pRec );
+ // this function is called under FC mutex, so switch TSan off
+ CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
switch ( pRec->op() ) {
case op_enq:
assert( pRec->pValEnq );
assert(false);
break;
}
+ CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
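
// Illustrative sketch, not part of the patch (the names request, execute and
// combining_pass are hypothetical). This is the pattern the combining functions
// above rely on: the combiner executes operations announced by other threads
// while it holds the flat-combining mutex, and an announcing thread does not
// touch its request again until the combiner marks it done, so the accesses
// cannot race. TSan cannot see that protocol, hence the IGNORE_RW brackets.
struct request {
    int op;    // operation code announced by the owning thread
    int arg;   // operation argument
};

void execute( request& r );   // hypothetical per-operation dispatcher

inline void combining_pass( request* first, request* last )
{
    // precondition: the caller holds the flat-combining mutex
    CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
    for ( request* r = first; r != last; ++r )
        execute( *r );   // reads/writes fields filled in by announcing threads
    CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}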
/// Batch-processing flat combining
void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd )
{
typedef typename fc_kernel::iterator fc_iterator;
+
+ // this function is called under FC mutex, so switch TSan off
+ CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
switch ( it->op() ) {
case op_enq:
break;
}
}
+ CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
//@endcond
{
// this function is called under FC mutex, so switch TSan off
CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN;
+
typedef typename fc_kernel::iterator fc_iterator;
for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) {
switch ( it->op() ) {
template <typename K>
static node_type * alloc_node(const K& key)
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- node_type * p = cxx_allocator().New( key );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return cxx_allocator().New( key );
}
template <typename K, typename V>
static node_type * alloc_node( const K& key, const V& val )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- node_type * p = cxx_allocator().New( key, val );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return cxx_allocator().New( key, val );
}
template <typename... Args>
static node_type * alloc_node( Args&&... args )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- node_type * p = cxx_allocator().MoveNew( std::forward<Args>(args)... );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return cxx_allocator().MoveNew( std::forward<Args>(args)... );
}
static void free_node( node_type * pNode )
{
void operator ()( node_type * pNode )
{
- // TSan false positive possible
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
cxx_allocator().Delete( pNode );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
template <typename Q>
static node_type * alloc_node( Q const& v )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- node_type * p = cxx_node_allocator().New( v );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return cxx_node_allocator().New( v );
}
template <typename... Args>
static node_type * alloc_node( Args&&... args )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- node_type * p = cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return cxx_node_allocator().MoveNew( std::forward<Args>(args)...);
}
static void free_node( node_type * pNode )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
cxx_node_allocator().Delete( pNode );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
struct node_disposer {
, typename Alloc::template rebind<T>::other
>::type allocator_type;
+ /// \p true if the underlying allocator is \p std::allocator, \p false otherwise
+ static CDS_CONSTEXPR bool const c_bStdAllocator = std::is_same< allocator_type, std::allocator<T>>::value;
+
/// Element type
typedef T value_type;
template <typename... S>
value_type * New( S const&... src )
{
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ }
+ value_type * pv = Construct( allocator_type::allocate(1), src... );
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+ return pv;
+# else
return Construct( allocator_type::allocate(1), src... );
+# endif
}
/// Analogue of <tt>operator new T( std::forward<Args>(args)... )</tt> (move semantics)
template <typename... Args>
value_type * MoveNew( Args&&... args )
{
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ }
+ value_type * pv = MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+ return pv;
+# else
return MoveConstruct( allocator_type::allocate(1), std::forward<Args>(args)... );
+# endif
}
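
// Illustrative sketch, not part of the patch. c_bStdAllocator is a compile-time
// constant, so the "if ( c_bStdAllocator )" branches above are folded away by the
// compiler; with a user-supplied allocator the TSan brackets disappear entirely,
// which (presumably) keeps the suppression from masking a real race inside custom
// allocator code. A minimal model of the same pattern, using the hypothetical
// helper make_with() (assumes <memory> and <type_traits> are included):
template <typename T, typename Alloc = std::allocator<T>>
T * make_with( Alloc a = Alloc())
{
    static CDS_CONSTEXPR bool const is_std = std::is_same< Alloc, std::allocator<T>>::value;
    if ( is_std ) {
        CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
    }
    T * p = a.allocate( 1 );
    a.construct( p );   // constructor stores into freshly allocated memory may be reported by TSan
    if ( is_std ) {
        CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
    }
    return p;
}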
/// Analogue of operator new T[\p nCount ]
value_type * NewArray( size_t nCount )
{
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ }
+# endif
value_type * p = allocator_type::allocate( nCount );
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+# endif
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i );
return p;
template <typename S>
value_type * NewArray( size_t nCount, S const& src )
{
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ }
+# endif
value_type * p = allocator_type::allocate( nCount );
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+# endif
for ( size_t i = 0; i < nCount; ++i )
Construct( p + i, src );
return p;
/// Analogue of operator delete
void Delete( value_type * p )
{
+ // TSan false positive possible
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
allocator_type::destroy( p );
allocator_type::deallocate( p, 1 );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
/// Analogue of operator delete []
void Delete( value_type * p, size_t nCount )
{
+ // TSan false positive possible
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
for ( size_t i = 0; i < nCount; ++i )
allocator_type::destroy( p + i );
allocator_type::deallocate( p, nCount );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
# if CDS_COMPILER == CDS_COMPILER_INTEL
template <typename... S>
value_type * Construct( void * p, S const&... src )
{
- return new( p ) value_type( src... );
+ // TSan false positive possible
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ value_type * pv = new( p ) value_type( src... );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ return pv;
}
/// Analogue of placement <tt>operator new( p ) T( std::forward<Args>(args)... )</tt>
template <typename... Args>
value_type * MoveConstruct( void * p, Args&&... args )
{
- return new( p ) value_type( std::forward<Args>(args)... );
+ // TSan false positive possible
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ value_type * pv = new( p ) value_type( std::forward<Args>(args)... );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ return pv;
}
/// Rebinds allocator to other type \p Q instead of \p T
size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *);
typedef typename allocator_type::template rebind< void * >::other void_allocator;
- return void_allocator().allocate( nPtrSize );
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ }
+# endif
+ void * p = void_allocator().allocate( nPtrSize );
+# if CDS_THREAD_SANITIZER_ENABLED
+ if ( c_bStdAllocator ) {
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ }
+# endif
+ return p;
}
//@endcond
};
*/
static void free( T * p )
{
- Allocator<T, Alloc> a;
+ Allocator<type, allocator_type> a;
a.Delete( p );
}
};
}
}
- m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed );
+ m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed );
}
else {
marked_ptr iter( h );
else if ( bDeque ) {
res.pNext = pNext.ptr();
- if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
+ if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ) {
if ( hops >= m_nMaxHops )
free_chain( h, pNext );
break;
{
// "head" and "newHead" are guarded
- if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+ if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
typename gc::template GuardArray<2> guards;
guards.assign( 0, node_traits::to_value_ptr(head.ptr()) );
if ( pNext.ptr() == nullptr ) {
pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
- if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ) ) {
- if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+ if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
+ if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
break;
}
{
bkoff();
pNew->m_pNext.store( pNext, memory_model::memory_order_release );
- if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, memory_model::memory_order_relaxed )) {
+ if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
m_Stat.onAddBasket();
break;
}
pNext = p;
g.assign( 0, g.template get<value_type>( 1 ) );
}
- if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, memory_model::memory_order_relaxed ))
+ if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
m_Stat.onBadTail();
key_infinite = key_infinite1 | key_infinite2 ///< Cumulative infinite flags
};
- unsigned int m_nFlags ; ///< Internal flags
+ atomics::atomic<unsigned int> m_nFlags ; ///< Internal flags
/// Constructs a leaf (bInternal == false) or internal (bInternal == true) node
explicit basic_node( bool bInternal )
/// Checks if the node is internal
bool is_internal() const
{
- return (m_nFlags & internal) != 0;
+ return (m_nFlags.load( atomics::memory_order_relaxed ) & internal) != 0;
}
/// Returns infinite key, 0 if the node is not infinite
unsigned int infinite_key() const
{
- return m_nFlags & key_infinite;
+ return m_nFlags.load( atomics::memory_order_relaxed ) & key_infinite;
}
/// Sets infinite key for the node (for internal use only!!!)
void infinite_key( int nInf )
{
- m_nFlags &= ~key_infinite;
+ const unsigned int nFlags = m_nFlags.load( atomics::memory_order_relaxed ) & ~key_infinite;
+ m_nFlags.store( nFlags, atomics::memory_order_relaxed );
switch ( nInf ) {
case 1:
- m_nFlags |= key_infinite1;
+ m_nFlags.store( nFlags | key_infinite1, atomics::memory_order_relaxed );
break;
case 2:
- m_nFlags |= key_infinite2;
+ m_nFlags.store( nFlags | key_infinite2, atomics::memory_order_relaxed );
break;
case 0:
break;
break;
}
}
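
// Illustrative sketch, not part of the patch: why m_nFlags becomes an atomic.
// Even when every write happens before the node is linked into the tree, the C++
// memory model (and TSan) treat unsynchronized mixed reads and writes of a plain
// unsigned int as a data race. Relaxed atomic accesses keep the generated code
// essentially unchanged while making the accesses well-defined. Note that the
// load-then-store update above relies on the flags being modified by one thread
// at a time (infinite_key() is marked "for internal use only"). A minimal model,
// assuming <atomic> is included; the type flag_word is hypothetical:
struct flag_word {
    std::atomic<unsigned int> bits;

    flag_word()
        : bits( 0 )
    {}

    // single-writer phase, before the owning object is published
    void set( unsigned int mask )
    {
        bits.store( bits.load( std::memory_order_relaxed ) | mask, std::memory_order_relaxed );
    }

    // may be called concurrently after publication
    bool test( unsigned int mask ) const
    {
        return ( bits.load( std::memory_order_relaxed ) & mask ) != 0;
    }
};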
-
CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
//@endcond
break;
}
}
-
CDS_TSAN_ANNOTATE_IGNORE_RW_END;
}
//@endcond
if ( res.pGrandParent ) {
assert( !res.pLeaf->infinite_key() );
pNewInternal->infinite_key( 0 );
+ // TSan false positive: there is a release fence below and pNewInternal is not yet linked
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
key_extractor()(pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf ));
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
else {
assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 );
assert( !res.pLeaf->is_internal() );
pNewInternal->infinite_key( 0 );
+ // TSan false positive: there is a release fence below and pNewInternal is not yet linked
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
key_extractor()(pNewInternal->m_Key, val);
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
pNewInternal->m_pLeft.store( static_cast<tree_node *>(res.pLeaf), memory_model::memory_order_relaxed );
pNewInternal->m_pRight.store( static_cast<tree_node *>(pNewLeaf), memory_model::memory_order_release );
assert( !res.pLeaf->infinite_key() );
node_type * h;
while ( true ) {
h = res.guards.protect( 0, m_pHead, node_to_value() );
- pNext = h->m_pNext.load( memory_model::memory_order_relaxed );
+ pNext = h->m_pNext.load( memory_model::memory_order_acquire );
res.guards.assign( 1, node_to_value()( pNext ));
if ( m_pHead.load(memory_model::memory_order_acquire) != h )
continue;
continue;
}
- if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
break;
m_Stat.onDequeueRace();
++m_ItemCounter;
m_Stat.onEnqueue();
- if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
+ if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
return true;
}
fix_list( pTail, pHead );
continue;
}
- if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
+ if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// dequeue success
break;
}
segment * allocate_segment()
{
- // TSan: release barrier will be issued when the segment will link to the list of segments
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- segment * p = segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor,
- quasi_factor() );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor() );
}
static void free_segment( segment * pSegment )
{
- // TSan: deallocating is called inside SMR reclamation cycle
- // so necessary barriers have been already issued
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
segment_allocator().Delete( pSegment );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
static void retire_segment( segment * pSegment )
dummy_node_type * alloc_dummy_node( size_t nHash )
{
m_Stat.onHeadNodeAllocated();
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
- dummy_node_type * p = dummy_node_allocator().New( nHash );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
- return p;
+ return dummy_node_allocator().New( nHash );
}
void free_dummy_node( dummy_node_type * p )
{
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
dummy_node_allocator().Delete( p );
- CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
m_Stat.onHeadNodeFreed();
}
operation()
: pVal( nullptr )
- , nStatus(0)
- {}
+ {
+ nStatus.store( 0 /*op_free*/, atomics::memory_order_release );
+ }
};
//@endcond
/// Elimination back-off data
struct elimination_data {
- elimination_random_engine randEngine; ///< random engine
- collision_array collisions; ///< collision array
+ mutable elimination_random_engine randEngine; ///< random engine
+ collision_array collisions; ///< collision array
elimination_data()
{
typedef std::unique_lock< elimination_lock_type > slot_scoped_lock;
+ template <bool Exp2 = collision_array::c_bExp2>
+ typename std::enable_if< Exp2, size_t >::type slot_index() const
+ {
+ return m_Elimination.randEngine() & (m_Elimination.collisions.capacity() - 1);
+ }
+
+ template <bool Exp2 = collision_array::c_bExp2>
+ typename std::enable_if< !Exp2, size_t >::type slot_index() const
+ {
+ return m_Elimination.randEngine() % m_Elimination.collisions.capacity();
+ }
+
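// Illustrative note, not part of the patch: when the collision array capacity
// is a power of two (c_bExp2), the modulo in slot_index() can be replaced by a
// bit mask, because x % 2^k == x & (2^k - 1) for unsigned x. A standalone check
// of that identity (assumes <cassert> and <cstddef>; the function name is
// hypothetical):
void check_mask_identity() const
{
    const size_t cap = 64;   // power of two
    for ( size_t x = 0; x < 1000; ++x )
        assert( ( x % cap ) == ( x & ( cap - 1 )));
}
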
public:
elimination_backoff()
{
bool backoff( operation_desc& op, Stat& stat )
{
elimination_backoff_type bkoff;
- op.nStatus.store( op_busy, atomics::memory_order_relaxed );
+ op.nStatus.store( op_busy, atomics::memory_order_release );
elimination_rec * myRec = cds::algo::elimination::init_record( op );
- collision_array_record& slot = m_Elimination.collisions[m_Elimination.randEngine() % m_Elimination.collisions.capacity()];
+ collision_array_record& slot = m_Elimination.collisions[ slot_index() ];
{
slot.lock.lock();
elimination_rec * himRec = slot.pRec;
else
op.pVal = himOp->pVal;
slot.pRec = nullptr;
+ himOp->nStatus.store( op_collided, atomics::memory_order_release );
slot.lock.unlock();
- himOp->nStatus.store( op_collided, atomics::memory_order_release );
cds::algo::elimination::clear_record();
stat.onActiveCollision( op.idOp );
return true;
/// Allocates memory block of \p nSize bytes (\p malloc wrapper)
static void * alloc( size_t nSize )
{
- return ::malloc( nSize );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
+ void * p = ::malloc( nSize );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
+ return p;
}
/// Returning memory block to the system (\p free wrapper)
static void free( void * p )
{
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
::free( p );
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
}
};
newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
+ CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+ CDS_TSAN_ANNOTATE_IGNORE_READS_END;
newAnchor.tag += 1;
if ( oldActive.credits() == 0 ) {
newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
+ CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
+ CDS_TSAN_ANNOTATE_IGNORE_READS_END;
++newAnchor.tag;
} while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
byte * pEnd = pDesc->pSB + pDesc->nCapacity * pDesc->nBlockSize;
unsigned int nNext = 0;
const unsigned int nBlockSize = pDesc->nBlockSize;
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
for ( byte * p = pDesc->pSB; p < pEnd; p += nBlockSize ) {
reinterpret_cast<block_header *>( p )->set( pDesc, 0 );
reinterpret_cast<free_block_header *>( p )->nNextFree = ++nNext;
}
reinterpret_cast<free_block_header *>( pEnd - nBlockSize )->nNextFree = 0;
+ CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
active_tag newActive;
newActive.set( pDesc, ( (pDesc->nCapacity - 1 < active_tag::c_nMaxCredits) ? pDesc->nCapacity - 1 : active_tag::c_nMaxCredits ) - 1 );
{
public:
typedef T value_type ; ///< value type
- static const size_t c_nCapacity = Capacity ; ///< Capacity
- static const bool c_bExp2 = Exp2; ///< \p Exp2 flag
+ static CDS_CONSTEXPR const size_t c_nCapacity = Capacity ; ///< Capacity
+ static CDS_CONSTEXPR const bool c_bExp2 = Exp2; ///< \p Exp2 flag
/// Rebind buffer for other template parameters
template <typename Q, size_t Capacity2 = c_nCapacity, bool Exp22 = c_bExp2>
}
};
- size_t m_nPusherCount;
+ atomics::atomic<size_t> m_nPusherCount;
void end_pusher()
{
- --m_nPusherCount;
+ m_nPusherCount.fetch_sub( 1, atomics::memory_order_relaxed );
}
bool pushing() const
{
- return m_nPusherCount != 0;
+ return m_nPusherCount.load( atomics::memory_order_relaxed ) != 0;
}
protected:
pool.add( new Popper<PQueue>( pool, testQueue ), s_nPopThreadCount );
- m_nPusherCount = s_nPushThreadCount;
+ m_nPusherCount.store( s_nPushThreadCount, atomics::memory_order_release );
CPPUNIT_MSG( " push thread count=" << s_nPushThreadCount << " pop thread count=" << s_nPopThreadCount
<< ", item count=" << nThreadItemCount * s_nPushThreadCount << " ..." );
pool.run();