publication_record * p = m_pHead->pNext.load(memory_model::memory_order_relaxed);
if ( p != static_cast<publication_record *>( pRec )) {
do {
- pRec->pNext = p;
+ pRec->pNext.store( p, memory_model::memory_order_relaxed );
// Failed CAS reloads p with the current head
} while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
- memory_model::memory_order_release, atomics::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_acquire ));
m_Stat.onActivatePubRecord();
}
}
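
The loop above is the usual lock-free push onto the head of an intrusive singly-linked list, and the same retry pattern recurs in the hunks below. A minimal standalone sketch of the idiom, with generic names and plain std::atomic rather than the libcds types:

#include <atomic>

struct Node {
    int value = 0;
    std::atomic<Node*> next{ nullptr };
};

// Push n onto the head of an intrusive list.
// The successful CAS releases, so a thread that later loads the head with
// acquire also sees n->next; a failed CAS reloads the current head into
// 'head' and the loop retries.
void push( std::atomic<Node*>& list_head, Node* n )
{
    Node* head = list_head.load( std::memory_order_relaxed );
    do {
        n->next.store( head, std::memory_order_relaxed );
    } while ( !list_head.compare_exchange_weak( head, n,
                  std::memory_order_release, std::memory_order_acquire ));
}
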
if ( pPrev ) {
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
free_publication_record( static_cast<publication_record_type *>( p ));
m_Stat.onDeletePubRecord();
do {
pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
pGuard->init();
return pGuard;
do {
node.m_pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_acquire ));
return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
do {
pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_acquire ));
return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
}
do {
pNew->pNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
+ } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_acquire ));
}
// links block's items to the free list
do {
pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
// pHead is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_acquire ));
}
}
// pItem is changed by compare_exchange_weak
} while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
pItem->m_pNextFree.load(atomics::memory_order_acquire),
- atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+ atomics::memory_order_acquire, atomics::memory_order_acquire ));
success:
CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
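
Just above is the matching pop side of the same free-list idiom: the head item is detached with a CAS whose expected value is refreshed whenever it fails. A standalone counterpart to the push sketch earlier, reusing that hypothetical Node type; ABA and node lifetime are ignored here, so this is illustration only:

// Detach the current head of a free list, or return nullptr if it is empty.
// The initial load and both CAS orderings are acquire so that item->next can
// be read safely when the loop goes around again with a refreshed item.
Node* pop( std::atomic<Node*>& free_head )
{
    Node* item = free_head.load( std::memory_order_acquire );
    while ( item != nullptr &&
            !free_head.compare_exchange_weak( item,
                item->next.load( std::memory_order_acquire ),
                std::memory_order_acquire, std::memory_order_acquire ))
    {}
    return item;
}
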
link_checker::is_empty( pNew );
typename gc::Guard guard;
+ typename gc::Guard gNext;
back_off bkoff;
marked_ptr t;
marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );
if ( pNext.ptr() == nullptr ) {
- pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
+ pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed );
if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
- if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
+ if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_acquire ))
m_Stat.onAdvanceTailFailed();
break;
}
m_Stat.onTryAddBasket();
// Reread tail next
- typename gc::Guard gNext;
-
try_again:
pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());});
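
For orientation, the enqueue above follows the classic two-CAS scheme: link the new node after the current tail, then try to swing the tail, where a failed swing only means another thread advanced the tail first. A standalone sketch of that scheme without hazard pointers, baskets, or marked pointers, so it shows the shape of the algorithm rather than the libcds implementation:

#include <atomic>

struct QNode {
    int value = 0;
    std::atomic<QNode*> next{ nullptr };
};

struct MSQueue {
    std::atomic<QNode*> head;   // dequeue side omitted in this sketch
    std::atomic<QNode*> tail;

    MSQueue() {
        QNode* dummy = new QNode;   // queue starts with a dummy node
        head.store( dummy, std::memory_order_relaxed );
        tail.store( dummy, std::memory_order_relaxed );
    }

    void enqueue( QNode* n ) {
        n->next.store( nullptr, std::memory_order_relaxed );
        for (;;) {
            QNode* t = tail.load( std::memory_order_acquire );
            QNode* tnext = t->next.load( std::memory_order_acquire );
            if ( tnext == nullptr ) {
                // Link the new node after the current tail.
                if ( t->next.compare_exchange_weak( tnext, n,
                         std::memory_order_release, std::memory_order_relaxed )) {
                    // Swing the tail; failure means another thread already advanced it.
                    tail.compare_exchange_strong( t, n,
                         std::memory_order_release, std::memory_order_relaxed );
                    return;
                }
            }
            else {
                // Tail is lagging: help advance it, then retry.
                tail.compare_exchange_strong( t, tnext,
                     std::memory_order_release, std::memory_order_relaxed );
            }
        }
    }
};
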
atomics::atomic<base_class *> m_pRight; ///< Right subtree
atomics::atomic<update_ptr> m_pUpdate; ///< Update descriptor
//@cond
- uintptr_t m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
+ atomics::atomic<uintptr_t> m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
//@endcond
/// Default ctor
//@cond
update_ptr null_update_desc()
{
- return update_ptr( reinterpret_cast<update_desc_type *>( (++m_nEmptyUpdate << 2) & 0xFFFF ));
+ return update_ptr( reinterpret_cast<update_desc_type *>( (m_nEmptyUpdate.fetch_add(1, atomics::memory_order_relaxed) << 2) & 0xFFFF ));
}
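
A worked view of the generator above: fetch_add returns the previous counter value, so successive calls produce the dummy values 0, 4, 8, ..., 0xFFFC and then wrap, and the two low bits, which update_ptr uses as flag bits, stay clear. A standalone sketch with a plain uintptr_t standing in for update_ptr:

#include <atomic>
#include <cstdint>
#include <cassert>

std::atomic<std::uintptr_t> nEmptyUpdate{ 0 };

// Returns 16-bit dummy values in steps of 4, wrapping after 0xFFFC.
std::uintptr_t next_null_update()
{
    return ( nEmptyUpdate.fetch_add( 1, std::memory_order_relaxed ) << 2 ) & 0xFFFF;
}

int main()
{
    assert( next_null_update() == 0x0 );
    assert( next_null_update() == 0x4 );
    assert( next_null_update() == 0x8 );
    return 0;
}
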
base_class * get_child( bool bRight, atomics::memory_order mo ) const
guards.assign( 1, &val );
node_type * pTail = guards.protect( 0, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} ); // Read the tail
while( true ) {
- pNew->m_pNext.store( pTail, memory_model::memory_order_release );
- if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Try to CAS the tail
+ pNew->m_pNext.store( pTail, memory_model::memory_order_relaxed );
+ if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { // Try to CAS the tail
pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev
++m_ItemCounter;
m_Stat.onEnqueue();
public:
/// Construct free (unlocked) spin-lock
spin_lock() CDS_NOEXCEPT
+ : m_spin( false )
# ifdef CDS_DEBUG
- :m_dbgOwnerId( OS::c_NullThreadId )
+ , m_dbgOwnerId( OS::c_NullThreadId )
# endif
- {
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
- m_spin.store( false, atomics::memory_order_relaxed );
- }
+ {}
/// Construct spin-lock in specified state
/**
: m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
# endif
{
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
m_spin.store( bLocked, atomics::memory_order_relaxed );
-# ifdef CDS_THREAD_SANITIZER_ENABLED
- if ( bLocked )
- CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
-# endif
}
/// Dummy copy constructor
# ifdef CDS_DEBUG
, m_dbgOwnerId( cds::OS::c_NullThreadId )
# endif
- {
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
- }
+ {}
/// Destructor. On debug time it checks whether spin-lock is free
~spin_lock()
{
assert( !m_spin.load( atomics::memory_order_relaxed ));
- CDS_TSAN_ANNOTATE_MUTEX_DESTROY( &m_spin );
}
/// Check if the spin is locked
{
bool bCurrent = false;
-# ifdef CDS_THREAD_SANITIZER_ENABLED
- if ( m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
- CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
- }
-# else
- m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
-# endif
+ m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_acquire );
CDS_DEBUG_ONLY(
if ( !bCurrent ) {
assert( m_dbgOwnerId == OS::get_current_thread_id());
CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
- CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
m_spin.store( false, atomics::memory_order_release );
}
};
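
Stripped of the back-off, statistics, and debug bookkeeping, the lock above reduces to one CAS with acquire semantics to take the lock and one release store to give it back. A minimal standalone sketch, assuming nothing beyond std::atomic:

#include <atomic>

class tiny_spin_lock {
    std::atomic<bool> m_locked{ false };
public:
    bool try_lock() noexcept
    {
        bool expected = false;
        // Acquire on success so the critical section cannot be reordered above the lock.
        return m_locked.compare_exchange_strong( expected, true,
                   std::memory_order_acquire, std::memory_order_relaxed );
    }

    void lock() noexcept
    {
        while ( !try_lock() )
            ;   // a real lock would spin on a relaxed load and back off here
    }

    void unlock() noexcept
    {
        // Release pairs with the acquire in try_lock.
        m_locked.store( false, std::memory_order_release );
    }
};
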
bool try_acquire() CDS_NOEXCEPT
{
integral_type nCurrent = 0;
-# ifdef CDS_THREAD_SANITIZER_ENABLED
- if ( m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
- CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
- return true;
- }
- return false;
-# else
- return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
-# endif
+ return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );
}
bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
reentrant_spin_lock() CDS_NOEXCEPT
: m_spin(0)
, m_OwnerId( OS::c_NullThreadId )
- {
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
- }
+ {}
/// Dummy copy constructor
/**
reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
: m_spin(0)
, m_OwnerId( OS::c_NullThreadId )
- {
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
- }
+ {}
/// Construct object in specified state
explicit reentrant_spin_lock( bool bLocked )
: m_spin(0)
, m_OwnerId( OS::c_NullThreadId )
{
- CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
if ( bLocked )
lock();
}
m_spin.store( n - 1, atomics::memory_order_relaxed );
else {
free();
- CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
m_spin.store( 0, atomics::memory_order_release );
}
return true;
thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
do {
pRec->m_list.m_pNext = pOldHead;
- CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &( pRec->m_list.m_pNext ));
- } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
return pRec;
}
hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
do {
- // TSan: Next CAS release orders the memory
- CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(&hprec->m_pNextNode );
hprec->m_pNextNode = pOldHead;
- } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
return hprec;
}