From 29ff2a580fe96e2a4cb21d1abdd089002d35e6b2 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Wed, 30 Nov 2016 16:11:28 +0300
Subject: [PATCH] [TSan] Fixed memory ordering

---
 cds/algo/flat_combining/kernel.h           |  6 +--
 cds/gc/details/dhp.h                       | 12 +++---
 cds/intrusive/basket_queue.h               |  7 ++--
 cds/intrusive/details/ellen_bintree_base.h |  4 +-
 cds/intrusive/optimistic_queue.h           |  4 +-
 cds/sync/spinlock.h                        | 47 ++++------------------
 cds/urcu/details/base.h                    |  3 +-
 src/hp_gc.cpp                              |  4 +-
 8 files changed, 26 insertions(+), 61 deletions(-)

diff --git a/cds/algo/flat_combining/kernel.h b/cds/algo/flat_combining/kernel.h
index aa240004..b0d8db43 100644
--- a/cds/algo/flat_combining/kernel.h
+++ b/cds/algo/flat_combining/kernel.h
@@ -598,10 +598,10 @@ namespace cds { namespace algo {
             publication_record * p = m_pHead->pNext.load(memory_model::memory_order_relaxed);
             if ( p != static_cast<publication_record *>( pRec )) {
                 do {
-                    pRec->pNext = p;
+                    pRec->pNext.store( p, memory_model::memory_order_relaxed );
                     // Failed CAS changes p
                 } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
-                    memory_model::memory_order_release, atomics::memory_order_relaxed ));
+                    memory_model::memory_order_release, atomics::memory_order_acquire ));
                 m_Stat.onActivatePubRecord();
             }
         }
@@ -810,7 +810,7 @@ namespace cds { namespace algo {
                 if ( pPrev ) {
                     publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
                     if ( pPrev->pNext.compare_exchange_strong( p, pNext,
-                        memory_model::memory_order_release, atomics::memory_order_relaxed ))
+                        memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
                     {
                         free_publication_record( static_cast<publication_record_type *>( p ));
                         m_Stat.onDeletePubRecord();
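
A note on the recurring pattern in this patch: most hunks take a CAS loop of
the form "store node.next relaxed; CAS( head, expected, node )" and strengthen
the CAS failure ordering from relaxed to acquire. A failed CAS is a load of
the value another thread has just published with release; reading it with
acquire lets the next loop iteration reuse that pointer without a data race,
which is evidently what TSan was complaining about. A minimal standalone
sketch of the idiom (the names are illustrative, not libcds API):

    #include <atomic>

    struct node {
        std::atomic<node*> next{ nullptr };
        int payload = 0;
    };

    std::atomic<node*> head{ nullptr };

    void push( node* n )
    {
        node* h = head.load( std::memory_order_relaxed );
        do {
            // n is not yet visible to other threads, so a relaxed
            // store of its next pointer is enough here.
            n->next.store( h, std::memory_order_relaxed );
            // Success: release publishes *n, including n->next.
            // Failure: acquire synchronizes with the thread that moved
            // head, so h may be dereferenced or reused on the retry.
        } while ( !head.compare_exchange_weak( h, n,
                      std::memory_order_release, std::memory_order_acquire ));
    }
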
diff --git a/cds/gc/details/dhp.h b/cds/gc/details/dhp.h
index 5f03cc8f..78ad05f3 100644
--- a/cds/gc/details/dhp.h
+++ b/cds/gc/details/dhp.h
@@ -160,7 +160,7 @@ namespace cds { namespace gc {
             do {
                 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                 // pHead is changed by compare_exchange_weak
-            } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
+            } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));

             pGuard->init();
             return pGuard;
@@ -303,7 +303,7 @@ namespace cds { namespace gc {
             do {
                 node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                 // pHead is changed by compare_exchange_weak
-            } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
+            } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_acquire ));

             return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
         }
@@ -318,7 +318,7 @@ namespace cds { namespace gc {
             do {
                 pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                 // pHead is changed by compare_exchange_weak
-            } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ));
+            } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_acquire ));

             return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
         }
@@ -399,7 +399,7 @@ namespace cds { namespace gc {
             do {
                 pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                 // pHead is changed by compare_exchange_weak
-            } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
+            } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_acquire ));
         }

         // links block's items to the free list
@@ -408,7 +408,7 @@ namespace cds { namespace gc {
             do {
                 pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                 // pHead is changed by compare_exchange_weak
-            } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
+            } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_acquire ));
         }

     }
@@ -485,7 +485,7 @@ namespace cds { namespace gc {
                 // pItem is changed by compare_exchange_weak
             } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree.load(atomics::memory_order_acquire),
-                atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+                atomics::memory_order_acquire, atomics::memory_order_acquire ));

         success:
             CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h
index f2e8e82a..d1b77002 100644
--- a/cds/intrusive/basket_queue.h
+++ b/cds/intrusive/basket_queue.h
@@ -656,6 +656,7 @@ namespace cds { namespace intrusive {
             link_checker::is_empty( pNew );

             typename gc::Guard guard;
+            typename gc::Guard gNext;
             back_off bkoff;

             marked_ptr t;
@@ -665,9 +666,9 @@ namespace cds { namespace intrusive {
                 marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_acquire );

                 if ( pNext.ptr() == nullptr ) {
-                    pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_release );
+                    pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed );
                     if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
-                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed ))
+                        if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_acquire ))
                             m_Stat.onAdvanceTailFailed();
                         break;
                     }
@@ -676,8 +677,6 @@ namespace cds { namespace intrusive {
                     m_Stat.onTryAddBasket();

                     // Reread tail next
-                    typename gc::Guard gNext;
-
                 try_again:
                     pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());});
diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h
index a3398f3b..efef4eec 100644
--- a/cds/intrusive/details/ellen_bintree_base.h
+++ b/cds/intrusive/details/ellen_bintree_base.h
@@ -232,7 +232,7 @@ namespace cds { namespace intrusive {
             atomics::atomic<base_class *>  m_pRight;   ///< Right subtree
             atomics::atomic<update_ptr>    m_pUpdate;  ///< Update descriptor
             //@cond
-            uintptr_t                      m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
+            atomics::atomic<uintptr_t>     m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
             //@endcond

             /// Default ctor
@@ -247,7 +247,7 @@ namespace cds { namespace intrusive {
             //@cond
             update_ptr null_update_desc()
             {
-                return update_ptr( reinterpret_cast<update_desc *>( (++m_nEmptyUpdate << 2) & 0xFFFF ));
+                return update_ptr( reinterpret_cast<update_desc *>( (m_nEmptyUpdate.fetch_add(1, atomics::memory_order_relaxed) << 2) & 0xFFFF ));
             }

             base_class * get_child( bool bRight, atomics::memory_order mo ) const
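
The ellen_bintree_base.h hunk above differs from the rest of the patch: it
makes the ABA-prevention counter itself atomic and replaces the plain
pre-increment with a relaxed fetch_add. A relaxed RMW is sufficient because
the counter only has to hand out distinct values atomically; it orders no
other memory. (fetch_add also returns the old value where ++ returned the new
one, which is harmless here since only distinctness of the resulting tags
matters.) A sketch of the same idea in isolation (illustrative names):

    #include <atomic>
    #include <cstdint>

    // Stand-in for the per-node tag generator used for m_pUpdate.
    std::atomic<std::uintptr_t> empty_update{ 0 };

    std::uintptr_t next_null_tag()
    {
        // A single atomic RMW: two threads can never draw the same value,
        // which is all that ABA prevention needs, hence relaxed ordering.
        return ( empty_update.fetch_add( 1, std::memory_order_relaxed ) << 2 ) & 0xFFFF;
    }
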
diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h
index c8bd065a..63c9f6ee 100644
--- a/cds/intrusive/optimistic_queue.h
+++ b/cds/intrusive/optimistic_queue.h
@@ -613,8 +613,8 @@ namespace cds { namespace intrusive {
             guards.assign( 1, &val );
             node_type * pTail = guards.protect( 0, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);} ); // Read the tail
             while( true ) {
-                pNew->m_pNext.store( pTail, memory_model::memory_order_release );
-                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Try to CAS the tail
+                pNew->m_pNext.store( pTail, memory_model::memory_order_relaxed );
+                if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { // Try to CAS the tail
                     pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev
                     ++m_ItemCounter;
                     m_Stat.onEnqueue();
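
The spin-lock changes that follow go in the opposite direction from the rest
of the patch: instead of adding ordering they delete code. TSan models
std::atomic operations natively, so once try-acquire uses acquire ordering on
both the success and the failure path (as the hunks below make it do) and
unlock ends in a release store, the hand-written CDS_TSAN_ANNOTATE_MUTEX_*
calls no longer add information and can go. A minimal sketch of the resulting
shape (illustrative, not the libcds class):

    #include <atomic>

    class tas_spinlock {
        std::atomic<bool> locked{ false };
    public:
        void lock()
        {
            bool expected = false;
            // acquire on success pairs with the release in unlock();
            // acquire on failure mirrors the patch's choice.
            while ( !locked.compare_exchange_weak( expected, true,
                        std::memory_order_acquire, std::memory_order_acquire ))
                expected = false;   // a failed CAS overwrites 'expected'
        }
        void unlock()
        {
            locked.store( false, std::memory_order_release );
        }
    };
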
diff --git a/cds/sync/spinlock.h b/cds/sync/spinlock.h
index 72941c14..94954db2 100644
--- a/cds/sync/spinlock.h
+++ b/cds/sync/spinlock.h
@@ -76,13 +76,11 @@ namespace cds {
         public:
             /// Construct free (unlocked) spin-lock
             spin_lock() CDS_NOEXCEPT
+                : m_spin( false )
 #       ifdef CDS_DEBUG
-                :m_dbgOwnerId( OS::c_NullThreadId )
+                , m_dbgOwnerId( OS::c_NullThreadId )
 #       endif
-            {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
-                m_spin.store( false, atomics::memory_order_relaxed );
-            }
+            {}

             /// Construct spin-lock in specified state
             /**
@@ -93,12 +91,7 @@ namespace cds {
                 : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
 #       endif
             {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
                 m_spin.store( bLocked, atomics::memory_order_relaxed );
-#       ifdef CDS_THREAD_SANITIZER_ENABLED
-                if ( bLocked )
-                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
-#       endif
             }

             /// Dummy copy constructor
@@ -112,15 +105,12 @@ namespace cds {
 #       ifdef CDS_DEBUG
                 , m_dbgOwnerId( cds::OS::c_NullThreadId )
 #       endif
-            {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
-            }
+            {}

             /// Destructor. On debug time it checks whether spin-lock is free
             ~spin_lock()
             {
                 assert( !m_spin.load( atomics::memory_order_relaxed ));
-                CDS_TSAN_ANNOTATE_MUTEX_DESTROY( &m_spin );
             }

             /// Check if the spin is locked
@@ -140,13 +130,7 @@ namespace cds {
             {
                 bool bCurrent = false;

-#       ifdef CDS_THREAD_SANITIZER_ENABLED
-                if ( m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
-                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
-                }
-#       else
-                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
-#       endif
+                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_acquire );

                 CDS_DEBUG_ONLY(
                     if ( !bCurrent ) {
@@ -197,7 +181,6 @@ namespace cds {
                 assert( m_dbgOwnerId == OS::get_current_thread_id());
                 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

-                CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
                 m_spin.store( false, atomics::memory_order_release );
             }
         };
@@ -255,15 +238,7 @@ namespace cds {
             bool try_acquire() CDS_NOEXCEPT
             {
                 integral_type nCurrent = 0;
-#       ifdef CDS_THREAD_SANITIZER_ENABLED
-                if ( m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
-                    CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
-                    return true;
-                }
-                return false;
-#       else
-                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
-#       endif
+                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_acquire );
             }

             bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
@@ -294,9 +269,7 @@ namespace cds {
             reentrant_spin_lock() CDS_NOEXCEPT
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
-            {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
-            }
+            {}

             /// Dummy copy constructor
             /**
@@ -307,16 +280,13 @@ namespace cds {
             reentrant_spin_lock( const reentrant_spin_lock& ) CDS_NOEXCEPT
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
-            {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
-            }
+            {}

             /// Construct object in specified state
             explicit reentrant_spin_lock( bool bLocked )
                 : m_spin(0)
                 , m_OwnerId( OS::c_NullThreadId )
             {
-                CDS_TSAN_ANNOTATE_MUTEX_CREATE( &m_spin );
                 if ( bLocked )
                     lock();
             }
@@ -376,7 +346,6 @@ namespace cds {
                     m_spin.store( n - 1, atomics::memory_order_relaxed );
                 else {
                     free();
-                    CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
                     m_spin.store( 0, atomics::memory_order_release );
                 }
                 return true;
diff --git a/cds/urcu/details/base.h b/cds/urcu/details/base.h
index e05abed5..5a2abcc0 100644
--- a/cds/urcu/details/base.h
+++ b/cds/urcu/details/base.h
@@ -375,8 +375,7 @@ namespace cds {
             thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
             do {
                 pRec->m_list.m_pNext = pOldHead;
-                CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &( pRec->m_list.m_pNext ));
-            } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+            } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));

             return pRec;
         }
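
The base.h hunk above and the hp_gc.cpp hunk below are the same fix: the
CDS_TSAN_ANNOTATE_HAPPENS_BEFORE call is dropped because the successful CAS
now carries the release itself, and the plain write to the next field is
sequenced before it. The success ordering is acq_rel rather than plain
release for two reasons: the acquire half synchronizes with the previous
pusher, so a reader walking the list sees every earlier record fully
initialized, and it keeps the acquire failure ordering no stronger than the
success ordering, which the pre-C++17 wording of compare_exchange required
(C++17 dropped that restriction). A sketch of both sides of the handshake
(illustrative names):

    #include <atomic>

    struct rec {
        rec* next = nullptr;   // plain field, published by the CAS below
        int  data = 0;
    };

    std::atomic<rec*> list_head{ nullptr };

    void attach( rec* r )
    {
        rec* old = list_head.load( std::memory_order_acquire );
        do {
            r->next = old;     // plain store, sequenced before the CAS
        } while ( !list_head.compare_exchange_weak( old, r,
                      std::memory_order_acq_rel, std::memory_order_acquire ));
    }

    int sum_data()
    {
        // acquire pairs with the release half of the CAS in attach(), and
        // the acquire half of each successful CAS chains earlier pushes in,
        // so every next/data written before publication is visible here.
        int sum = 0;
        for ( rec* p = list_head.load( std::memory_order_acquire ); p; p = p->next )
            sum += p->data;
        return sum;
    }
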
diff --git a/src/hp_gc.cpp b/src/hp_gc.cpp
index 91bb6b54..ec8ecc2a 100644
--- a/src/hp_gc.cpp
+++ b/src/hp_gc.cpp
@@ -150,10 +150,8 @@ namespace cds { namespace gc {
         hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );

         do {
-            // TSan: Next CAS release orders the memory
-            CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(&hprec->m_pNextNode );
             hprec->m_pNextNode = pOldHead;
-        } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+        } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));

         return hprec;
     }
-- 
2.34.1
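
For completeness, the push idiom from the first sketch can be exercised under
ThreadSanitizer with a toy harness like the one below (not part of the patch;
nodes are deliberately leaked). Build with something like
"clang++ -std=c++11 -fsanitize=thread -g -O1 -pthread tsan_push.cpp". TSan
instruments std::atomic directly, so no annotations are needed, and a clean
run is the expected result with the orderings shown:

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct node { std::atomic<node*> next{ nullptr }; int v{ 0 }; };
    std::atomic<node*> head{ nullptr };

    void push( node* n )
    {
        node* h = head.load( std::memory_order_relaxed );
        do {
            n->next.store( h, std::memory_order_relaxed );
        } while ( !head.compare_exchange_weak( h, n,
                      std::memory_order_release, std::memory_order_acquire ));
    }

    int main()
    {
        auto worker = []{ for ( int i = 0; i < 1000; ++i ) push( new node ); };
        std::thread a( worker ), b( worker );
        a.join(); b.join();

        // Both threads' nodes must be on the list, fully linked.
        int count = 0;
        for ( node* p = head.load(); p; p = p->next.load())
            ++count;
        assert( count == 2000 );
        return 0;
    }
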