From b4e3fc72807e0215c885164d825ad3595a5a68a7 Mon Sep 17 00:00:00 2001 From: khizmax Date: Tue, 30 Sep 2014 23:27:30 +0400 Subject: [PATCH] Refactoring: cds/lock/spinlock.h --- cds/lock/spinlock.h | 113 +++++++++----------------- projects/Win/vc12/cds.vcxproj | 1 - projects/Win/vc12/cds.vcxproj.filters | 3 - tests/unit/alloc/random.cpp | 2 +- tests/unit/lock/nolock.h | 2 +- tests/unit/lock/win32_lock.h | 4 +- 6 files changed, 41 insertions(+), 84 deletions(-) diff --git a/cds/lock/spinlock.h b/cds/lock/spinlock.h index 1b657fc6..e4248a5f 100644 --- a/cds/lock/spinlock.h +++ b/cds/lock/spinlock.h @@ -46,7 +46,7 @@ namespace cds { Template parameters: - @p Backoff backoff strategy. Used when spin lock is locked */ - template + template class Spinlock { public: @@ -54,7 +54,7 @@ namespace cds { private: atomics::atomic m_spin ; ///< Spin # ifdef CDS_DEBUG - typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode) + typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode) # endif public: @@ -73,7 +73,7 @@ namespace cds { */ Spinlock( bool bLocked ) CDS_NOEXCEPT # ifdef CDS_DEBUG - :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId ) + : m_dbgOwnerId( bLocked ? 
OS::getCurrentThreadId() : OS::c_NullThreadId ) # endif { m_spin.store( bLocked, atomics::memory_order_relaxed ); @@ -112,12 +112,6 @@ namespace cds { Debug version: deadlock can be detected */ bool try_lock() CDS_NOEXCEPT - { - return tryLock(); - } - - /// Try to lock the object (synonym for \ref try_lock) - bool tryLock() CDS_NOEXCEPT { bool bCurrent = false; m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed ); @@ -135,17 +129,11 @@ namespace cds { Returns \p true if locking is succeeded otherwise (if the spin is already locked) returns \p false */ - bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT + bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) ) { - return tryLock( nTryCount ); - } - - /// Try to lock the object (synonym for \ref try_lock) - bool tryLock( unsigned int nTryCount ) CDS_NOEXCEPT - { - Backoff backoff; + backoff_strategy backoff; while ( nTryCount-- ) { - if ( tryLock() ) + if ( try_lock() ) return true; backoff(); } @@ -153,15 +141,15 @@ } /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected - void lock() CDS_NOEXCEPT + void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() )) { - Backoff backoff; + backoff_strategy backoff; // Deadlock detected assert( m_dbgOwnerId != OS::getCurrentThreadId() ); // TATAS algorithm - while ( !tryLock() ) { + while ( !try_lock() ) { while ( m_spin.load( atomics::memory_order_relaxed ) ) { backoff(); } @@ -203,11 +191,11 @@ private: atomics::atomic m_spin ; ///< spin-lock atomic - thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId + thread_id m_OwnerId ; ///< Owner thread id. 
If spin-lock is not locked it usually equals to OS::c_NullThreadId private: //@cond - void beOwner( thread_id tid ) CDS_NOEXCEPT + void take( thread_id tid ) CDS_NOEXCEPT { m_OwnerId = tid; } @@ -217,43 +205,43 @@ namespace cds { m_OwnerId = OS::c_NullThreadId; } - bool isOwned( thread_id tid ) const CDS_NOEXCEPT + bool is_taken( thread_id tid ) const CDS_NOEXCEPT { return m_OwnerId == tid; } - bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT + bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT { - if ( isOwned( tid )) { + if ( is_taken( tid )) { m_spin.fetch_add( 1, atomics::memory_order_relaxed ); return true; } return false; } - bool tryAcquireLock() CDS_NOEXCEPT + bool try_acquire() CDS_NOEXCEPT { integral_type nCurrent = 0; return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed ); } - bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) + bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) { backoff_strategy bkoff; while ( nTryCount-- ) { - if ( tryAcquireLock() ) + if ( try_acquire() ) return true; bkoff(); } return false; } - void acquireLock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) + void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) { // TATAS algorithm backoff_strategy bkoff; - while ( !tryAcquireLock() ) { + while ( !try_acquire() ) { while ( m_spin.load( atomics::memory_order_relaxed ) ) bkoff(); } @@ -294,76 +282,49 @@ namespace cds { */ bool is_locked() const CDS_NOEXCEPT { - return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() )); + return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::getCurrentThreadId() )); } /// Try to lock the spin-lock (synonym for \ref try_lock) - bool tryLock() CDS_NOEXCEPT + bool try_lock() CDS_NOEXCEPT { thread_id tid = OS::getCurrentThreadId(); - if ( tryLockOwned( tid ) ) + if ( 
try_taken_lock( tid ) ) return true; - if ( tryAcquireLock()) { - beOwner( tid ); + if ( try_acquire()) { + take( tid ); return true; } return false; } - /// Try to lock the spin-lock. If spin-lock is free the current thread owns it. Return @p true if locking is success - bool try_lock() CDS_NOEXCEPT - { - return tryLock(); - } - - /// Try to lock the object (synonym for \ref try_lock) - bool tryLock( unsigned int nTryCount ) -# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) ) - // GCC 4.6, clang 3.0 error in noexcept expression: - // cannot call member function ‘bool cds::lock::ReentrantSpinT::tryAcquireLock(unsigned int) without object - CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) )) -# endif + /// Try to lock the object + bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) ) { thread_id tid = OS::getCurrentThreadId(); - if ( tryLockOwned( tid ) ) + if ( try_taken_lock( tid ) ) return true; - if ( tryAcquireLock( nTryCount )) { - beOwner( tid ); + if ( try_acquire( nTryCount )) { + take( tid ); return true; } return false; } - /// Try to lock the object. - /** - If the spin-lock is locked the method repeats attempts to own spin-lock up to @p nTryCount times. - Between attempts @p backoff() is called. 
- Return @p true if current thread owns the lock @p false otherwise - */ - bool try_lock( unsigned int nTryCount ) -# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) ) - // GCC 4.6, clang 3.0 error in noexcept expression: - // cannot call member function ‘bool cds::lock::ReentrantSpinT::tryLock(unsigned int) without object - CDS_NOEXCEPT_( noexcept( tryLock(nTryCount) )) -# endif - { - return tryLock( nTryCount ); - } - /// Lock the object waits if it is busy void lock() CDS_NOEXCEPT { thread_id tid = OS::getCurrentThreadId(); - if ( !tryLockOwned( tid ) ) { - acquireLock(); - beOwner( tid ); + if ( !try_taken_lock( tid ) ) { + acquire(); + take( tid ); } } /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise bool unlock() CDS_NOEXCEPT { - if ( isOwned( OS::getCurrentThreadId() ) ) { + if ( is_taken( OS::getCurrentThreadId() ) ) { integral_type n = m_spin.load( atomics::memory_order_relaxed ); if ( n > 1 ) m_spin.store( n - 1, atomics::memory_order_relaxed ); @@ -377,9 +338,9 @@ namespace cds { } /// Change the owner of locked spin-lock. 
May be called by thread that is owner of the spin-lock - bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT + bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT { - if ( isOwned( OS::getCurrentThreadId() ) ) { + if ( is_taken( OS::getCurrentThreadId() ) ) { assert( newOwnerId != OS::c_NullThreadId ); m_OwnerId = newOwnerId; return true; @@ -389,10 +350,10 @@ namespace cds { }; /// Recursive spin-lock based on atomic32u_t - typedef ReentrantSpinT ReentrantSpin32; + typedef ReentrantSpinT ReentrantSpin32; /// Recursive spin-lock based on atomic64u_t type - typedef ReentrantSpinT ReentrantSpin64; + typedef ReentrantSpinT ReentrantSpin64; /// Recursive spin-lock based on atomic32_t type typedef ReentrantSpin32 ReentrantSpin; diff --git a/projects/Win/vc12/cds.vcxproj b/projects/Win/vc12/cds.vcxproj index 471ae9ea..cea98ac6 100644 --- a/projects/Win/vc12/cds.vcxproj +++ b/projects/Win/vc12/cds.vcxproj @@ -827,7 +827,6 @@ - diff --git a/projects/Win/vc12/cds.vcxproj.filters b/projects/Win/vc12/cds.vcxproj.filters index 358e40ba..74f37bfe 100644 --- a/projects/Win/vc12/cds.vcxproj.filters +++ b/projects/Win/vc12/cds.vcxproj.filters @@ -227,9 +227,6 @@ Header Files\cds\details - - Header Files\cds\details - Header Files\cds\gc diff --git a/tests/unit/alloc/random.cpp b/tests/unit/alloc/random.cpp index 05034f8e..3e6a6b41 100644 --- a/tests/unit/alloc/random.cpp +++ b/tests/unit/alloc/random.cpp @@ -82,7 +82,7 @@ namespace memory { for ( size_t nPass = 0; nPass < s_nPassPerThread; ) { size_t nIdx = m_rndGen( size_t(0), s_nDataSize - 1 ); Item & item = arr.at(nIdx); - if ( item.m_access.tryLock() ) { + if ( item.m_access.try_lock() ) { if ( item.m_pszBlock ) { m_Alloc.deallocate( item.m_pszBlock, 1 ); item.m_pszBlock = nullptr; diff --git a/tests/unit/lock/nolock.h b/tests/unit/lock/nolock.h index d986cf93..0fa37703 100644 --- a/tests/unit/lock/nolock.h +++ b/tests/unit/lock/nolock.h @@ -9,7 +9,7 @@ namespace lock { public: void lock() {} void unlock() {} - bool 
tryLock() { return true; } + bool try_lock() { return true; } }; } diff --git a/tests/unit/lock/win32_lock.h b/tests/unit/lock/win32_lock.h index 197c67cc..ff3ddb60 100644 --- a/tests/unit/lock/win32_lock.h +++ b/tests/unit/lock/win32_lock.h @@ -20,7 +20,7 @@ namespace lock { void lock() { ::EnterCriticalSection( &m_cs ) ; } void unlock() { ::LeaveCriticalSection( &m_cs) ; } - bool tryLock() { return ::TryEnterCriticalSection( &m_cs ) != 0 ; } + bool try_lock() { return ::TryEnterCriticalSection( &m_cs ) != 0 ; } }; class Mutex { @@ -32,7 +32,7 @@ namespace lock { void lock() { ::WaitForSingleObject( m_hMutex, INFINITE ); } void unlock() { ::ReleaseMutex( m_hMutex ); } - bool tryLock() { return ::WaitForSingleObject( m_hMutex, 0) == WAIT_OBJECT_0; } + bool try_lock() { return ::WaitForSingleObject( m_hMutex, 0) == WAIT_OBJECT_0; } }; } // namespace win -- 2.34.1