-//$$CDS-header$$
+//$$CDS-header$$-2
-#ifndef __CDS_LOCK_SPINLOCK_H
-#define __CDS_LOCK_SPINLOCK_H
+#ifndef CDSLIB_LOCK_SPINLOCK_H
+#define CDSLIB_LOCK_SPINLOCK_H
/*
Defines spin-lock primitives
2006 khizmax Created
*/
-#include <cds/cxx11_atomic.h>
+#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>
-#include <cds/lock/scoped_lock.h>
-
-#include <cds/details/noncopyable.h>
namespace cds {
/// Synchronization primitives
Template parameters:
- @p Backoff backoff strategy. Used when spin lock is locked
*/
- template <class Backoff >
+ template <typename Backoff >
class Spinlock
{
public:
typedef Backoff backoff_strategy ; ///< back-off strategy type
private:
- CDS_ATOMIC::atomic<bool> m_spin ; ///< Spin
+ atomics::atomic<bool> m_spin ; ///< Spin
# ifdef CDS_DEBUG
- typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
+ typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
# endif
public:
/// Construct free (unlocked) spin-lock
Spinlock() CDS_NOEXCEPT
# ifdef CDS_DEBUG
- :m_dbgOwnerId( OS::nullThreadId() )
+ :m_dbgOwnerId( OS::c_NullThreadId )
# endif
{
- m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( false, atomics::memory_order_relaxed );
}
/// Construct spin-lock in specified state
*/
Spinlock( bool bLocked ) CDS_NOEXCEPT
# ifdef CDS_DEBUG
- :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::nullThreadId() )
+ : m_dbgOwnerId( bLocked ? OS::get_current_thread_id() : OS::c_NullThreadId )
# endif
{
- m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( bLocked, atomics::memory_order_relaxed );
}
/// Dummy copy constructor
Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
: m_spin( false )
# ifdef CDS_DEBUG
- , m_dbgOwnerId( OS::nullThreadId() )
+ , m_dbgOwnerId( OS::c_NullThreadId )
# endif
{}
/// Destructor. In debug builds it checks that the spin-lock is free
~Spinlock()
{
- assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( !m_spin.load( atomics::memory_order_relaxed ) );
}
/// Check if the spin is locked
bool is_locked() const CDS_NOEXCEPT
{
- return m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_spin.load( atomics::memory_order_relaxed );
}
/// Try to lock the object
Debug version: deadlock can be detected
*/
bool try_lock() CDS_NOEXCEPT
- {
- return tryLock();
- }
-
- /// Try to lock the object (synonym for \ref try_lock)
- bool tryLock() CDS_NOEXCEPT
{
bool bCurrent = false;
- m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
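+ // on success bCurrent is still false and the lock is taken with acquire
+ // ordering (pairing with the release store in unlock()); on failure the CAS
+ // loads the current value, true, into bCurrent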
- CDS_DEBUG_DO(
+ CDS_DEBUG_ONLY(
if ( !bCurrent ) {
- m_dbgOwnerId = OS::getCurrentThreadId();
+ m_dbgOwnerId = OS::get_current_thread_id();
}
)
return !bCurrent;
Returns \p true if locking succeeded,
otherwise (if the spin is already locked) returns \p false
*/
- bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
{
- return tryLock( nTryCount );
- }
-
- /// Try to lock the object (synonym for \ref try_lock)
- bool tryLock( unsigned int nTryCount ) CDS_NOEXCEPT
- {
- Backoff backoff;
+ backoff_strategy backoff;
while ( nTryCount-- ) {
- if ( tryLock() )
+ if ( try_lock() )
return true;
backoff();
}
return false;
}
/// Lock the spin-lock. Spins until the spin-lock becomes free. Debug version: deadlock may be detected
- void lock() CDS_NOEXCEPT
+ void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
{
- Backoff backoff;
+ backoff_strategy backoff;
// Deadlock detected
- assert( m_dbgOwnerId != OS::getCurrentThreadId() );
+ assert( m_dbgOwnerId != OS::get_current_thread_id() );
// TATAS algorithm
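+ // (test-and-test-and-set): spin on a cheap relaxed load and retry the CAS in
+ // try_lock() only when the lock is observed free, so waiting threads do not
+ // keep invalidating the cache line with failed writes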
- while ( !tryLock() ) {
- while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) {
+ while ( !try_lock() ) {
+ while ( m_spin.load( atomics::memory_order_relaxed ) ) {
backoff();
}
}
- assert( m_dbgOwnerId == OS::getCurrentThreadId() );
+ assert( m_dbgOwnerId == OS::get_current_thread_id() );
}
/// Unlock the spin-lock. Debug version: unlocking by a thread that does not own the lock is detected
void unlock() CDS_NOEXCEPT
{
- assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( m_spin.load( atomics::memory_order_relaxed ) );
- assert( m_dbgOwnerId == OS::getCurrentThreadId() );
- CDS_DEBUG_DO( m_dbgOwnerId = OS::nullThreadId() ;)
+ assert( m_dbgOwnerId == OS::get_current_thread_id() );
+ CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
- m_spin.store( false, CDS_ATOMIC::memory_order_release );
+ m_spin.store( false, atomics::memory_order_release );
}
};
typedef Backoff backoff_strategy ; ///< The backoff type
private:
- CDS_ATOMIC::atomic<integral_type> m_spin ; ///< spin-lock atomic
- thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::nullThreadId()
+ atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
+ thread_id m_OwnerId ; ///< Owner thread id. If the spin-lock is not locked it usually equals OS::c_NullThreadId
private:
//@cond
- void beOwner( thread_id tid ) CDS_NOEXCEPT
+ void take( thread_id tid ) CDS_NOEXCEPT
{
m_OwnerId = tid;
}
void free() CDS_NOEXCEPT
{
- m_OwnerId = OS::nullThreadId();
+ m_OwnerId = OS::c_NullThreadId;
}
- bool isOwned( thread_id tid ) const CDS_NOEXCEPT
+ bool is_taken( thread_id tid ) const CDS_NOEXCEPT
{
return m_OwnerId == tid;
}
- bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT
+ bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
{
- if ( isOwned( tid )) {
- m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ if ( is_taken( tid )) {
+ m_spin.fetch_add( 1, atomics::memory_order_relaxed );
return true;
}
return false;
}
- bool tryAcquireLock() CDS_NOEXCEPT
+ bool try_acquire() CDS_NOEXCEPT
{
integral_type nCurrent = 0;
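+ // the 0 -> 1 transition takes a free lock; a weak CAS is acceptable because a
+ // spurious failure only makes the caller treat the lock as busy, and acquire
+ // ordering on success pairs with the release store of 0 in unlock()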
- return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
}
- bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
{
backoff_strategy bkoff;
while ( nTryCount-- ) {
- if ( tryAcquireLock() )
+ if ( try_acquire() )
return true;
bkoff();
}
return false;
}
- void acquireLock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
{
// TATAS algorithm
backoff_strategy bkoff;
- while ( !tryAcquireLock() ) {
- while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) )
+ while ( !try_acquire() ) {
+ while ( m_spin.load( atomics::memory_order_relaxed ) )
bkoff();
}
}
/// Default constructor initializes spin to free (unlocked) state
ReentrantSpinT() CDS_NOEXCEPT
: m_spin(0)
- , m_OwnerId( OS::nullThreadId() )
+ , m_OwnerId( OS::c_NullThreadId )
{}
/// Dummy copy constructor
*/
ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
: m_spin(0)
- , m_OwnerId( OS::nullThreadId() )
+ , m_OwnerId( OS::c_NullThreadId )
{}
/// Construct object for specified state
ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
- : m_spin(0),
- m_OwnerId( OS::nullThreadId() )
+ : m_spin(0)
+ , m_OwnerId( OS::c_NullThreadId )
{
if ( bLocked )
lock();
*/
bool is_locked() const CDS_NOEXCEPT
{
- return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
+ return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
}
- /// Try to lock the spin-lock (synonym for \ref try_lock)
+ /// Try to lock the spin-lock. If the spin-lock is free the current thread takes ownership; returns \p true on success
- bool tryLock() CDS_NOEXCEPT
+ bool try_lock() CDS_NOEXCEPT
{
- thread_id tid = OS::getCurrentThreadId();
- if ( tryLockOwned( tid ) )
+ thread_id tid = OS::get_current_thread_id();
+ if ( try_taken_lock( tid ) )
return true;
- if ( tryAcquireLock()) {
- beOwner( tid );
+ if ( try_acquire()) {
+ take( tid );
return true;
}
return false;
}
- /// Try to lock the spin-lock. If spin-lock is free the current thread owns it. Return @p true if locking is success
- bool try_lock() CDS_NOEXCEPT
- {
- return tryLock();
- }
-
- /// Try to lock the object (synonym for \ref try_lock)
- bool tryLock( unsigned int nTryCount )
-# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) )
- // GCC 4.6, clang 3.0 error in noexcept expression:
- // cannot call member function 'bool cds::lock::ReentrantSpinT<Integral, Backoff>::tryAcquireLock(unsigned int)' without object
- CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) ))
-# endif
+ /// Try to lock the object, repeating up to \p nTryCount attempts with \p backoff() called between attempts
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
{
- thread_id tid = OS::getCurrentThreadId();
- if ( tryLockOwned( tid ) )
+ thread_id tid = OS::get_current_thread_id();
+ if ( try_taken_lock( tid ) )
return true;
- if ( tryAcquireLock( nTryCount )) {
- beOwner( tid );
+ if ( try_acquire( nTryCount )) {
+ take( tid );
return true;
}
return false;
}
- /// Try to lock the object.
- /**
- If the spin-lock is locked the method repeats attempts to own spin-lock up to @p nTryCount times.
- Between attempts @p backoff() is called.
- Return @p true if current thread owns the lock @p false otherwise
- */
- bool try_lock( unsigned int nTryCount )
-# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) )
- // GCC 4.6, clang 3.0 error in noexcept expression:
- // cannot call member function 'bool cds::lock::ReentrantSpinT<Integral, Backoff>::tryLock(unsigned int)' without object
- CDS_NOEXCEPT_( noexcept( tryLock(nTryCount) ))
-# endif
- {
- return tryLock( nTryCount );
- }
-
/// Lock the object, waiting while it is busy
void lock() CDS_NOEXCEPT
{
- thread_id tid = OS::getCurrentThreadId();
- if ( !tryLockOwned( tid ) ) {
- acquireLock();
- beOwner( tid );
+ thread_id tid = OS::get_current_thread_id();
+ if ( !try_taken_lock( tid ) ) {
+ acquire();
+ take( tid );
}
}
/// Unlock the spin-lock. Returns @p true if the current thread owns the spin-lock, @p false otherwise
bool unlock() CDS_NOEXCEPT
{
- if ( isOwned( OS::getCurrentThreadId() ) ) {
- integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+ if ( is_taken( OS::get_current_thread_id() ) ) {
+ integral_type n = m_spin.load( atomics::memory_order_relaxed );
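+ // n > 1: recursive unlock by the owner, just decrement the count (a relaxed
+ // store suffices, only the owner writes it while the lock is held);
+ // n == 1: clear the owner and release the lock by storing 0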
if ( n > 1 )
- m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( n - 1, atomics::memory_order_relaxed );
else {
free();
- m_spin.store( 0, CDS_ATOMIC::memory_order_release );
+ m_spin.store( 0, atomics::memory_order_release );
}
return true;
}
return false;
}
/// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
- bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
+ bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
{
- if ( isOwned( OS::getCurrentThreadId() ) ) {
- assert( newOwnerId != OS::nullThreadId() );
+ if ( is_taken( OS::get_current_thread_id() ) ) {
+ assert( newOwnerId != OS::c_NullThreadId );
m_OwnerId = newOwnerId;
return true;
}
return false;
}
};
- /// Recursive spin-lock based on atomic32u_t
- typedef ReentrantSpinT<atomic32u_t, backoff::LockDefault> ReentrantSpin32;
+ /// Recursive 32bit spin-lock
+ typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;
- /// Recursive spin-lock based on atomic64u_t type
- typedef ReentrantSpinT<atomic64u_t, backoff::LockDefault> ReentrantSpin64;
+ /// Recursive 64bit spin-lock
+ typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;
- /// Recursive spin-lock based on atomic32_t type
- typedef ReentrantSpin32 ReentrantSpin;
-
- /// The best (for the current platform) auto spin-lock
- typedef scoped_lock<Spin> AutoSpin;
+ /// Default recursive spin-lock type
+ typedef ReentrantSpin32 ReentrantSpin;
} // namespace lock
/// 64bit recursive spin-lock shortcut
typedef lock::ReentrantSpin64 RecursiveSpinLock64;
- /// Auto spin-lock shortcut
- typedef lock::AutoSpin AutoSpinLock;
-
} // namespace cds
-#endif // #ifndef __CDS_LOCK_SPINLOCK_H
+#endif // #ifndef CDSLIB_LOCK_SPINLOCK_H
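
A minimal usage sketch of the renamed API (illustrative only, not part of the patch). It assumes this header is included as <cds/lock/spinlock.h>, per the new include guard, and uses the cds::backoff::LockDefault strategy from <cds/algo/backoff_strategy.h>. Because lock(), try_lock() and unlock() follow the standard Lockable interface, std::lock_guard can take over the role of the removed lock::AutoSpin / scoped_lock helper:

#include <cds/lock/spinlock.h>   // this header; path assumed from the include guard
#include <mutex>                 // std::lock_guard

// non-recursive spin-lock using the default back-off strategy
cds::lock::Spinlock< cds::backoff::LockDefault > g_spin;

// recursive 32bit spin-lock (the ReentrantSpin typedef defined above)
cds::lock::ReentrantSpin g_recursive;

int g_counter = 0;

void increment()
{
    // std::lock_guard replaces the removed lock::AutoSpin helper
    std::lock_guard< cds::lock::Spinlock< cds::backoff::LockDefault > > guard( g_spin );
    ++g_counter;
}

void reentrant_example()
{
    g_recursive.lock();
    g_recursive.lock();     // the owner may lock again; the count is incremented
    g_recursive.unlock();   // decrements the count
    g_recursive.unlock();   // the last unlock releases the spin-lock
}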