-//$$CDS-header$$
+/*
+ This file is a part of libcds - Concurrent Data Structures library
+
+ (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
+
+ Source code repo: http://github.com/khizmax/libcds/
+ Download: http://sourceforge.net/projects/libcds/files/
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H
[1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
No serialization performed - any of the waiting threads may own the spin-lock.
- This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock.
- The method unlock() can call any thread
+ This spin-lock is NOT recursive: the thread that owns the lock cannot call the \p lock() method again without deadlock.
+ The \p unlock() method can be called by any thread
DEBUG version: The spinlock stores the owner thread id. Assertion is raised when:
- double lock attempt encountered by same thread (deadlock)
- unlock by another thread
- If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
+ If spin-lock is locked the \p Backoff algorithm is called. Predefined \p backoff::LockDefault class yields current
thread and repeats lock attempts later
Template parameters:
- - @p Backoff backoff strategy. Used when spin lock is locked
+ - \p Backoff - backoff strategy. Used when spin lock is locked
*/
template <typename Backoff >
class spin_lock
/**
In debug mode: if \p bLocked = true then the spin-lock is created already owned by the current thread
*/
- spin_lock( bool bLocked ) CDS_NOEXCEPT
+ explicit spin_lock( bool bLocked ) CDS_NOEXCEPT
# ifdef CDS_DEBUG
: m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
# endif
/// Destructor. In debug mode it checks whether the spin-lock is free
~spin_lock()
{
- assert( !m_spin.load( atomics::memory_order_relaxed ) );
+ assert( !m_spin.load( atomics::memory_order_relaxed ));
}
/// Check if the spin is locked
bool try_lock() CDS_NOEXCEPT
{
bool bCurrent = false;
+
+# ifdef CDS_THREAD_SANITIZER_ENABLED
+ if ( m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
+ CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
+ }
+# else
m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+# endif
CDS_DEBUG_ONLY(
if ( !bCurrent ) {
return !bCurrent;
}
- /// Try to lock the object, repeat @p nTryCount times if failed
+ /// Try to lock the object, repeat \p nTryCount times if failed
/**
Returns \p true if locking is succeeded
otherwise (if the spin is already locked) returns \p false
*/
- bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
{
backoff_strategy backoff;
while ( nTryCount-- ) {
}
/// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
- void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
+ void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()()))
{
backoff_strategy backoff;
// Deadlock detected
- assert( m_dbgOwnerId != OS::get_current_thread_id() );
+ assert( m_dbgOwnerId != OS::get_current_thread_id());
// TATAS algorithm
- while ( !try_lock() ) {
- while ( m_spin.load( atomics::memory_order_relaxed ) ) {
+ while ( !try_lock()) {
+ while ( m_spin.load( atomics::memory_order_relaxed )) {
backoff();
}
}
- assert( m_dbgOwnerId == OS::get_current_thread_id() );
+ assert( m_dbgOwnerId == OS::get_current_thread_id());
}
/// Unlock the spin-lock. Debug version: an unlock by a thread that is not the owner is detected
void unlock() CDS_NOEXCEPT
{
- assert( m_spin.load( atomics::memory_order_relaxed ) );
+ assert( m_spin.load( atomics::memory_order_relaxed ));
- assert( m_dbgOwnerId == OS::get_current_thread_id() );
+ assert( m_dbgOwnerId == OS::get_current_thread_id());
CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
m_spin.store( false, atomics::memory_order_release );
+ CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
}
};
Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
Template parameters:
- - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
- - @p Backoff backoff strategy. Used when spin lock is locked
+ - \p Integral - one of integral atomic types: <tt>unsigned int</tt>, \p int, and others
+ - \p Backoff backoff strategy. Used when spin lock is locked
*/
template <typename Integral, class Backoff>
class reentrant_spin_lock
private:
atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
- thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
+ thread_id m_OwnerId ; ///< Owner thread id. If the spin-lock is not locked it usually equals \p OS::c_NullThreadId
private:
//@cond
bool try_acquire() CDS_NOEXCEPT
{
integral_type nCurrent = 0;
+# ifdef CDS_THREAD_SANITIZER_ENABLED
+ if ( m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
+ CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( &m_spin );
+ return true;
+ }
+ return false;
+# else
return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
+# endif
}
- bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
{
backoff_strategy bkoff;
return false;
}
- void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
+ void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()()))
{
// TATAS algorithm
backoff_strategy bkoff;
- while ( !try_acquire() ) {
- while ( m_spin.load( atomics::memory_order_relaxed ) )
+ while ( !try_acquire()) {
+ while ( m_spin.load( atomics::memory_order_relaxed ))
bkoff();
}
}
{}
/// Construct object for specified state
- reentrant_spin_lock( bool bLocked ) CDS_NOEXCEPT
+ explicit reentrant_spin_lock( bool bLocked )
: m_spin(0)
, m_OwnerId( OS::c_NullThreadId )
{
*/
bool is_locked() const CDS_NOEXCEPT
{
- return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
+ return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id()));
}
- /// Try to lock the spin-lock (synonym for \ref try_lock)
+ /// Try to lock the spin-lock (synonym for \p try_lock())
bool try_lock() CDS_NOEXCEPT
{
thread_id tid = OS::get_current_thread_id();
- if ( try_taken_lock( tid ) )
+ if ( try_taken_lock( tid ))
return true;
if ( try_acquire()) {
take( tid );
}
/// Try to lock the object
- bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
+ bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
{
thread_id tid = OS::get_current_thread_id();
- if ( try_taken_lock( tid ) )
+ if ( try_taken_lock( tid ))
return true;
if ( try_acquire( nTryCount )) {
take( tid );
}
/// Lock the object waits if it is busy
- void lock() CDS_NOEXCEPT
+ void lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().acquire()))
{
thread_id tid = OS::get_current_thread_id();
- if ( !try_taken_lock( tid ) ) {
+ if ( !try_taken_lock( tid )) {
acquire();
take( tid );
}
}
- /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
+ /// Unlock the spin-lock. Return \p true if the current thread is the owner of the spin-lock, \p false otherwise
bool unlock() CDS_NOEXCEPT
{
- if ( is_taken( OS::get_current_thread_id() ) ) {
+ if ( is_taken( OS::get_current_thread_id())) {
integral_type n = m_spin.load( atomics::memory_order_relaxed );
if ( n > 1 )
m_spin.store( n - 1, atomics::memory_order_relaxed );
else {
free();
m_spin.store( 0, atomics::memory_order_release );
+ CDS_TSAN_ANNOTATE_MUTEX_RELEASED( &m_spin );
}
return true;
}
/// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
{
- if ( is_taken( OS::get_current_thread_id() ) ) {
+ if ( is_taken( OS::get_current_thread_id())) {
assert( newOwnerId != OS::c_NullThreadId );
m_OwnerId = newOwnerId;
return true;