3 #ifndef __CDS_LOCK_SPINLOCK_H
4 #define __CDS_LOCK_SPINLOCK_H
7 Defines spin-lock primitives
9 2012.01.23 1.1.0 khizmax Refactoring: use C++11 atomics
10 2010.01.22 0.6.0 khizmax Refactoring: use cds::atomic namespace
11 Explicit memory ordering specification (atomic::memory_order_xxx)
15 #include <cds/cxx11_atomic.h>
16 #include <cds/os/thread.h>
17 #include <cds/backoff_strategy.h>
18 #include <cds/lock/scoped_lock.h>
20 #include <cds/details/noncopyable.h>
23 /// Synchronization primitives
27 Simple and light-weight spin-lock critical section
It is useful for protecting small (short-running) sections of code.
32 TATAS (test-and-test-and-lock)
33 [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
No serialization is performed - any of the waiting threads may own the spin-lock.
This spin-lock is NOT recursive: the thread that owns the lock cannot call lock() again without deadlock.
The method unlock() may be called by any thread.
DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
40 - double lock attempt encountered by same thread (deadlock)
41 - unlock by another thread
43 If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
44 thread and repeats lock attempts later
47 - @p Backoff backoff strategy. Used when spin lock is locked
49 template <class Backoff >
53 typedef Backoff backoff_strategy ; ///< back-off strategy type
55 CDS_ATOMIC::atomic<bool> m_spin ; ///< Spin
57 typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
61 /// Construct free (unlocked) spin-lock
62 Spinlock() CDS_NOEXCEPT
64 :m_dbgOwnerId( OS::nullThreadId() )
67 m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
70 /// Construct spin-lock in specified state
72 In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
74 Spinlock( bool bLocked ) CDS_NOEXCEPT
76 :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::nullThreadId() )
79 m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
82 /// Dummy copy constructor
84 In theory, spin-lock cannot be copied. However, it is not practical.
85 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
86 initializes the spin to free (unlocked) state like default ctor.
88 Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
91 , m_dbgOwnerId( OS::nullThreadId() )
95 /// Destructor. On debug time it checks whether spin-lock is free
98 assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
101 /// Check if the spin is locked
102 bool is_locked() const CDS_NOEXCEPT
104 return m_spin.load( CDS_ATOMIC::memory_order_relaxed );
107 /// Try to lock the object
109 Returns \p true if locking is succeeded
110 otherwise (if the spin is already locked) returns \p false
112 Debug version: deadlock can be detected
114 bool try_lock() CDS_NOEXCEPT
119 /// Try to lock the object (synonym for \ref try_lock)
120 bool tryLock() CDS_NOEXCEPT
122 bool bCurrent = false;
123 m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
127 m_dbgOwnerId = OS::getCurrentThreadId();
133 /// Try to lock the object, repeat @p nTryCount times if failed
135 Returns \p true if locking is succeeded
136 otherwise (if the spin is already locked) returns \p false
138 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT
140 return tryLock( nTryCount );
143 /// Try to lock the object (synonym for \ref try_lock)
144 bool tryLock( unsigned int nTryCount ) CDS_NOEXCEPT
147 while ( nTryCount-- ) {
155 /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
156 void lock() CDS_NOEXCEPT
161 assert( m_dbgOwnerId != OS::getCurrentThreadId() );
164 while ( !tryLock() ) {
165 while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) {
169 assert( m_dbgOwnerId == OS::getCurrentThreadId() );
172 /// Unlock the spin-lock. Debug version: deadlock may be detected
173 void unlock() CDS_NOEXCEPT
175 assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
177 assert( m_dbgOwnerId == OS::getCurrentThreadId() );
178 CDS_DEBUG_DO( m_dbgOwnerId = OS::nullThreadId() ;)
180 m_spin.store( false, CDS_ATOMIC::memory_order_release );
/// Spin-lock implementation default for the current platform (\ref Spinlock with \p backoff::LockDefault)
typedef Spinlock<backoff::LockDefault > Spin;
187 /// Recursive spin lock.
Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
192 - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
193 - @p Backoff backoff strategy. Used when spin lock is locked
template <typename Integral, class Backoff>

    typedef OS::ThreadId thread_id ; ///< The type of thread id
    typedef Integral integral_type ; ///< The integral type used as the recursion (lock) counter
    typedef Backoff backoff_strategy ; ///< The backoff type

    CDS_ATOMIC::atomic<integral_type> m_spin ; ///< spin-lock atomic: 0 = free, otherwise the recursion depth
    thread_id m_OwnerId ; ///< Owner thread id. If the spin-lock is not locked it usually equals OS::nullThreadId()
    // NOTE(review): m_OwnerId is a plain (non-atomic) field; from the visible code it is
    // written only by the thread that holds or is acquiring the lock — confirm in the full source.

    // Record \p tid as the current owner
    void beOwner( thread_id tid ) CDS_NOEXCEPT

    // Clear ownership information
    void free() CDS_NOEXCEPT
        m_OwnerId = OS::nullThreadId();

    // Check whether thread \p tid currently owns the lock
    bool isOwned( thread_id tid ) const CDS_NOEXCEPT
        return m_OwnerId == tid;

    // Recursive fast path: if \p tid already owns the lock, just bump the recursion counter
    bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT
        if ( isOwned( tid )) {
            // relaxed is enough: only the owner thread touches the counter of a held lock
            m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );

    // Non-recursive path: try to move the counter 0 -> 1 (acquire on success pairs with the release in unlock)
    bool tryAcquireLock() CDS_NOEXCEPT
        integral_type nCurrent = 0;
        return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );

    // Bounded acquisition: up to \p nTryCount attempts with back-off between them
    bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
        backoff_strategy bkoff;
        while ( nTryCount-- ) {
            if ( tryAcquireLock() )

    // Unbounded acquisition (TATAS): attempt the CAS, then spin on a cheap relaxed read while busy
    void acquireLock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
        backoff_strategy bkoff;
        while ( !tryAcquireLock() ) {
            while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) )

    /// Default constructor initializes spin to free (unlocked) state
    ReentrantSpinT() CDS_NOEXCEPT
        , m_OwnerId( OS::nullThreadId() )

    /// Dummy copy constructor
    /**
        In theory a spin-lock cannot be copied, but forbidding copying is not practical.
        Therefore we provide a dummy copy constructor that performs no copy: it
        initializes the spin to the free (unlocked) state like the default ctor.
    */
    ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
        , m_OwnerId( OS::nullThreadId() )

    /// Construct object in the specified state (\p bLocked = true means locked)
    ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
        m_OwnerId( OS::nullThreadId() )

    /// Checks if the spin is locked
    /**
        The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
        Otherwise (i.e. lock count == 0 or the current thread owns the spin) the spin is unlocked.
    */
    bool is_locked() const CDS_NOEXCEPT
        return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));

    /// Try to lock the spin-lock (synonym for \ref try_lock)
    bool tryLock() CDS_NOEXCEPT
        thread_id tid = OS::getCurrentThreadId();
        // fast path: recursive acquisition by the current owner
        if ( tryLockOwned( tid ) )
        // slow path: compete for a free lock
        if ( tryAcquireLock()) {

    /// Try to lock the spin-lock. If the spin-lock is free the current thread owns it. Returns @p true if locking succeeded
    bool try_lock() CDS_NOEXCEPT

    /// Try to lock the object (synonym for \ref try_lock)
    bool tryLock( unsigned int nTryCount )
# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) )
        // GCC 4.6, clang 3.0 error in noexcept expression:
        // cannot call member function
        // 'bool cds::lock::ReentrantSpinT<Integral, Backoff>::tryAcquireLock(unsigned int)' without object
        CDS_NOEXCEPT_( noexcept( tryAcquireLock(nTryCount) ))
        thread_id tid = OS::getCurrentThreadId();
        if ( tryLockOwned( tid ) )
        if ( tryAcquireLock( nTryCount )) {

    /// Try to lock the object.
    /**
        If the spin-lock is locked the method repeats attempts to own the spin-lock up to @p nTryCount times.
        Between attempts @p backoff() is called.
        Returns @p true if the current thread owns the lock, @p false otherwise
    */
    bool try_lock( unsigned int nTryCount )
# if !( (CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40600 && CDS_COMPILER_VERSION < 40700) || (CDS_COMPILER == CDS_COMPILER_CLANG && CDS_COMPILER_VERSION < 30100) )
        // GCC 4.6, clang 3.0 error in noexcept expression:
        // cannot call member function
        // 'bool cds::lock::ReentrantSpinT<Integral, Backoff>::tryLock(unsigned int)' without object
        CDS_NOEXCEPT_( noexcept( tryLock(nTryCount) ))
        return tryLock( nTryCount );

    /// Lock the object; waits if it is busy
    void lock() CDS_NOEXCEPT
        thread_id tid = OS::getCurrentThreadId();
        // if not a recursive acquisition, wait for the lock and take ownership
        if ( !tryLockOwned( tid ) ) {

    /// Unlock the spin-lock. Returns @p true if the current thread is the owner of the spin-lock, @p false otherwise
    bool unlock() CDS_NOEXCEPT
        if ( isOwned( OS::getCurrentThreadId() ) ) {
            integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
            // still recursively held: just decrement the counter (relaxed — owner-only access)
            m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
            // last level of recursion: release ordering publishes the critical section
            m_spin.store( 0, CDS_ATOMIC::memory_order_release );

    /// Change the owner of the locked spin-lock. Succeeds only when called by the thread that owns the spin-lock
    bool changeOwner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
        if ( isOwned( OS::getCurrentThreadId() ) ) {
            assert( newOwnerId != OS::nullThreadId() );
            m_OwnerId = newOwnerId;
/// Recursive spin-lock based on atomic32u_t
typedef ReentrantSpinT<atomic32u_t, backoff::LockDefault> ReentrantSpin32;

/// Recursive spin-lock based on atomic64u_t type
typedef ReentrantSpinT<atomic64u_t, backoff::LockDefault> ReentrantSpin64;

/// Default recursive spin-lock (synonym for \ref ReentrantSpin32)
typedef ReentrantSpin32 ReentrantSpin;

/// The best (for the current platform) auto spin-lock: scoped RAII guard over \ref Spin
typedef scoped_lock<Spin> AutoSpin;
/// Standard (best for the current platform) spin-lock implementation
typedef lock::Spin SpinLock;

/// Standard (best for the current platform) recursive spin-lock implementation
typedef lock::ReentrantSpin RecursiveSpinLock;

/// 32bit recursive spin-lock shortcut
typedef lock::ReentrantSpin32 RecursiveSpinLock32;

/// 64bit recursive spin-lock shortcut
typedef lock::ReentrantSpin64 RecursiveSpinLock64;

/// Auto (scoped) spin-lock shortcut
typedef lock::AutoSpin AutoSpinLock;
422 #endif // #ifndef __CDS_LOCK_SPINLOCK_H