3 #ifndef __CDS_LOCK_SPINLOCK_H
4 #define __CDS_LOCK_SPINLOCK_H
7 Defines spin-lock primitives
9 2012.01.23 1.1.0 khizmax Refactoring: use C++11 atomics
10 2010.01.22 0.6.0 khizmax Refactoring: use cds::atomic namespace
11 Explicit memory ordering specification (atomic::memory_order_xxx)
15 #include <cds/algo/atomic.h>
16 #include <cds/os/thread.h>
17 #include <cds/algo/backoff_strategy.h>
20 /// Synchronization primitives
24 Simple and light-weight spin-lock critical section
25 It is useful to gain access to small (short-timed) code
29 TATAS (test-and-test-and-lock)
30 [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
No serialization is performed - any of the waiting threads may own the spin-lock.
This spin-lock is NOT recursive: the thread that owns the lock cannot call lock() again without deadlock.
The method unlock() can be called by any thread.
DEBUG version: The spinlock stores the owner thread id. An assertion is raised when:
37 - double lock attempt encountered by same thread (deadlock)
38 - unlock by another thread
40 If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
41 thread and repeats lock attempts later
44 - @p Backoff backoff strategy. Used when spin lock is locked
46 template <typename Backoff >
50 typedef Backoff backoff_strategy ; ///< back-off strategy type
52 atomics::atomic<bool> m_spin ; ///< Spin
54 typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
58 /// Construct free (unlocked) spin-lock
59 Spinlock() CDS_NOEXCEPT
61 :m_dbgOwnerId( OS::c_NullThreadId )
64 m_spin.store( false, atomics::memory_order_relaxed );
67 /// Construct spin-lock in specified state
69 In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
71 Spinlock( bool bLocked ) CDS_NOEXCEPT
73 : m_dbgOwnerId( bLocked ? OS::get_current_thread_id() : OS::c_NullThreadId )
76 m_spin.store( bLocked, atomics::memory_order_relaxed );
79 /// Dummy copy constructor
81 In theory, spin-lock cannot be copied. However, it is not practical.
82 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
83 initializes the spin to free (unlocked) state like default ctor.
85 Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
88 , m_dbgOwnerId( OS::c_NullThreadId )
92 /// Destructor. On debug time it checks whether spin-lock is free
95 assert( !m_spin.load( atomics::memory_order_relaxed ) );
98 /// Check if the spin is locked
99 bool is_locked() const CDS_NOEXCEPT
101 return m_spin.load( atomics::memory_order_relaxed );
104 /// Try to lock the object
106 Returns \p true if locking is succeeded
107 otherwise (if the spin is already locked) returns \p false
109 Debug version: deadlock can be detected
111 bool try_lock() CDS_NOEXCEPT
113 bool bCurrent = false;
114 m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
118 m_dbgOwnerId = OS::get_current_thread_id();
124 /// Try to lock the object, repeat @p nTryCount times if failed
126 Returns \p true if locking is succeeded
127 otherwise (if the spin is already locked) returns \p false
129 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
131 backoff_strategy backoff;
132 while ( nTryCount-- ) {
140 /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
141 void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
143 backoff_strategy backoff;
146 assert( m_dbgOwnerId != OS::get_current_thread_id() );
149 while ( !try_lock() ) {
150 while ( m_spin.load( atomics::memory_order_relaxed ) ) {
154 assert( m_dbgOwnerId == OS::get_current_thread_id() );
157 /// Unlock the spin-lock. Debug version: deadlock may be detected
158 void unlock() CDS_NOEXCEPT
160 assert( m_spin.load( atomics::memory_order_relaxed ) );
162 assert( m_dbgOwnerId == OS::get_current_thread_id() );
163 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
165 m_spin.store( false, atomics::memory_order_release );
/// Spin-lock implementation default for the current platform (Spinlock with the default back-off strategy)
typedef Spinlock<backoff::LockDefault > Spin;
172 /// Recursive spin lock.
Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
177 - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
178 - @p Backoff backoff strategy. Used when spin lock is locked
180 template <typename Integral, class Backoff>
183 typedef OS::ThreadId thread_id ; ///< The type of thread id
186 typedef Integral integral_type ; ///< The integral type
187 typedef Backoff backoff_strategy ; ///< The backoff type
190 atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
191 thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
195 void take( thread_id tid ) CDS_NOEXCEPT
200 void free() CDS_NOEXCEPT
202 m_OwnerId = OS::c_NullThreadId;
205 bool is_taken( thread_id tid ) const CDS_NOEXCEPT
207 return m_OwnerId == tid;
210 bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
212 if ( is_taken( tid )) {
213 m_spin.fetch_add( 1, atomics::memory_order_relaxed );
219 bool try_acquire() CDS_NOEXCEPT
221 integral_type nCurrent = 0;
222 return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
225 bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
227 backoff_strategy bkoff;
229 while ( nTryCount-- ) {
237 void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
240 backoff_strategy bkoff;
241 while ( !try_acquire() ) {
242 while ( m_spin.load( atomics::memory_order_relaxed ) )
249 /// Default constructor initializes spin to free (unlocked) state
250 ReentrantSpinT() CDS_NOEXCEPT
252 , m_OwnerId( OS::c_NullThreadId )
255 /// Dummy copy constructor
257 In theory, spin-lock cannot be copied. However, it is not practical.
258 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
259 initializes the spin to free (unlocked) state like default ctor.
261 ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
263 , m_OwnerId( OS::c_NullThreadId )
266 /// Construct object for specified state
267 ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
269 , m_OwnerId( OS::c_NullThreadId )
275 /// Checks if the spin is locked
277 The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
278 Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked.
280 bool is_locked() const CDS_NOEXCEPT
282 return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
285 /// Try to lock the spin-lock (synonym for \ref try_lock)
286 bool try_lock() CDS_NOEXCEPT
288 thread_id tid = OS::get_current_thread_id();
289 if ( try_taken_lock( tid ) )
291 if ( try_acquire()) {
298 /// Try to lock the object
299 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
301 thread_id tid = OS::get_current_thread_id();
302 if ( try_taken_lock( tid ) )
304 if ( try_acquire( nTryCount )) {
311 /// Lock the object waits if it is busy
312 void lock() CDS_NOEXCEPT
314 thread_id tid = OS::get_current_thread_id();
315 if ( !try_taken_lock( tid ) ) {
321 /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
322 bool unlock() CDS_NOEXCEPT
324 if ( is_taken( OS::get_current_thread_id() ) ) {
325 integral_type n = m_spin.load( atomics::memory_order_relaxed );
327 m_spin.store( n - 1, atomics::memory_order_relaxed );
330 m_spin.store( 0, atomics::memory_order_release );
337 /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
338 bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
340 if ( is_taken( OS::get_current_thread_id() ) ) {
341 assert( newOwnerId != OS::c_NullThreadId );
342 m_OwnerId = newOwnerId;
/// Recursive 32bit spin-lock (depth counter is uint32_t)
typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;

/// Recursive 64bit spin-lock (depth counter is uint64_t)
typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;

/// Default recursive spin-lock type (the 32bit variant)
typedef ReentrantSpin32 ReentrantSpin;
/// Standard (best for the current platform) spin-lock implementation
typedef lock::Spin SpinLock;

/// Standard (best for the current platform) recursive spin-lock implementation
typedef lock::ReentrantSpin RecursiveSpinLock;

/// 32bit recursive spin-lock shortcut
typedef lock::ReentrantSpin32 RecursiveSpinLock32;

/// 64bit recursive spin-lock shortcut
typedef lock::ReentrantSpin64 RecursiveSpinLock64;
374 #endif // #ifndef __CDS_LOCK_SPINLOCK_H