3 #ifndef __CDS_LOCK_SPINLOCK_H
4 #define __CDS_LOCK_SPINLOCK_H
/*
    Defines spin-lock primitives

    Editions:
        2012.01.23  1.1.0  khizmax  Refactoring: use C++11 atomics
        2010.01.22  0.6.0  khizmax  Refactoring: use cds::atomic namespace
                                    Explicit memory ordering specification (atomic::memory_order_xxx)
*/
15 #include <cds/cxx11_atomic.h>
16 #include <cds/os/thread.h>
17 #include <cds/algo/backoff_strategy.h>
19 #include <cds/details/noncopyable.h>
22 /// Synchronization primitives
26 Simple and light-weight spin-lock critical section
27 It is useful to gain access to small (short-timed) code
31 TATAS (test-and-test-and-lock)
32 [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
34 No serialization performed - any of waiting threads may owns the spin-lock.
35 This spin-lock is NOT recursive: the thread owned the lock cannot call lock() method withod deadlock.
36 The method unlock() can call any thread
38 DEBUG version: The spinlock stores owner thead id. Assertion is raised when:
39 - double lock attempt encountered by same thread (deadlock)
40 - unlock by another thread
42 If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
43 thread and repeats lock attempts later
46 - @p Backoff backoff strategy. Used when spin lock is locked
48 template <typename Backoff >
52 typedef Backoff backoff_strategy ; ///< back-off strategy type
54 atomics::atomic<bool> m_spin ; ///< Spin
56 typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
60 /// Construct free (unlocked) spin-lock
61 Spinlock() CDS_NOEXCEPT
63 :m_dbgOwnerId( OS::c_NullThreadId )
66 m_spin.store( false, atomics::memory_order_relaxed );
69 /// Construct spin-lock in specified state
71 In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
73 Spinlock( bool bLocked ) CDS_NOEXCEPT
75 : m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
78 m_spin.store( bLocked, atomics::memory_order_relaxed );
81 /// Dummy copy constructor
83 In theory, spin-lock cannot be copied. However, it is not practical.
84 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
85 initializes the spin to free (unlocked) state like default ctor.
87 Spinlock(const Spinlock<Backoff>& ) CDS_NOEXCEPT
90 , m_dbgOwnerId( OS::c_NullThreadId )
94 /// Destructor. On debug time it checks whether spin-lock is free
97 assert( !m_spin.load( atomics::memory_order_relaxed ) );
100 /// Check if the spin is locked
101 bool is_locked() const CDS_NOEXCEPT
103 return m_spin.load( atomics::memory_order_relaxed );
106 /// Try to lock the object
108 Returns \p true if locking is succeeded
109 otherwise (if the spin is already locked) returns \p false
111 Debug version: deadlock can be detected
113 bool try_lock() CDS_NOEXCEPT
115 bool bCurrent = false;
116 m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
120 m_dbgOwnerId = OS::getCurrentThreadId();
126 /// Try to lock the object, repeat @p nTryCount times if failed
128 Returns \p true if locking is succeeded
129 otherwise (if the spin is already locked) returns \p false
131 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT( noexcept( backoff_strategy()() ) )
133 backoff_strategy backoff;
134 while ( nTryCount-- ) {
142 /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
143 void lock() CDS_NOEXCEPT(noexcept( backoff_strategy()() ))
145 backoff_strategy backoff;
148 assert( m_dbgOwnerId != OS::getCurrentThreadId() );
151 while ( !try_lock() ) {
152 while ( m_spin.load( atomics::memory_order_relaxed ) ) {
156 assert( m_dbgOwnerId == OS::getCurrentThreadId() );
159 /// Unlock the spin-lock. Debug version: deadlock may be detected
160 void unlock() CDS_NOEXCEPT
162 assert( m_spin.load( atomics::memory_order_relaxed ) );
164 assert( m_dbgOwnerId == OS::getCurrentThreadId() );
165 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
167 m_spin.store( false, atomics::memory_order_release );
171 /// Spin-lock implementation default for the current platform
172 typedef Spinlock<backoff::LockDefault > Spin;
174 /// Recursive spin lock.
176 Allows recursive calls: the owner thread may recursive enter to critical section guarded by the spin-lock.
179 - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
180 - @p Backoff backoff strategy. Used when spin lock is locked
182 template <typename Integral, class Backoff>
185 typedef OS::ThreadId thread_id ; ///< The type of thread id
188 typedef Integral integral_type ; ///< The integral type
189 typedef Backoff backoff_strategy ; ///< The backoff type
192 atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
193 thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
197 void take( thread_id tid ) CDS_NOEXCEPT
202 void free() CDS_NOEXCEPT
204 m_OwnerId = OS::c_NullThreadId;
207 bool is_taken( thread_id tid ) const CDS_NOEXCEPT
209 return m_OwnerId == tid;
212 bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
214 if ( is_taken( tid )) {
215 m_spin.fetch_add( 1, atomics::memory_order_relaxed );
221 bool try_acquire() CDS_NOEXCEPT
223 integral_type nCurrent = 0;
224 return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
227 bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
229 backoff_strategy bkoff;
231 while ( nTryCount-- ) {
239 void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
242 backoff_strategy bkoff;
243 while ( !try_acquire() ) {
244 while ( m_spin.load( atomics::memory_order_relaxed ) )
251 /// Default constructor initializes spin to free (unlocked) state
252 ReentrantSpinT() CDS_NOEXCEPT
254 , m_OwnerId( OS::c_NullThreadId )
257 /// Dummy copy constructor
259 In theory, spin-lock cannot be copied. However, it is not practical.
260 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
261 initializes the spin to free (unlocked) state like default ctor.
263 ReentrantSpinT(const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
265 , m_OwnerId( OS::c_NullThreadId )
268 /// Construct object for specified state
269 ReentrantSpinT(bool bLocked) CDS_NOEXCEPT
271 , m_OwnerId( OS::c_NullThreadId )
277 /// Checks if the spin is locked
279 The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
280 Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked.
282 bool is_locked() const CDS_NOEXCEPT
284 return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::getCurrentThreadId() ));
287 /// Try to lock the spin-lock (synonym for \ref try_lock)
288 bool try_lock() CDS_NOEXCEPT
290 thread_id tid = OS::getCurrentThreadId();
291 if ( try_taken_lock( tid ) )
293 if ( try_acquire()) {
300 /// Try to lock the object
301 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
303 thread_id tid = OS::getCurrentThreadId();
304 if ( try_taken_lock( tid ) )
306 if ( try_acquire( nTryCount )) {
313 /// Lock the object waits if it is busy
314 void lock() CDS_NOEXCEPT
316 thread_id tid = OS::getCurrentThreadId();
317 if ( !try_taken_lock( tid ) ) {
323 /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
324 bool unlock() CDS_NOEXCEPT
326 if ( is_taken( OS::getCurrentThreadId() ) ) {
327 integral_type n = m_spin.load( atomics::memory_order_relaxed );
329 m_spin.store( n - 1, atomics::memory_order_relaxed );
332 m_spin.store( 0, atomics::memory_order_release );
339 /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
340 bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
342 if ( is_taken( OS::getCurrentThreadId() ) ) {
343 assert( newOwnerId != OS::c_NullThreadId );
344 m_OwnerId = newOwnerId;
351 /// Recursive spin-lock based on atomic32u_t
352 typedef ReentrantSpinT<uint32_t, backoff::LockDefault> ReentrantSpin32;
354 /// Recursive spin-lock based on atomic64u_t type
355 typedef ReentrantSpinT<uint64_t, backoff::LockDefault> ReentrantSpin64;
357 /// Recursive spin-lock based on atomic32_t type
358 typedef ReentrantSpin32 ReentrantSpin;
360 /// The best (for the current platform) auto spin-lock
361 typedef scoped_lock<Spin> AutoSpin;
365 /// Standard (best for the current platform) spin-lock implementation
366 typedef lock::Spin SpinLock;
368 /// Standard (best for the current platform) recursive spin-lock implementation
369 typedef lock::ReentrantSpin RecursiveSpinLock;
371 /// 32bit recursive spin-lock shortcut
372 typedef lock::ReentrantSpin32 RecursiveSpinLock32;
374 /// 64bit recursive spin-lock shortcut
375 typedef lock::ReentrantSpin64 RecursiveSpinLock64;
377 /// Auto spin-lock shortcut
378 typedef lock::AutoSpin AutoSpinLock;
382 #endif // #ifndef __CDS_LOCK_SPINLOCK_H