#ifndef CDSLIB_SYNC_SPINLOCK_H
#define CDSLIB_SYNC_SPINLOCK_H

#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>

namespace cds {
    /// Synchronization primitives
    namespace sync {

        /// Simple and light-weight spin-lock critical section
        /**
            It is useful for protecting small (short-timed) sections of code.

            Algorithm:
                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed: any of the waiting threads may take ownership of the spin-lock.
            This spin-lock is NOT recursive: the thread that owns the lock cannot call \p lock() again without deadlock.
            The \p unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
            - a double lock attempt is made by the same thread (deadlock)
            - the lock is released by a thread that does not own it

            While the spin-lock is locked, the \p Backoff algorithm is invoked. The predefined \p backoff::LockDefault
            class yields the current thread and repeats the lock attempt later.

            Template parameters:
            - \p Backoff - backoff strategy, used while the spin-lock is locked.
              A minimal sketch of a custom strategy is shown after the class definition below.
        */
        template <typename Backoff>
        class spin_lock
        {
        public:
            typedef Backoff backoff_strategy;    ///< back-off strategy type

        private:
            atomics::atomic<bool> m_spin;        ///< Spin (false = unlocked, true = locked)
#   ifdef CDS_DEBUG
            typename OS::ThreadId m_dbgOwnerId;  ///< Owner thread id (only for debug mode)
#   endif

        public:
            /// Constructs a free (unlocked) spin-lock
            spin_lock() CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#   endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }

            /// Constructs a spin-lock in the specified state
            /**
                In debug mode: if \p bLocked is \p true then the spin-lock is made owned by the current thread
            */
            spin_lock( bool bLocked ) CDS_NOEXCEPT
#   ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
#   endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, that restriction is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying at all: it
                initializes the spin-lock to the free (unlocked) state, like the default ctor.
            */
            spin_lock( const spin_lock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#   ifdef CDS_DEBUG
                , m_dbgOwnerId( cds::OS::c_NullThreadId )
#   endif
            {}

            /// Destructor. In debug mode it checks whether the spin-lock is free
            ~spin_lock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ) );
            }

            /// Checks if the spin-lock is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }

            /// Tries to lock the object
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is already locked) returns \p false.

                Debug version: deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                // Acquire ordering on success pairs with the release store in unlock()
                bool bCurrent = false;
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );

                CDS_DEBUG_ONLY(
                    if ( !bCurrent )
                        m_dbgOwnerId = OS::get_current_thread_id();
                )
                return !bCurrent;
            }

            /// Tries to lock the object, repeating the attempt up to \p nTryCount times if it fails
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin-lock is still busy after \p nTryCount attempts) returns \p false.
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock() )
                        return true;
                    backoff();  // the spin-lock is busy: back off before the next attempt
                }
                return false;
            }

            /// Locks the spin-lock. Waits indefinitely while the spin-lock is locked. Debug version: deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;

                // Deadlock detection: the owner thread must not lock twice
                assert( m_dbgOwnerId != OS::get_current_thread_id() );

                // TATAS algorithm: retry the CAS only after a plain (relaxed) read
                // observes the lock to be free, keeping the cache line quiet while spinning
                while ( !try_lock() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::get_current_thread_id() );
            }

            /// Unlocks the spin-lock. Debug version: an assertion is raised if the current thread does not own the lock
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ) );

                assert( m_dbgOwnerId == OS::get_current_thread_id() );
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                // Release store: publishes the critical section to the next owner
                m_spin.store( false, atomics::memory_order_release );
            }
        };
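
        /*
            A \p Backoff strategy is any default-constructible functor callable with no arguments.
            The sketch below is purely illustrative (the \p yield_backoff name is hypothetical;
            ready-made strategies such as \p backoff::LockDefault live in <cds/algo/backoff_strategy.h>):

            \code
            #include <thread>

            struct yield_backoff {
                void operator()() noexcept
                {
                    std::this_thread::yield();  // give up the remainder of the time slice and retry later
                }
            };

            typedef cds::sync::spin_lock< yield_backoff > yielding_spin;
            \endcode
        */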

        /// Spin-lock implementation default for the current platform
        typedef spin_lock< backoff::LockDefault > spin;
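
        /*
            Usage sketch (not part of the original header; \p theLock and \p nSharedCounter are
            hypothetical names). Since \p spin_lock provides \p lock(), \p try_lock() and \p unlock(),
            it meets the standard \p Lockable requirements and works with \p std::lock_guard:

            \code
            #include <cds/sync/spinlock.h>
            #include <mutex>

            cds::sync::spin theLock;
            int nSharedCounter = 0;

            void increment()
            {
                std::lock_guard< cds::sync::spin > guard( theLock );  // lock() in ctor, unlock() in dtor
                ++nSharedCounter;
            }
            \endcode
        */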

        /// Recursive spin-lock
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.
            A short usage sketch is given after the typedefs below.

            Template parameters:
            - \p Integral - an integral type usable with atomics: <tt>unsigned int</tt>, <tt>int</tt>, and others
            - \p Backoff - backoff strategy, used while the spin-lock is locked
        */
        template <typename Integral, class Backoff>
        class reentrant_spin_lock
        {
            typedef OS::ThreadId thread_id;             ///< The type of thread id

        public:
            typedef Integral integral_type;             ///< The integral type
            typedef Backoff backoff_strategy;           ///< The backoff type

        private:
            atomics::atomic<integral_type> m_spin;      ///< spin-lock atomic (recursion count)
            thread_id m_OwnerId;                        ///< Owner thread id. If the spin-lock is not locked it usually equals OS::c_NullThreadId

            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                if ( is_taken( tid )) {
                    // Recursive acquisition by the owner: just bump the recursion count
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }

            bool try_acquire() CDS_NOEXCEPT
            {
                integral_type nCurrent = 0;
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
            }

            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire() )
                        return true;
                    bkoff();
                }
                return false;
            }

            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) )
                        bkoff();
                }
            }

        public:
            /// Default constructor initializes the spin-lock to the free (unlocked) state
            reentrant_spin_lock() CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, that restriction is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying at all: it
                initializes the spin-lock to the free (unlocked) state, like the default ctor.
            */
            reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Constructs the object in the specified state: locked by the current thread if \p bLocked is \p true
            reentrant_spin_lock( bool bLocked ) CDS_NOEXCEPT
                : m_spin( 0 )
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }

            /// Checks if the spin-lock is locked
            /**
                The spin-lock is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0, or the current thread owns the spin-lock) it is considered unlocked.
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
            }

            /// Tries to lock the spin-lock
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Tries to lock the object, repeating the acquire attempt up to \p nTryCount times if it fails
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
            {
                thread_id tid = OS::get_current_thread_id();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Locks the object; waits while it is busy
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                thread_id tid = OS::get_current_thread_id();
                if ( !try_taken_lock( tid ) ) {
                    acquire();      // wait until the spin-lock becomes free
                    take( tid );    // record the new owner
                }
            }

            /// Unlocks the spin-lock. Returns \p true if the current thread owns the spin-lock, \p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id() ) ) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );   // recursive unlock: only decrement the count
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );       // outermost unlock: release the lock
                    }
                    return true;
                }
                return false;
            }

            /// Changes the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::get_current_thread_id() ) ) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };

        /// Recursive 32-bit spin-lock
        typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;

        /// Default recursive spin-lock
        typedef reentrant_spin32 reentrant_spin;

        /// Recursive 64-bit spin-lock
        typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
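
        /*
            Recursive locking sketch (not part of the original header; the function and
            variable names are hypothetical). Unlike \p cds::sync::spin, re-locking by
            the owner thread only increments the count instead of deadlocking:

            \code
            cds::sync::reentrant_spin theLock;

            void inner()
            {
                theLock.lock();     // same thread: recursion count becomes 2, no deadlock
                // ... critical section ...
                theLock.unlock();   // count back to 1, the lock is still held
            }

            void outer()
            {
                theLock.lock();     // count 1, the current thread becomes the owner
                inner();
                theLock.unlock();   // count 0, the spin-lock is released
            }
            \endcode
        */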

    } // namespace sync
} // namespace cds

#endif // #ifndef CDSLIB_SYNC_SPINLOCK_H