2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 #ifndef CDSLIB_SYNC_SPINLOCK_H
32 #define CDSLIB_SYNC_SPINLOCK_H
34 #include <cds/algo/atomic.h>
35 #include <cds/os/thread.h>
36 #include <cds/algo/backoff_strategy.h>
39 /// Synchronization primitives
43 Simple and light-weight spin-lock critical section
44 It is useful to gain access to small (short-timed) code
TATAS (test-and-test-and-set)
49 [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.
No serialization is performed - any of the waiting threads may own the spin-lock.
This spin-lock is NOT recursive: the thread owning the lock cannot call lock() again without deadlock.
The method unlock() can be called by any thread.

DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
56 - double lock attempt encountered by same thread (deadlock)
57 - unlock by another thread
59 If spin-lock is locked the Backoff algorithm is called. Predefined backoff::LockDefault class yields current
60 thread and repeats lock attempts later
63 - @p Backoff backoff strategy. Used when spin lock is locked
65 template <typename Backoff >
69 typedef Backoff backoff_strategy; ///< back-off strategy type
71 atomics::atomic<bool> m_spin; ///< Spin
73 typename OS::ThreadId m_dbgOwnerId; ///< Owner thread id (only for debug mode)
77 /// Construct free (unlocked) spin-lock
78 spin_lock() CDS_NOEXCEPT
80 :m_dbgOwnerId( OS::c_NullThreadId )
83 m_spin.store( false, atomics::memory_order_relaxed );
86 /// Construct spin-lock in specified state
88 In debug mode: if \p bLocked = true then spin-lock is made owned by current thread
90 explicit spin_lock( bool bLocked ) CDS_NOEXCEPT
92 : m_dbgOwnerId( bLocked ? cds::OS::get_current_thread_id() : cds::OS::c_NullThreadId )
95 m_spin.store( bLocked, atomics::memory_order_relaxed );
98 /// Dummy copy constructor
100 In theory, spin-lock cannot be copied. However, it is not practical.
101 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
102 initializes the spin to free (unlocked) state like default ctor.
104 spin_lock(const spin_lock<Backoff>& ) CDS_NOEXCEPT
107 , m_dbgOwnerId( cds::OS::c_NullThreadId )
111 /// Destructor. On debug time it checks whether spin-lock is free
114 assert( !m_spin.load( atomics::memory_order_relaxed ) );
117 /// Check if the spin is locked
118 bool is_locked() const CDS_NOEXCEPT
120 return m_spin.load( atomics::memory_order_relaxed );
123 /// Try to lock the object
125 Returns \p true if locking is succeeded
126 otherwise (if the spin is already locked) returns \p false
128 Debug version: deadlock can be detected
130 bool try_lock() CDS_NOEXCEPT
132 bool bCurrent = false;
133 m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
137 m_dbgOwnerId = OS::get_current_thread_id();
143 /// Try to lock the object, repeat @p nTryCount times if failed
145 Returns \p true if locking is succeeded
146 otherwise (if the spin is already locked) returns \p false
148 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
150 backoff_strategy backoff;
151 while ( nTryCount-- ) {
159 /// Lock the spin-lock. Waits infinitely while spin-lock is locked. Debug version: deadlock may be detected
160 void lock() CDS_NOEXCEPT_(noexcept( backoff_strategy()() ))
162 backoff_strategy backoff;
165 assert( m_dbgOwnerId != OS::get_current_thread_id() );
168 while ( !try_lock() ) {
169 while ( m_spin.load( atomics::memory_order_relaxed ) ) {
173 assert( m_dbgOwnerId == OS::get_current_thread_id() );
176 /// Unlock the spin-lock. Debug version: deadlock may be detected
177 void unlock() CDS_NOEXCEPT
179 assert( m_spin.load( atomics::memory_order_relaxed ) );
181 assert( m_dbgOwnerId == OS::get_current_thread_id() );
182 CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )
184 m_spin.store( false, atomics::memory_order_release );
188 /// Spin-lock implementation default for the current platform
189 typedef spin_lock<backoff::LockDefault > spin;
191 /// Recursive spin lock.
193 Allows recursive calls: the owner thread may recursive enter to critical section guarded by the spin-lock.
196 - @p Integral one of integral atomic type: <tt>unsigned int</tt>, <tt>int</tt>, and others
197 - @p Backoff backoff strategy. Used when spin lock is locked
199 template <typename Integral, class Backoff>
200 class reentrant_spin_lock
202 typedef OS::ThreadId thread_id ; ///< The type of thread id
205 typedef Integral integral_type ; ///< The integral type
206 typedef Backoff backoff_strategy ; ///< The backoff type
209 atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
210 thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId
214 void take( thread_id tid ) CDS_NOEXCEPT
219 void free() CDS_NOEXCEPT
221 m_OwnerId = OS::c_NullThreadId;
224 bool is_taken( thread_id tid ) const CDS_NOEXCEPT
226 return m_OwnerId == tid;
229 bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
231 if ( is_taken( tid )) {
232 m_spin.fetch_add( 1, atomics::memory_order_relaxed );
238 bool try_acquire() CDS_NOEXCEPT
240 integral_type nCurrent = 0;
241 return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
244 bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
246 backoff_strategy bkoff;
248 while ( nTryCount-- ) {
256 void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
259 backoff_strategy bkoff;
260 while ( !try_acquire() ) {
261 while ( m_spin.load( atomics::memory_order_relaxed ) )
268 /// Default constructor initializes spin to free (unlocked) state
269 reentrant_spin_lock() CDS_NOEXCEPT
271 , m_OwnerId( OS::c_NullThreadId )
274 /// Dummy copy constructor
276 In theory, spin-lock cannot be copied. However, it is not practical.
277 Therefore, we provide dummy copy constructor that do no copy in fact. The ctor
278 initializes the spin to free (unlocked) state like default ctor.
280 reentrant_spin_lock( const reentrant_spin_lock<Integral, Backoff>& ) CDS_NOEXCEPT
282 , m_OwnerId( OS::c_NullThreadId )
285 /// Construct object for specified state
286 explicit reentrant_spin_lock( bool bLocked )
288 , m_OwnerId( OS::c_NullThreadId )
294 /// Checks if the spin is locked
296 The spin is locked if lock count > 0 and the current thread is not an owner of the lock.
297 Otherwise (i.e. lock count == 0 or the curren thread owns the spin) the spin is unlocked.
299 bool is_locked() const CDS_NOEXCEPT
301 return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::get_current_thread_id() ));
304 /// Try to lock the spin-lock (synonym for \ref try_lock)
305 bool try_lock() CDS_NOEXCEPT
307 thread_id tid = OS::get_current_thread_id();
308 if ( try_taken_lock( tid ) )
310 if ( try_acquire()) {
317 /// Try to lock the object
318 bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().try_acquire( nTryCount )))
320 thread_id tid = OS::get_current_thread_id();
321 if ( try_taken_lock( tid ) )
323 if ( try_acquire( nTryCount )) {
330 /// Lock the object waits if it is busy
331 void lock() CDS_NOEXCEPT_( noexcept( std::declval<reentrant_spin_lock>().acquire()))
333 thread_id tid = OS::get_current_thread_id();
334 if ( !try_taken_lock( tid ) ) {
340 /// Unlock the spin-lock. Return @p true if the current thread is owner of spin-lock @p false otherwise
341 bool unlock() CDS_NOEXCEPT
343 if ( is_taken( OS::get_current_thread_id() ) ) {
344 integral_type n = m_spin.load( atomics::memory_order_relaxed );
346 m_spin.store( n - 1, atomics::memory_order_relaxed );
349 m_spin.store( 0, atomics::memory_order_release );
356 /// Change the owner of locked spin-lock. May be called by thread that is owner of the spin-lock
357 bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
359 if ( is_taken( OS::get_current_thread_id() ) ) {
360 assert( newOwnerId != OS::c_NullThreadId );
361 m_OwnerId = newOwnerId;
368 /// Recursive 32bit spin-lock
369 typedef reentrant_spin_lock<uint32_t, backoff::LockDefault> reentrant_spin32;
371 /// Default recursive spin-lock
372 typedef reentrant_spin32 reentrant_spin;
374 /// Recursive 64bit spin-lock
375 typedef reentrant_spin_lock<uint64_t, backoff::LockDefault> reentrant_spin64;
379 #endif // #ifndef CDSLIB_SYNC_SPINLOCK_H