Replace cds::lock::scoped_lock with std::unique_lock, remove cds/lock/scoped_lock.h
cds/lock/spinlock.h
//$$CDS-header$$

#ifndef __CDS_LOCK_SPINLOCK_H
#define __CDS_LOCK_SPINLOCK_H

/*
    Defines spin-lock primitives
    Editions:
        2012.01.23  1.1.0 khizmax     Refactoring: use C++11 atomics
        2010.01.22  0.6.0 khizmax     Refactoring: use cds::atomic namespace
                                      Explicit memory ordering specification (atomic::memory_order_xxx)
        2006              khizmax     Created
*/

#include <mutex>        // std::unique_lock (used by lock::AutoSpin)
#include <cds/cxx11_atomic.h>
#include <cds/os/thread.h>
#include <cds/algo/backoff_strategy.h>

#include <cds/details/noncopyable.h>

namespace cds {
    /// Synchronization primitives
    namespace lock {
        /// Spin lock
        /**
            Simple and lightweight spin-lock critical section.
            It is useful for protecting small (short-lived) pieces of code.

            Algorithm:

                TATAS (test-and-test-and-set)
                [1984] L. Rudolph, Z. Segall. Dynamic Decentralized Cache Schemes for MIMD Parallel Processors.

            No serialization is performed - any of the waiting threads may acquire the spin-lock.
            This spin-lock is NOT recursive: the thread owning the lock cannot call lock() again without deadlock.
            The unlock() method may be called by any thread.

            DEBUG version: the spin-lock stores the owner thread id. An assertion is raised when:
                - a double lock attempt is made by the same thread (deadlock)
                - unlock is called by another thread

            While the spin-lock is locked, the Backoff algorithm is invoked. The predefined backoff::LockDefault class
            yields the current thread and repeats lock attempts later.

            Template parameters:
                - @p Backoff    back-off strategy, used while the spin-lock is locked
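
            A minimal usage sketch (it assumes only the \p cds::SpinLock typedef defined at the end of this header):
            \code
            #include <cds/lock/spinlock.h>

            cds::SpinLock theLock;

            void short_critical_section()
            {
                theLock.lock();
                // ... short-lived work on shared data ...
                theLock.unlock();
            }
            \endcode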
        */
        template <typename Backoff >
        class Spinlock
        {
        public:
            typedef Backoff     backoff_strategy    ;   ///< back-off strategy type
        private:
            atomics::atomic<bool>    m_spin  ;          ///< Spin
#    ifdef CDS_DEBUG
            typename OS::ThreadId    m_dbgOwnerId ;     ///< Owner thread id (only for debug mode)
#    endif

        public:
            /// Construct free (unlocked) spin-lock
            Spinlock() CDS_NOEXCEPT
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( OS::c_NullThreadId )
#    endif
            {
                m_spin.store( false, atomics::memory_order_relaxed );
            }

            /// Construct spin-lock in specified state
            /**
                In debug mode: if \p bLocked = true then the spin-lock is made owned by the current thread
            */
            Spinlock( bool bLocked ) CDS_NOEXCEPT
#    ifdef CDS_DEBUG
                : m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
#    endif
            {
                m_spin.store( bLocked, atomics::memory_order_relaxed );
            }

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, a non-copyable spin-lock is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying: it initializes the spin
                to the free (unlocked) state, like the default ctor.
            */
            Spinlock( const Spinlock<Backoff>& ) CDS_NOEXCEPT
                : m_spin( false )
#    ifdef CDS_DEBUG
                , m_dbgOwnerId( OS::c_NullThreadId )
#    endif
            {}

            /// Destructor. In debug mode it checks whether the spin-lock is free
            ~Spinlock()
            {
                assert( !m_spin.load( atomics::memory_order_relaxed ) );
            }

            /// Checks if the spin is locked
            bool is_locked() const CDS_NOEXCEPT
            {
                return m_spin.load( atomics::memory_order_relaxed );
            }

            /// Try to lock the object
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin is already locked) returns \p false.

                Debug version: a deadlock can be detected
            */
            bool try_lock() CDS_NOEXCEPT
            {
                bool bCurrent = false;
                m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );

                CDS_DEBUG_ONLY(
                    if ( !bCurrent ) {
                        m_dbgOwnerId = OS::getCurrentThreadId();
                    }
                )
                return !bCurrent;
            }

            /// Try to lock the object, repeating @p nTryCount times if failed
            /**
                Returns \p true if locking succeeded;
                otherwise (if the spin is still locked after \p nTryCount attempts) returns \p false.
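
                A usage sketch (a hypothetical caller; the attempt count 16 is arbitrary):
                \code
                cds::SpinLock sl;
                if ( sl.try_lock( 16 )) {   // up to 16 attempts, with back-off between attempts
                    // ... critical section ...
                    sl.unlock();
                }
                // if all attempts failed, do something else instead of spinning further
                \endcode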
            */
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;
                while ( nTryCount-- ) {
                    if ( try_lock() )
                        return true;
                    backoff();
                }
                return false;
            }

            /// Lock the spin-lock. Waits (possibly indefinitely) while the spin-lock is locked. Debug version: a deadlock may be detected
            void lock() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ) )
            {
                backoff_strategy backoff;

                // Deadlock detected
                assert( m_dbgOwnerId != OS::getCurrentThreadId() );

                // TATAS algorithm
                while ( !try_lock() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) ) {
                        backoff();
                    }
                }
                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
            }

            /// Unlock the spin-lock. Debug version: unlocking a free spin-lock or unlocking by a non-owner thread is detected
            void unlock() CDS_NOEXCEPT
            {
                assert( m_spin.load( atomics::memory_order_relaxed ) );

                assert( m_dbgOwnerId == OS::getCurrentThreadId() );
                CDS_DEBUG_ONLY( m_dbgOwnerId = OS::c_NullThreadId; )

                m_spin.store( false, atomics::memory_order_release );
            }
        };

        /// Default spin-lock implementation for the current platform
        typedef Spinlock<backoff::LockDefault >                 Spin;

        /// Recursive spin lock
        /**
            Allows recursive calls: the owner thread may recursively enter the critical section guarded by the spin-lock.

            Template parameters:
                - @p Integral       an integral type suitable for atomics: <tt>unsigned int</tt>, <tt>int</tt>, and others
                - @p Backoff        back-off strategy, used while the spin-lock is locked
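
            A sketch of recursive locking (it assumes only the \p cds::RecursiveSpinLock typedef defined at the end of this header):
            \code
            cds::RecursiveSpinLock rspin;

            void inner()
            {
                rspin.lock();       // the owner thread may lock again without deadlock
                // ...
                rspin.unlock();     // decrements the lock count
            }

            void outer()
            {
                rspin.lock();
                inner();            // nested locking by the same thread is allowed
                rspin.unlock();     // the lock is released when the count drops to zero
            }
            \endcode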
        */
        template <typename Integral, class Backoff>
        class ReentrantSpinT
        {
            typedef OS::ThreadId    thread_id    ;        ///< The type of thread id

        public:
            typedef Integral        integral_type       ; ///< The integral type
            typedef Backoff         backoff_strategy    ; ///< The back-off type

        private:
            atomics::atomic<integral_type>   m_spin      ; ///< Spin-lock atomic (the lock count; 0 means free)
            thread_id                        m_OwnerId   ; ///< Owner thread id. If the spin-lock is not locked, it usually equals OS::c_NullThreadId

        private:
            //@cond
            void take( thread_id tid ) CDS_NOEXCEPT
            {
                m_OwnerId = tid;
            }

            void free() CDS_NOEXCEPT
            {
                m_OwnerId = OS::c_NullThreadId;
            }

            bool is_taken( thread_id tid ) const CDS_NOEXCEPT
            {
                return m_OwnerId == tid;
            }

            bool try_taken_lock( thread_id tid ) CDS_NOEXCEPT
            {
                if ( is_taken( tid )) {
                    m_spin.fetch_add( 1, atomics::memory_order_relaxed );
                    return true;
                }
                return false;
            }

            bool try_acquire() CDS_NOEXCEPT
            {
                integral_type nCurrent = 0;
                return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
            }

            bool try_acquire( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                backoff_strategy bkoff;

                while ( nTryCount-- ) {
                    if ( try_acquire() )
                        return true;
                    bkoff();
                }
                return false;
            }

            void acquire() CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
            {
                // TATAS algorithm
                backoff_strategy bkoff;
                while ( !try_acquire() ) {
                    while ( m_spin.load( atomics::memory_order_relaxed ) )
                        bkoff();
                }
            }
            //@endcond

        public:
            /// Default constructor initializes spin to free (unlocked) state
            ReentrantSpinT() CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Dummy copy constructor
            /**
                In theory, a spin-lock cannot be copied; in practice, however, a non-copyable spin-lock is inconvenient.
                Therefore, we provide a dummy copy constructor that performs no copying: it initializes the spin
                to the free (unlocked) state, like the default ctor.
            */
            ReentrantSpinT( const ReentrantSpinT<Integral, Backoff>& ) CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {}

            /// Construct object in specified state
            ReentrantSpinT( bool bLocked ) CDS_NOEXCEPT
                : m_spin(0)
                , m_OwnerId( OS::c_NullThreadId )
            {
                if ( bLocked )
                    lock();
            }

            /// Checks if the spin is locked
            /**
                The spin is locked if the lock count > 0 and the current thread is not the owner of the lock.
                Otherwise (i.e. the lock count == 0 or the current thread owns the spin) the spin is unlocked.
            */
            bool is_locked() const CDS_NOEXCEPT
            {
                return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || is_taken( cds::OS::getCurrentThreadId() ));
            }

            /// Try to lock the spin-lock
            bool try_lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire()) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Try to lock the object, repeating @p nTryCount times if failed
            bool try_lock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( try_acquire( nTryCount ) ) )
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( try_taken_lock( tid ) )
                    return true;
                if ( try_acquire( nTryCount )) {
                    take( tid );
                    return true;
                }
                return false;
            }

            /// Lock the object; waits if it is busy
            void lock() CDS_NOEXCEPT
            {
                thread_id tid = OS::getCurrentThreadId();
                if ( !try_taken_lock( tid ) ) {
                    acquire();
                    take( tid );
                }
            }

            /// Unlock the spin-lock. Returns @p true if the current thread owns the spin-lock, @p false otherwise
            bool unlock() CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    integral_type n = m_spin.load( atomics::memory_order_relaxed );
                    if ( n > 1 )
                        m_spin.store( n - 1, atomics::memory_order_relaxed );
                    else {
                        free();
                        m_spin.store( 0, atomics::memory_order_release );
                    }
                    return true;
                }
                return false;
            }

            /// Change the owner of the locked spin-lock. May be called only by the thread that owns the spin-lock
            bool change_owner( OS::ThreadId newOwnerId ) CDS_NOEXCEPT
            {
                if ( is_taken( OS::getCurrentThreadId() ) ) {
                    assert( newOwnerId != OS::c_NullThreadId );
                    m_OwnerId = newOwnerId;
                    return true;
                }
                return false;
            }
        };

        /// Recursive spin-lock based on the uint32_t type
        typedef ReentrantSpinT<uint32_t, backoff::LockDefault>   ReentrantSpin32;

        /// Recursive spin-lock based on the uint64_t type
        typedef ReentrantSpinT<uint64_t, backoff::LockDefault>   ReentrantSpin64;

        /// Default recursive spin-lock (based on ReentrantSpin32)
        typedef ReentrantSpin32                                   ReentrantSpin;

        /// The best (for the current platform) scoped auto spin-lock, based on \p std::unique_lock
        typedef std::unique_lock<Spin>   AutoSpin;
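
        /*
            A sketch of scoped (RAII) locking via \p AutoSpin (i.e. \p std::unique_lock over \ref Spin),
            using the \p cds::AutoSpinLock shortcut defined at the end of this header:
            \code
            cds::SpinLock theLock;

            void guarded_work()
            {
                cds::AutoSpinLock guard( theLock );   // locks theLock in the constructor
                // ... critical section ...
            }   // theLock is released in the destructor of guard
            \endcode
        */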

    }    // namespace lock

    /// Standard (best for the current platform) spin-lock implementation
    typedef lock::Spin              SpinLock;

    /// Standard (best for the current platform) recursive spin-lock implementation
    typedef lock::ReentrantSpin     RecursiveSpinLock;

    /// 32bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin32   RecursiveSpinLock32;

    /// 64bit recursive spin-lock shortcut
    typedef lock::ReentrantSpin64   RecursiveSpinLock64;

    /// Auto spin-lock shortcut
    typedef lock::AutoSpin          AutoSpinLock;

} // namespace cds

#endif  // #ifndef __CDS_LOCK_SPINLOCK_H