/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_SYNC_POOL_MONITOR_H
32 #define CDSLIB_SYNC_POOL_MONITOR_H
34 #include <cds/sync/monitor.h>
35 #include <cds/algo/atomic.h>
36 #include <cds/algo/backoff_strategy.h>
37 #include <cds/opt/options.h> // opt::none
39 namespace cds { namespace sync {
41 /// \p pool_monitor traits
42 struct pool_monitor_traits {
44 /// Dummy internal statistics if \p Stat template parameter is \p false
48 void onLock() const {}
49 void onUnlock() const {}
50 void onLockContention() const {}
51 void onUnlockContention() const {}
52 void onLockAllocation() const {}
53 void onLockDeallocation() const {}
57 /// Monitor's internal statistics, used if \p Stat template parameter is \p true
58 template <typename Counter = cds::atomicity::event_counter >
61 typedef Counter event_counter; ///< measure type
63 event_counter m_nLockCount; ///< Number of monitor \p lock() call
64 event_counter m_nUnlockCount; ///< Number of monitor \p unlock() call
65 event_counter m_nMaxLocked; ///< Max number of simuntaneously locked mutexes
66 event_counter m_nLockContention; ///< Number of \p lock() contenton
67 event_counter m_nUnlockContention; ///< Number of \p unlock() contention
68 event_counter m_nLockAllocation; ///< Number of the lock allocation from the pool
69 event_counter m_nLockDeallocation; ///< Number of the lock deallocation
70 event_counter m_nMaxAllocated; ///< Max number of sumultaneously allocated mutexes
76 int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
77 if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
78 m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
80 void onUnlock() { ++m_nUnlockCount; }
81 void onLockContention() { ++m_nLockContention; }
82 void onUnlockContention() { ++m_nUnlockContention;}
83 void onLockAllocation()
86 int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
87 if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
88 m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
90 void onLockDeallocation() { ++m_nLockDeallocation;}
96 /// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
98 The monitor is intended for reducing the number of system mutexes for
99 huge containers like a tree. The monitor allocates the mutex from the pool \p LockPool
100 only when container's node should be locked. Lifetime of node's mutex is managed by
101 reference counter. When the reference counter to node's mutex becomes zero,
102 the mutex is given back to the pool.
104 The monitor is blocked: the access to node's mutex is performed under the spin-lock.
105 However, node locking/unlocking is performed beyond the spin-lock.
108 - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
109 the objects of type \p std::mutex or similar. The access to the pool is not synchronized.
110 - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::yield
111 - \p Stat - enable (\p true) or disable (\p false, the default) monitor's internal statistics.
115 typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
116 typedef cds::sync::pool_monitor< pool_type > sync_monitor;
119 template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
123 typedef LockPool pool_type; ///< Pool type
124 typedef typename pool_type::value_type lock_type; ///< node lock type
125 typedef typename std::conditional<
126 std::is_same< BackOff, cds::opt::none >::value,
129 >::type back_off; ///< back-off strategy for spinning
130 typedef uint32_t refspin_type; ///< Reference counter + spin-lock bit
132 /// Internal statistics
133 typedef typename std::conditional<
135 typename pool_monitor_traits::stat<>,
136 typename pool_monitor_traits::empty_stat
137 >::type internal_stat;
139 /// Pool's default capacity
140 static CDS_CONSTEXPR size_t const c_nDefaultCapacity = 256;
144 static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
145 static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
146 mutable pool_type m_Pool;
147 mutable internal_stat m_Stat;
153 struct node_injection
155 mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
156 mutable lock_type * m_pLock; ///< Node-level lock
166 assert( m_pLock == nullptr );
167 assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
170 bool check_free() const
172 return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_acquire ) == 0;
177 /// Initializes the pool of 256 preallocated mutexes
179 : m_Pool( c_nDefaultCapacity )
182 /// Initializes the pool of \p nPoolCapacity preallocated mutexes
183 pool_monitor( size_t nPoolCapacity )
184 : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
187 /// Makes exclusive access to node \p p
188 template <typename Node>
189 void lock( Node const& p ) const
195 // try lock spin and increment reference counter
196 refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
197 if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
198 atomics::memory_order_acquire, atomics::memory_order_relaxed ))
202 m_Stat.onLockContention();
205 } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
206 atomics::memory_order_acquire, atomics::memory_order_relaxed ));
210 // If the node has no lock, allocate it from pool
211 pLock = p.m_SyncMonitorInjection.m_pLock;
214 pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
215 assert( pLock != nullptr );
216 m_Stat.onLockAllocation();
220 p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
226 /// Unlocks the node \p p
227 template <typename Node>
228 void unlock( Node const& p ) const
230 lock_type * pLock = nullptr;
234 assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
235 p.m_SyncMonitorInjection.m_pLock->unlock();
238 refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
239 if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
240 atomics::memory_order_acquire, atomics::memory_order_relaxed ))
244 m_Stat.onUnlockContention();
247 } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
248 atomics::memory_order_acquire, atomics::memory_order_relaxed ));
253 // If we are the unique owner - deallocate lock
254 if ( cur == c_nRefIncrement ) {
255 pLock = p.m_SyncMonitorInjection.m_pLock;
256 p.m_SyncMonitorInjection.m_pLock = nullptr;
260 p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
264 m_Pool.deallocate( pLock, 1 );
265 m_Stat.onLockDeallocation();
270 template <typename Node>
271 using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
273 /// Returns the reference to internal statistics
275 If class' template argument \p Stat is \p false,
276 the function returns \ref pool_monitor_traits::empty_stat "dummy statistics".
277 Otherwise, it returns the reference to monitor's internal statistics
278 of type \ref pool_monitor_traits::stat.
280 internal_stat const& statistics() const
286 }} // namespace cds::sync
288 #endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H