#ifndef CDSLIB_SYNC_POOL_MONITOR_H
#define CDSLIB_SYNC_POOL_MONITOR_H

#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
#include <cds/opt/options.h> // opt::none

namespace cds { namespace sync {
    /// \p pool_monitor traits
    struct pool_monitor_traits {

        /// Dummy internal statistics, used if the \p Stat template parameter is \p false
        struct empty_stat
        {
            void onLock()             const {}
            void onUnlock()           const {}
            void onLockContention()   const {}
            void onUnlockContention() const {}
            void onLockAllocation()   const {}
            void onLockDeallocation() const {}
        };
        /// Monitor's internal statistics, used if the \p Stat template parameter is \p true
        template <typename Counter = cds::atomicity::event_counter >
        struct stat
        {
            typedef Counter event_counter; ///< Event counter type

            event_counter m_nLockCount;         ///< Number of monitor \p lock() calls
            event_counter m_nUnlockCount;       ///< Number of monitor \p unlock() calls
            event_counter m_nMaxLocked;         ///< Maximum number of simultaneously locked mutexes
            event_counter m_nLockContention;    ///< Number of \p lock() contentions
            event_counter m_nUnlockContention;  ///< Number of \p unlock() contentions
            event_counter m_nLockAllocation;    ///< Number of lock allocations from the pool
            event_counter m_nLockDeallocation;  ///< Number of lock deallocations
            event_counter m_nMaxAllocated;      ///< Maximum number of simultaneously allocated mutexes

            void onLock()
            {
                ++m_nLockCount;
                int nDiff = static_cast<int>( m_nLockCount.get() - m_nUnlockCount.get());
                if ( nDiff > 0 && m_nMaxLocked.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxLocked = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onUnlock()             { ++m_nUnlockCount; }
            void onLockContention()     { ++m_nLockContention; }
            void onUnlockContention()   { ++m_nUnlockContention; }
            void onLockAllocation()
            {
                ++m_nLockAllocation;
                int nDiff = static_cast<int>( m_nLockAllocation.get() - m_nLockDeallocation.get());
                if ( nDiff > 0 && m_nMaxAllocated.get() < static_cast<typename event_counter::value_type>( nDiff ))
                    m_nMaxAllocated = static_cast<typename event_counter::value_type>( nDiff );
            }
            void onLockDeallocation()   { ++m_nLockDeallocation; }
        };
    };
    /// @ref cds_sync_monitor "Monitor" that allocates a node's lock only when it is needed
    /**
        The monitor is intended to reduce the number of system mutexes required by
        huge containers such as trees. The monitor allocates a mutex from the pool \p LockPool
        only when a container node actually has to be locked. The lifetime of a node's mutex is
        managed by a reference counter: when the counter drops to zero,
        the mutex is returned to the pool.

        The monitor is blocking: access to a node's mutex pointer is guarded by a spin-lock.
        However, the node's locking and unlocking themselves are performed outside that spin-lock.

        Template arguments:
        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
            objects of type \p std::mutex or a compatible type. Access to the pool is not synchronized.
        - \p BackOff - back-off strategy for spinning; the default is \p cds::backoff::yield
        - \p Stat - enable (\p true) or disable (\p false, the default) the monitor's internal statistics
        <b>Example</b>
        \code
        typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
        typedef cds::sync::pool_monitor< pool_type > sync_monitor;
        \endcode
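
        A sketch of how a container's node may cooperate with the monitor. The member name
        \p m_SyncMonitorInjection is the one \p lock() and \p unlock() expect; \p my_node and
        the surrounding code are illustrative only and assume the usual
        \p monitor_scoped_lock constructor taking the monitor and the node:
        \code
        struct my_node {
            int value;
            // per-node data required by the monitor (reference counter + lock pointer)
            sync_monitor::node_injection m_SyncMonitorInjection;
        };

        sync_monitor monitor;   // pool of 256 preallocated mutexes
        my_node node;

        {
            // RAII lock: locks the node in the constructor, unlocks it in the destructor
            sync_monitor::scoped_lock<my_node> guard( monitor, node );
            node.value = 42;
        }   // the node's mutex is returned to the pool here if nobody else references it
        \endcode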
    */
    template <class LockPool, typename BackOff = cds::backoff::yield, bool Stat = false >
    class pool_monitor
    {
    public:
        typedef LockPool pool_type;                        ///< Pool type
        typedef typename pool_type::value_type lock_type;  ///< Node lock type
        typedef typename std::conditional<
            std::is_same< BackOff, cds::opt::none >::value,
            cds::backoff::yield,
            BackOff
        >::type back_off;                                   ///< Back-off strategy for spinning
        typedef uint32_t refspin_type;                      ///< Reference counter combined with the spin-lock bit
        /// Internal statistics
        typedef typename std::conditional<
            Stat,
            typename pool_monitor_traits::stat<>,
            typename pool_monitor_traits::empty_stat
        >::type internal_stat;

        /// Pool's default capacity
        static CDS_CONSTEXPR size_t const c_nDefaultCapacity = 256;

    private:
        static CDS_CONSTEXPR refspin_type const c_nSpinBit      = 1;
        static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
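        // m_RefSpin layout: bit 0 (c_nSpinBit) is a spin-lock guarding m_pLock;
        // the remaining bits form the reference counter, so the counter is
        // stepped in units of c_nRefIncrement (2).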
        mutable pool_type     m_Pool;
        mutable internal_stat m_Stat;

    public:
        /// Data injected into a container's node: reference counter plus pointer to the node-level lock
        struct node_injection
        {
            mutable atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
            mutable lock_type *                   m_pLock;   ///< Node-level lock

            node_injection() : m_RefSpin( 0 ), m_pLock( nullptr ) {}

            ~node_injection()
            {
                assert( m_pLock == nullptr );
                assert( m_RefSpin.load( atomics::memory_order_relaxed ) == 0 );
            }

            /// Returns \p true if no lock is allocated and the reference counter is zero
            bool check_free() const
            {
                return m_pLock == nullptr && m_RefSpin.load( atomics::memory_order_acquire ) == 0;
            }
        };
        /// Initializes the pool with \p c_nDefaultCapacity (256) preallocated mutexes
        pool_monitor()
            : m_Pool( c_nDefaultCapacity )
        {}

        /// Initializes the pool with \p nPoolCapacity preallocated mutexes
        pool_monitor( size_t nPoolCapacity )
            : m_Pool( nPoolCapacity ? nPoolCapacity : c_nDefaultCapacity )
        {}
        /// Makes exclusive access to node \p p
        template <typename Node>
        void lock( Node const& p ) const
        {
            lock_type * pLock;

            m_Stat.onLock();

            // acquire the spin-lock bit and increment the reference counter in one CAS
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    m_Stat.onLockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock is acquired
            // If the node has no lock yet, allocate it from the pool
            pLock = p.m_SyncMonitorInjection.m_pLock;
            if ( !pLock ) {
                pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
                assert( pLock != nullptr );
                m_Stat.onLockAllocation();
            }

            // release the spin-lock bit, keeping the incremented reference counter
            p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );

            // lock the node's mutex outside the spin-lock
            pLock->lock();
        }
        /// Unlocks the node \p p
        template <typename Node>
        void unlock( Node const& p ) const
        {
            lock_type * pLock = nullptr;

            m_Stat.onUnlock();

            // unlock the node's mutex first, outside the spin-lock
            assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
            p.m_SyncMonitorInjection.m_pLock->unlock();

            // acquire the spin-lock bit
            refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
            if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
            {
                back_off bkoff;
                do {
                    m_Stat.onUnlockContention();
                    bkoff();
                    cur &= ~c_nSpinBit;
                } while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur | c_nSpinBit,
                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
            }

            // spin-lock is acquired
            // If we are the unique owner, detach the lock for deallocation
            if ( cur == c_nRefIncrement ) {
                pLock = p.m_SyncMonitorInjection.m_pLock;
                p.m_SyncMonitorInjection.m_pLock = nullptr;
            }

            // decrement the reference counter and release the spin-lock bit
            p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );

            // return the detached lock to the pool
            if ( pLock ) {
                m_Pool.deallocate( pLock, 1 );
                m_Stat.onLockDeallocation();
            }
        }
        /// Scoped lock: locks node \p Node in its constructor, unlocks it in its destructor
        template <typename Node>
        using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
        /// Returns a reference to the monitor's internal statistics
        /**
            If the class' template argument \p Stat is \p false,
            the function returns \ref pool_monitor_traits::empty_stat "dummy statistics".
            Otherwise, it returns a reference to the monitor's internal statistics
            of type \ref pool_monitor_traits::stat.
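
            A sketch of reading the counters when statistics are enabled (\p Stat = \p true),
            reusing \p pool_type from the example above; the counter member names are those
            of \ref pool_monitor_traits::stat and \p event_counter::get() returns the raw count:
            \code
            typedef cds::sync::pool_monitor< pool_type, cds::backoff::yield, true > stat_monitor;
            stat_monitor monitor;
            // ... use the monitor via lock()/unlock() or scoped_lock ...
            auto const& st = monitor.statistics();
            std::cout << "locks: "         << st.m_nLockCount.get()
                      << ", max locked: "  << st.m_nMaxLocked.get()
                      << ", allocations: " << st.m_nLockAllocation.get() << '\n';
            \endcode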
        */
        internal_stat const& statistics() const
        {
            return m_Stat;
        }
    };

}} // namespace cds::sync

#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H