3 #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
4 #define CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
8 #include <cds/lock/array.h>
9 #include <cds/os/thread.h>
10 #include <cds/sync/spinlock.h>
12 namespace cds { namespace intrusive { namespace striped_set {
14 /// Lock striping concurrent access policy
16 This is one of the available opt::mutex_policy option types for StripedSet
18 Lock striping is a very simple technique.
19 The set consists of the bucket table and the array of locks.
20 Initially, the capacity of lock array and bucket table is the same.
21 When the set is resized, the bucket table capacity is doubled, but the lock array capacity is not.
22 The lock \p i protects each bucket \p j, where <tt> j = i mod L </tt>,
23 where \p L - the size of lock array.
25 The policy contains an internal array of \p Lock locks.
28 - \p Lock - the type of mutex. The default is \p std::mutex. The mutex type should be default-constructible.
29 Note that a spin-lock is not well suited to lock striping, for performance reasons.
30 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
// striping<Lock, Alloc>: fixed-capacity lock-striping policy.
// NOTE(review): several structural lines (class header, braces, access
// specifiers) are elided from this excerpt; comments below describe only
// what the visible lines establish.
32 template <class Lock = std::mutex, class Alloc = CDS_DEFAULT_ALLOCATOR >
36 typedef Lock lock_type ; ///< lock type
37 typedef Alloc allocator_type ; ///< allocator type
// pow2_select_policy maps a hash to a lock index by bit-masking, which is
// why the lock count must be a power of two (see the constructor below).
39 typedef cds::lock::array< lock_type, cds::lock::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type
// The lock array; sized once in the constructor and never resized
// (resize() below is a no-op).
43 lock_array_type m_Locks;
// RAII guard over the single stripe covering hash value nHash.
// Presumably unique_lock<lock_array_type> here is a cds adapter/specialization
// accepting an (array, hash) pair — TODO confirm against cds/lock/array.h.
48 class scoped_cell_lock {
49 std::unique_lock< lock_array_type > m_guard;
// Locks the one stripe selected by nHash via the array's selection policy.
52 scoped_cell_lock( striping& policy, size_t nHash )
53 : m_guard( policy.m_Locks, nHash )
// RAII guard acquiring every lock of the array (whole-set exclusive access).
57 class scoped_full_lock {
58 std::unique_lock< lock_array_type > m_guard;
60 scoped_full_lock( striping& policy )
61 : m_guard( policy.m_Locks )
// Under plain striping the lock array never grows, so guarding a resize of
// the bucket table is simply a full lock over all stripes.
65 class scoped_resize_lock: public scoped_full_lock {
67 scoped_resize_lock( striping& policy )
68 : scoped_full_lock( policy )
// Constructor: builds the lock array with a power-of-two selection policy
// parameterized by the same count, so index = hash & (nLockCount - 1).
81 size_t nLockCount ///< The size of lock array. Must be power of two.
83 : m_Locks( nLockCount, cds::lock::pow2_select_policy( nLockCount ))
86 /// Returns lock array size
88 Lock array size is unchanged during \p striped object lifetime
90 size_t lock_count() const
92 return m_Locks.size();
// Intentional no-op: bucket-table growth does not grow the lock array;
// stripes are shared as j mod L (see the class description above).
96 void resize( size_t /*nNewCapacity*/ )
102 /// Refinable concurrent access policy
104 This is one of the available opt::mutex_policy option types for StripedSet
106 Refining is like a striping technique (see striped_set::striping)
107 but it allows growing the size of lock array when resizing the hash table.
108 So, the sizes of hash table and lock array are equal.
111 - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required.
112 The default is \p std::recursive_mutex. The mutex type should be default-constructible.
113 - \p BackOff - back-off strategy. Default is cds::backoff::yield
114 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
// refinable<RecursiveLock, BackOff, Alloc>: lock array grows together with
// the bucket table. A recursive mutex is required because the thread that
// owns the resize may re-enter acquire()/acquire_all() on locks it already
// holds (see the owner-id checks in those methods).
117 class RecursiveLock = std::recursive_mutex,
118 typename BackOff = cds::backoff::yield,
119 class Alloc = CDS_DEFAULT_ALLOCATOR>
123 typedef RecursiveLock lock_type ; ///< lock type
124 typedef BackOff back_off ; ///< back-off strategy used
125 typedef Alloc allocator_type; ///< allocator type
// Trivial selection: the caller computes the index itself
// (acquire() masks the hash with size-1 before calling at()).
129 typedef cds::lock::trivial_select_policy lock_selection_policy;
// The lock array is reference-counted: readers pin the current array with a
// shared_ptr while resize() swaps in a new one, so the old array cannot be
// destroyed under a thread still holding one of its locks.
131 class lock_array_type
132 : public cds::lock::array< lock_type, lock_selection_policy, allocator_type >
133 , public std::enable_shared_from_this< lock_array_type >
135 typedef cds::lock::array< lock_type, lock_selection_policy, allocator_type > lock_array_base;
137 lock_array_type( size_t nCapacity )
138 : lock_array_base( nCapacity )
141 typedef std::shared_ptr< lock_array_type > lock_array_ptr;
142 typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator;
// m_Owner word layout: (thread-id << 1) | busy-flag. Bit 0 set means a
// resize is in progress (see acquire_resize / release_resize).
144 typedef unsigned long long owner_t;
145 typedef cds::OS::ThreadId threadId_t;
147 typedef cds::sync::spin spinlock_type;
148 typedef std::unique_lock< spinlock_type > scoped_spinlock;
// All bits of owner_t except the most significant: truncates a thread id so
// that comparisons against (m_Owner >> 1) use the same width.
153 static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
155 lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor.
156 atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
// Cached array size so lock_count() avoids touching the shared_ptr.
157 atomics::atomic<size_t> m_nCapacity ; ///< Lock array capacity
// Spinlock serializing reads/writes of the m_arrLocks pointer itself.
158 spinlock_type m_access ; ///< access to m_arrLocks
// shared_ptr deleter: routes lock-array destruction through the configured
// allocator instead of plain delete.
163 struct lock_array_disposer {
164 void operator()( lock_array_type * pArr )
166 lock_array_allocator().Delete( pArr );
// Allocates a new lock array of the given capacity and caches the capacity.
// The relaxed store is acceptable here — presumably callers run either in
// the constructor or under the resize lock; TODO confirm.
170 lock_array_ptr create_lock_array( size_t nCapacity )
172 m_nCapacity.store( nCapacity, atomics::memory_order_relaxed );
173 return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
// Locks the stripe for nHash and returns a reference to the held lock
// (callers adopt it — see scoped_cell_lock below).
// NOTE(review): the surrounding loops, lock()/unlock() calls and the return
// statement are elided from this excerpt; comments describe the visible
// protocol only.
176 lock_type& acquire( size_t nHash )
178 owner_t me = (owner_t) cds::OS::get_current_thread_id();
183 // wait while resizing
185 who = m_Owner.load( atomics::memory_order_acquire );
// Proceed if no resize is in progress (bit 0 clear), or if this thread IS
// the resizer (owner id matches) — recursive re-entry, hence RecursiveLock.
186 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
// Snapshot the current array under the spinlock; holding a shared_ptr copy
// pins the array alive even if resize() swaps it out concurrently.
191 lock_array_ptr pLocks;
193 scoped_spinlock sl(m_access);
// Index by masking: array size is a power of two (asserted in the ctor).
197 lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
// Validate after locking: if a resize started meanwhile (and we are not the
// resizer) or the array was replaced, the lock is stale — retry (elided).
200 who = m_Owner.load( atomics::memory_order_acquire );
201 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
// Locks every stripe of the current array and returns the pinned array so
// the caller can later release exactly the locks it took (see release_all).
// NOTE(review): loop structure, lock-all call and return are elided here.
207 lock_array_ptr acquire_all()
209 owner_t me = (owner_t) cds::OS::get_current_thread_id();
214 // wait while resizing
216 who = m_Owner.load( atomics::memory_order_acquire );
// Same gate as acquire(): no resize running, or this thread is the resizer.
217 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
// Pin the current array with a shared_ptr copy taken under the spinlock.
222 lock_array_ptr pLocks;
224 scoped_spinlock sl(m_access);
// Re-validate after locking all stripes; on failure drop all locks and retry.
230 who = m_Owner.load( atomics::memory_order_acquire );
231 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
234 pLocks->unlock_all();
// Releases the locks of the array previously returned by acquire_all()
// (body elided in this excerpt; presumably p->unlock_all() — TODO confirm).
238 void release_all( lock_array_ptr p )
// Attempts to become the exclusive resizer. Returns whether the resize lock
// was obtained (failure path and back-off between attempts are elided).
243 bool acquire_resize()
245 owner_t me = (owner_t) cds::OS::get_current_thread_id();
// Bounded number of attempts, so a contended resize gives up rather than
// spinning forever.
248 for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
// CAS 0 -> (me << 1) | 1: publishes "resize in progress by thread me";
// acquire order on success pairs with the release in release_resize().
250 if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
251 lock_array_ptr pOldLocks = m_arrLocks;
252 size_t const nLockCount = pOldLocks->size();
// Spin-acquire every lock of the old array: threads already inside a cell
// lock drain out, new arrivals are blocked by the owner flag set above.
253 for ( size_t i = 0; i < nLockCount; ++i ) {
254 typename lock_array_type::lock_type& lock = pOldLocks->at(i);
256 while ( !lock.try_lock() )
// Clears the owner word; release order makes the resize visible to the
// acquire-loads in acquire()/acquire_all().
268 void release_resize()
270 m_Owner.store( 0, atomics::memory_order_release );
// RAII guard over one stripe. acquire() returns the lock already held, so
// the unique_lock adopts it (std::adopt_lock_t{} == std::adopt_lock) rather
// than locking again.
275 class scoped_cell_lock {
276 std::unique_lock< lock_type > m_guard;
279 scoped_cell_lock( refinable& policy, size_t nHash )
280 : m_guard( policy.acquire( nHash ), std::adopt_lock_t() )
// RAII guard over all stripes. Keeping the lock_array_ptr member alive pins
// the exact array whose locks were taken, so release_all() unlocks the same
// array even if a resize swapped m_arrLocks meanwhile.
284 class scoped_full_lock {
286 lock_array_ptr m_Locks;
288 scoped_full_lock( refinable& policy )
291 m_Locks = policy.acquire_all();
// Destructor body (declaration lines elided): releases the pinned array.
// NOTE(review): m_Policy is declared on an elided line.
295 m_Policy.release_all( m_Locks );
// RAII guard for resizing. Records whether acquire_resize() succeeded;
// the success flag's declaration (note the 'm_bSucceess' spelling, which
// cannot be fixed from this excerpt) is elided.
299 class scoped_resize_lock {
304 scoped_resize_lock( refinable& policy )
307 m_bSucceess = policy.acquire_resize();
// Presumably release_resize() is called only when acquisition succeeded —
// the guarding condition is elided; TODO confirm.
310 ~scoped_resize_lock()
313 m_Policy.release_resize();
// Constructor: caches the capacity, checks the power-of-two precondition
// (required by the hash & (size-1) indexing in acquire()), then allocates
// the initial lock array.
326 size_t nLockCount ///< Initial size of lock array. Must be power of two.
329 , m_nCapacity( nLockCount )
331 assert( cds::beans::is_power2( nLockCount ));
332 m_arrLocks = create_lock_array( nLockCount );
335 /// Returns lock array size
337 Lock array size is not a constant for \p refinable policy and can be changed when the set is resized.
// Relaxed load of the cached capacity — avoids dereferencing m_arrLocks.
339 size_t lock_count() const
341 return m_nCapacity.load( atomics::memory_order_relaxed );
344 /// Resize for new capacity
345 void resize( size_t nNewCapacity )
347 // Expect the access is locked by scoped_resize_lock!!!
// Build the new array outside the spinlock, then swap pointers under it.
// After the swap pNewArr holds the OLD array; readers that pinned it with a
// shared_ptr copy keep it alive until they finish.
348 lock_array_ptr pNewArr = create_lock_array( nNewCapacity );
349 scoped_spinlock sl(m_access);
350 m_arrLocks.swap( pNewArr );
354 }}} // namespace cds::intrusive::striped_set