3 #ifndef __CDS_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
4 #define __CDS_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
6 #include <cds/lock/array.h>
7 #include <cds/os/thread.h>
8 #include <cds/details/std/memory.h>
9 #include <cds/lock/spinlock.h>
11 #include <cds/details/std/mutex.h>
12 //#include <boost/thread/mutex.hpp>
13 //#include <boost/thread/recursive_mutex.hpp>
16 namespace cds { namespace intrusive { namespace striped_set {
18 /// Lock striping concurrent access policy
20 This is one of the available opt::mutex_policy option types for StripedSet
22 Lock striping is a very simple technique.
23 The set consists of the bucket table and the array of locks.
24 Initially, the capacity of the lock array and the bucket table is the same.
25 When the set is resized, the bucket table capacity is doubled but the lock array is not.
26 The lock \p i protects each bucket \p j for which <tt> j mod L == i </tt>,
27 where \p L is the size of the lock array.
29 The policy contains an internal array of \p Lock locks.
32 - \p Lock - the type of mutex. The default is \p cds_std::mutex. The mutex type should be default-constructible.
33 Note that a spin-lock is not well suited to lock striping, for performance reasons.
34 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
36 template <class Lock = cds_std::mutex, class Alloc = CDS_DEFAULT_ALLOCATOR >
40 typedef Lock lock_type ; ///< lock type
41 typedef Alloc allocator_type ; ///< allocator type
43 typedef cds::lock::array< lock_type, cds::lock::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type
47 lock_array_type m_Locks;
52 class scoped_cell_lock {
53 cds::lock::scoped_lock< lock_array_type > m_guard;
56 scoped_cell_lock( striping& policy, size_t nHash )
57 : m_guard( policy.m_Locks, nHash )
61 class scoped_full_lock {
62 cds::lock::scoped_lock< lock_array_type > m_guard;
64 scoped_full_lock( striping& policy )
65 : m_guard( policy.m_Locks )
69 class scoped_resize_lock: public scoped_full_lock {
71 scoped_resize_lock( striping& policy )
72 : scoped_full_lock( policy )
85 size_t nLockCount ///< The size of lock array. Must be power of two.
87 : m_Locks( nLockCount, cds::lock::pow2_select_policy( nLockCount ))
90 /// Returns lock array size
92 The lock array size is unchanged during the \p striping object's lifetime
94 size_t lock_count() const
96 return m_Locks.size();
100 void resize( size_t /*nNewCapacity*/ )
106 /// Refinable concurrent access policy
108 This is one of the available opt::mutex_policy option types for StripedSet
110 Refining is like the striping technique (see striped_set::striping),
111 but it allows the lock array to grow when the hash table is resized,
112 so the sizes of the hash table and the lock array stay equal.
115 - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required.
116 The default is \p cds_std::recursive_mutex. The mutex type should be default-constructible.
117 - \p BackOff - back-off strategy. Default is cds::backoff::yield
118 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
121 class RecursiveLock = cds_std::recursive_mutex,
122 typename BackOff = cds::backoff::yield,
123 class Alloc = CDS_DEFAULT_ALLOCATOR>
127 typedef RecursiveLock lock_type ; ///< lock type
128 typedef BackOff back_off ; ///< back-off strategy used
129 typedef Alloc allocator_type; ///< allocator type
133 typedef cds::lock::trivial_select_policy lock_selection_policy;
135 class lock_array_type
136 : public cds::lock::array< lock_type, lock_selection_policy, allocator_type >
137 , public std::enable_shared_from_this< lock_array_type >
139 typedef cds::lock::array< lock_type, lock_selection_policy, allocator_type > lock_array_base;
141 lock_array_type( size_t nCapacity )
142 : lock_array_base( nCapacity )
145 typedef std::shared_ptr< lock_array_type > lock_array_ptr;
146 typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator;
148 typedef unsigned long long owner_t;
149 typedef cds::OS::ThreadId threadId_t;
151 typedef cds::lock::Spin spinlock_type;
152 typedef cds::lock::scoped_lock< spinlock_type > scoped_spinlock;
157 static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
159 lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor.
160 CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
161 CDS_ATOMIC::atomic<size_t> m_nCapacity ; ///< Lock array capacity
162 spinlock_type m_access ; ///< access to m_arrLocks
167 struct lock_array_disposer {
168 void operator()( lock_array_type * pArr )
170 lock_array_allocator().Delete( pArr );
174 lock_array_ptr create_lock_array( size_t nCapacity )
176 m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_relaxed );
177 return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
180 lock_type& acquire( size_t nHash )
182 owner_t me = (owner_t) cds::OS::getCurrentThreadId();
187 // wait while resizing
189 who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
190 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
195 lock_array_ptr pLocks;
197 scoped_spinlock sl(m_access);
201 lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
204 who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
205 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
211 lock_array_ptr acquire_all()
213 owner_t me = (owner_t) cds::OS::getCurrentThreadId();
218 // wait while resizing
220 who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
221 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
226 lock_array_ptr pLocks;
228 scoped_spinlock sl(m_access);
234 who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
235 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
238 pLocks->unlock_all();
242 void release_all( lock_array_ptr p )
247 bool acquire_resize()
249 owner_t me = (owner_t) cds::OS::getCurrentThreadId();
252 for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
254 if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
255 lock_array_ptr pOldLocks = m_arrLocks;
256 size_t const nLockCount = pOldLocks->size();
257 for ( size_t i = 0; i < nLockCount; ++i ) {
258 typename lock_array_type::lock_type& lock = pOldLocks->at(i);
260 while ( !lock.try_lock() )
272 void release_resize()
274 m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
279 class scoped_cell_lock {
280 cds::lock::scoped_lock< lock_type > m_guard;
283 scoped_cell_lock( refinable& policy, size_t nHash )
284 : m_guard( policy.acquire( nHash ), true )
288 class scoped_full_lock {
290 lock_array_ptr m_Locks;
292 scoped_full_lock( refinable& policy )
295 m_Locks = policy.acquire_all();
299 m_Policy.release_all( m_Locks );
303 class scoped_resize_lock {
308 scoped_resize_lock( refinable& policy )
311 m_bSucceess = policy.acquire_resize();
314 ~scoped_resize_lock()
317 m_Policy.release_resize();
330 size_t nLockCount ///< Initial size of lock array. Must be power of two.
333 , m_nCapacity( nLockCount )
335 assert( cds::beans::is_power2( nLockCount ));
336 m_arrLocks = create_lock_array( nLockCount );
339 /// Returns lock array size
341 Lock array size is not a constant for \p refinable policy and can be changed when the set is resized.
343 size_t lock_count() const
345 return m_nCapacity.load( CDS_ATOMIC::memory_order_relaxed );
348 /// Resize for new capacity
349 void resize( size_t nNewCapacity )
351 // Expect the access is locked by scoped_resize_lock!!!
352 lock_array_ptr pNewArr = create_lock_array( nNewCapacity );
353 scoped_spinlock sl(m_access);
354 m_arrLocks.swap( pNewArr );
358 }}} // namespace cds::intrusive::striped_set