/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
32 #define CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H
#include <mutex>

#include <cds/sync/lock_array.h>
#include <cds/os/thread.h>
#include <cds/sync/spinlock.h>
40 namespace cds { namespace intrusive { namespace striped_set {
42 /// Lock striping concurrent access policy
44 This is one of available opt::mutex_policy option type for StripedSet
46 Lock striping is very simple technique.
47 The set consists of the bucket table and the array of locks.
48 Initially, the capacity of lock array and bucket table is the same.
49 When set is resized, bucket table capacity will be doubled but lock array will not.
50 The lock \p i protects each bucket \p j, where <tt> j = i mod L </tt>,
51 where \p L - the size of lock array.
53 The policy contains an internal array of \p Lock locks.
56 - \p Lock - the type of mutex. The default is \p std::mutex. The mutex type should be default-constructible.
57 Note that a spin-lock is not so good suitable for lock striping for performance reason.
58 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
60 template <class Lock = std::mutex, class Alloc = CDS_DEFAULT_ALLOCATOR >
64 typedef Lock lock_type ; ///< lock type
65 typedef Alloc allocator_type ; ///< allocator type
67 typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type
71 lock_array_type m_Locks;
76 class scoped_cell_lock {
77 std::unique_lock< lock_array_type > m_guard;
80 scoped_cell_lock( striping& policy, size_t nHash )
81 : m_guard( policy.m_Locks, nHash )
85 class scoped_full_lock {
86 std::unique_lock< lock_array_type > m_guard;
88 scoped_full_lock( striping& policy )
89 : m_guard( policy.m_Locks )
93 class scoped_resize_lock: public scoped_full_lock {
95 scoped_resize_lock( striping& policy )
96 : scoped_full_lock( policy )
109 size_t nLockCount ///< The size of lock array. Must be power of two.
111 : m_Locks( nLockCount, cds::sync::pow2_select_policy( nLockCount ))
114 /// Returns lock array size
116 Lock array size is unchanged during \p striped object lifetime
118 size_t lock_count() const
120 return m_Locks.size();
124 void resize( size_t /*nNewCapacity*/ )
130 /// Refinable concurrent access policy
132 This is one of available opt::mutex_policy option type for StripedSet
134 Refining is like a striping technique (see striped_set::striping)
135 but it allows growing the size of lock array when resizing the hash table.
136 So, the sizes of hash table and lock array are equal.
139 - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required.
140 The default is \p std::recursive_mutex. The mutex type should be default-constructible.
141 - \p BackOff - back-off strategy. Default is cds::backoff::yield
142 - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR.
145 class RecursiveLock = std::recursive_mutex,
146 typename BackOff = cds::backoff::yield,
147 class Alloc = CDS_DEFAULT_ALLOCATOR>
151 typedef RecursiveLock lock_type ; ///< lock type
152 typedef BackOff back_off ; ///< back-off strategy used
153 typedef Alloc allocator_type; ///< allocator type
157 typedef cds::sync::trivial_select_policy lock_selection_policy;
159 class lock_array_type
160 : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type >
161 , public std::enable_shared_from_this< lock_array_type >
163 typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > lock_array_base;
165 lock_array_type( size_t nCapacity )
166 : lock_array_base( nCapacity )
169 typedef std::shared_ptr< lock_array_type > lock_array_ptr;
170 typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator;
172 typedef unsigned long long owner_t;
173 typedef cds::OS::ThreadId threadId_t;
175 typedef cds::sync::spin spinlock_type;
176 typedef std::unique_lock< spinlock_type > scoped_spinlock;
181 static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
183 lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor.
184 atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
185 atomics::atomic<size_t> m_nCapacity ; ///< Lock array capacity
186 spinlock_type m_access ; ///< access to m_arrLocks
191 struct lock_array_disposer {
192 void operator()( lock_array_type * pArr )
194 lock_array_allocator().Delete( pArr );
198 lock_array_ptr create_lock_array( size_t nCapacity )
200 m_nCapacity.store( nCapacity, atomics::memory_order_relaxed );
201 return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
204 lock_type& acquire( size_t nHash )
206 owner_t me = (owner_t) cds::OS::get_current_thread_id();
211 // wait while resizing
213 who = m_Owner.load( atomics::memory_order_acquire );
214 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
219 lock_array_ptr pLocks;
221 scoped_spinlock sl(m_access);
225 lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
228 who = m_Owner.load( atomics::memory_order_acquire );
229 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
235 lock_array_ptr acquire_all()
237 owner_t me = (owner_t) cds::OS::get_current_thread_id();
242 // wait while resizing
244 who = m_Owner.load( atomics::memory_order_acquire );
245 if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
250 lock_array_ptr pLocks;
252 scoped_spinlock sl(m_access);
258 who = m_Owner.load( atomics::memory_order_acquire );
259 if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
262 pLocks->unlock_all();
266 void release_all( lock_array_ptr p )
271 bool acquire_resize()
273 owner_t me = (owner_t) cds::OS::get_current_thread_id();
276 for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
278 if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
279 lock_array_ptr pOldLocks = m_arrLocks;
280 size_t const nLockCount = pOldLocks->size();
281 for ( size_t i = 0; i < nLockCount; ++i ) {
282 typename lock_array_type::lock_type& lock = pOldLocks->at(i);
284 while ( !lock.try_lock() )
296 void release_resize()
298 m_Owner.store( 0, atomics::memory_order_release );
303 class scoped_cell_lock {
304 std::unique_lock< lock_type > m_guard;
307 scoped_cell_lock( refinable& policy, size_t nHash )
308 : m_guard( policy.acquire( nHash ), std::adopt_lock_t() )
312 class scoped_full_lock {
314 lock_array_ptr m_Locks;
316 scoped_full_lock( refinable& policy )
319 m_Locks = policy.acquire_all();
323 m_Policy.release_all( m_Locks );
327 class scoped_resize_lock {
332 scoped_resize_lock( refinable& policy )
335 m_bSucceess = policy.acquire_resize();
338 ~scoped_resize_lock()
341 m_Policy.release_resize();
354 size_t nLockCount ///< Initial size of lock array. Must be power of two.
357 , m_nCapacity( nLockCount )
359 assert( cds::beans::is_power2( nLockCount ));
360 m_arrLocks = create_lock_array( nLockCount );
363 /// Returns lock array size
365 Lock array size is not a constant for \p refinable policy and can be changed when the set is resized.
367 size_t lock_count() const
369 return m_nCapacity.load( atomics::memory_order_relaxed );
372 /// Resize for new capacity
373 void resize( size_t nNewCapacity )
375 // Expect the access is locked by scoped_resize_lock!!!
376 lock_array_ptr pNewArr = create_lock_array( nNewCapacity );
377 scoped_spinlock sl(m_access);
378 m_arrLocks.swap( pNewArr );
382 }}} // namespace cds::intrusive::striped_set