/// Allocator type
typedef CDS_DEFAULT_ALLOCATOR allocator;
};
+
/// Free-list based on bounded lock-free queue cds::intrusive::VyukovMPMCCycleQueue
/** @ingroup cds_memory_pool
Template parameters:
- \p T - the type of object maintained by the free-list
- - \p Traits - traits for cds::intrusive::VyukovMPMCCycleQueue class plus
- cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits
+ - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus
+ \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits
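+
+ Usage sketch (a minimal example; pooling \p std::mutex objects is an
+ assumption, any default-constructible type works):
+ \code
+ typedef cds::memory::vyukov_queue_pool< std::mutex > mutex_pool;
+
+ mutex_pool pool( 1024 );    // queue capacity for the dynamically-allocated buffer
+ std::mutex * p = pool.allocate( 1 );    // Allocator-like interface, n is expected to be 1
+ // ... use the mutex ...
+ pool.deallocate( p, 1 );
+ \endcode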
\b Internals
/**
The \p nCapacity argument is the queue capacity. It must be specified
if the queue is based on a dynamically allocated buffer.
- See cds::intrusive::VyukovMPMCCycleQueue for explanation.
+ See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
*/
vyukov_queue_pool( size_t nCapacity = 0 )
: m_Queue( nCapacity )
/**
The \p nCapacity argument is the queue capacity. It must be specified
if the queue is based on a dynamically allocated buffer.
- See cds::intrusive::VyukovMPMCCycleQueue for explanation.
+ See \p cds::intrusive::VyukovMPMCCycleQueue for explanation.
*/
bounded_vyukov_queue_pool( size_t nCapacity = 0 )
: m_Queue( nCapacity )
/// @ref cds_sync_monitor "Monitor" that injects the lock into each node
/**
- This monitor injects the lock object of type \p Lock into each node.
+ This simple monitor injects the lock object of type \p Lock into each node.
The monitor is designed for user-space locking primitives like \ref sync::spin_lock "spin-lock".
Template arguments:
typedef Lock lock_type; ///< Lock type
/// Monitor injection into \p Node
- struct node_injection
+ template <typename Node>
+ struct node_injection: public Node
{
# ifdef CDS_CXX11_INHERITING_CTOR
using Node::Node;
# else
            // Inheriting ctor emulation
            template <typename... Args>
            node_injection( Args&&... args )
                : Node( std::forward<Args>( args )... )
            {}
# endif
mutable lock_type m_Lock; ///< Node-level lock
-
- /// Makes exclusive access to the node
- void lock() const
- {
- m_Lock.lock;
- }
-
- /// Unlocks the node
- void unlock() const
- {
- m_Lock.unlock();
- }
};
/// Makes exclusive access to node \p p
- /**
- \p p must have method \p lock()
- */
template <typename Node>
void lock( Node const& p ) const
{
- p.lock();
+ p.m_Lock.lock();
}
/// Unlocks the node \p p
- /**
- \p p must have method \p unlock()
- */
template <typename Node>
void unlock( Node const& p ) const
{
- p.unlock();
+ p.m_Lock.unlock();
}
/// Scoped lock
template <typename Node>
- class scoped_lock
- {
- Node const& m_Locked; ///< Our locked node
-
- public:
- /// Makes exclusive access to node \p p
- scoped_lock( injecting_monitor const&, Node const& p )
- : m_Locked( p )
- {
- p.lock();
- }
-
- /// Unlocks the node
- ~scoped_lock()
- {
- p.unlock();
- }
- };
+ using scoped_lock = monitor_scoped_lock< injecting_monitor, Node >;
};
}} // namespace cds::sync
namespace cds { namespace sync {
- /// Trivial lock \ref array selection policy
+ /// Trivial lock \ref lock_array selection policy
struct trivial_select_policy
{
/// Returns \p nWhat
}
};
- /// The lock \ref array cell selection policy "division by modulo"
+ /// The lock \ref lock_array cell selection policy "division by modulo"
struct mod_select_policy
{
/// Returns <tt> nWhat % nCapacity </tt>
}
};
- /// The lock \ref array cell selection policy "division by modulo of power of 2"
+ /// The lock \ref lock_array cell selection policy "division by modulo of power of 2"
/**
This policy may be used if the size of lock array is equal to power of two.
*/
- \p sync::injecting_monitor injects the lock object into each node.
That monitor is designed for user-space locking primitives like
\ref sync::spin_lock "spin-lock".
+ - \p sync::pool_monitor is the monitor that allocates the lock object
+ for the node from the pool when needed. When the node is unlocked,
+ the lock assigned to it is given back to the pool if no thread
+ references that node (see the sketch below).
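+
+ For example (a hedged sketch; pooling \p std::mutex objects is an assumption):
+ \code
+ // monitor that embeds a spin-lock into every node
+ typedef cds::sync::injecting_monitor< cds::sync::spin > embedded_monitor;
+
+ // monitor that shares a pool of mutexes between the nodes
+ typedef cds::memory::vyukov_queue_pool< std::mutex > mutex_pool;
+ typedef cds::sync::pool_monitor< mutex_pool > shared_monitor;
+ \endcode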
<b>How to use</b>
// Scoped lock applies RAII to Monitor
template <typename Node>
- class scoped_lock
- {
- public:
- // Locks node by monitor mon
- scoped_lock( Monitor& mon, Node& node );
-
- // Unlocks the node locked by ctor
- ~scoped_lock();
- };
+ using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
};
\endcode
The monitor should be a member of your container:
\endcode
*/
+ /// Monitor scoped lock (RAII)
+ /**
+ Template arguments:
+ - \p Monitor - monitor type
+ - \p Node - node type
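+
+ A minimal usage sketch (\p mon and \p node are assumed names of a monitor
+ and a container node):
+ \code
+ {
+     monitor_scoped_lock< monitor_type, node_type > sl( mon, node );
+     // the node is locked here...
+ }   // ...and unlocked when \p sl goes out of scope
+ \endcode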
+ */
+ template <typename Monitor, typename Node>
+ struct monitor_scoped_lock
+ {
+ public:
+ typedef Monitor monitor_type; ///< Monitor type
+ typedef Node node_type; ///< Node type
+
+ private:
+ //@cond
+ monitor_type& m_Monitor; ///< Monitor
+ node_type const& m_Node; ///< Our locked node
+ //@endcond
+
+ public:
+ /// Makes exclusive access to the node \p p by \p monitor
+        monitor_scoped_lock( monitor_type& monitor, node_type const& p )
+ : m_Monitor( monitor )
+ , m_Node( p )
+ {
+ monitor.lock( p );
+ }
+
+ /// Unlocks the node
+        ~monitor_scoped_lock()
+ {
+ m_Monitor.unlock( m_Node );
+ }
+ };
+
}} // namespace cds::sync
#endif // #ifndef CDSLIB_SYNC_MONITOR_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef CDSLIB_SYNC_POOL_MONITOR_H
+#define CDSLIB_SYNC_POOL_MONITOR_H
+
+#include <cds/sync/monitor.h>
+#include <cds/algo/atomic.h>
+#include <cds/algo/backoff_strategy.h>
+
+namespace cds { namespace sync {
+
+ /// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
+ /**
+        The monitor is intended to reduce the number of system mutexes needed for
+        huge containers such as trees. A mutex is allocated from the pool \p LockPool
+        only when a container node has to be locked. The lifetime of a node's mutex
+        is managed by a reference counter: when the counter drops to zero,
+        the mutex is returned to the pool.
+
+        The monitor is blocking: access to the node's mutex pointer is protected by a spin-lock.
+        However, locking and unlocking the node's mutex itself happens outside that spin-lock.
+
+ Template arguments:
+        - \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
+          objects of type \p std::mutex or similar. The monitor does not serialize
+          access to the pool, so the pool itself must be safe for concurrent use.
+ - \p BackOff - back-off strategy for spinning, default is \p cds::backoff::LockDefault
+
+ <b>How to use</b>
+ \code
+ typedef cds::memory::vyukov_queue_pool< std::mutex > pool_type;
+ typedef cds::sync::pool_monitor< pool_type > sync_monitor;
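+
+        // a hedged continuation of the sketch: construct the monitor over a pool
+        // of, e.g., 1024 mutexes (the capacity value is illustrative)
+        sync_monitor mon( 1024 );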
+ \endcode
+ */
+ template <class LockPool, typename BackOff = cds::backoff::LockDefault >
+ class pool_monitor
+ {
+ public:
+ typedef LockPool pool_type; ///< Pool type
+ typedef typename pool_type::value_type lock_type; ///< node lock type
+ typedef BackOff back_off; ///< back-off strategy for spinning
+
+    private:
+        //@cond
+        // m_RefSpin layout: bit 0 is a spin-lock protecting the node's m_pLock pointer,
+        // the upper bits form the reference counter of the node's mutex
+        typedef unsigned int refspin_type;
+        static constexpr refspin_type c_nSpinBit = 1;
+        static constexpr refspin_type c_nRefIncrement = 2;
+
+        mutable pool_type   m_Pool;     // mutable: \p lock() and \p unlock() are const
+        //@endcond
+
+ public:
+ /// Monitor injection into \p Node
+ template <typename Node>
+ class node_injection : public Node
+ {
+ //@cond
+
+ struct injection
+ {
+ atomics::atomic<refspin_type> m_RefSpin; ///< Spin-lock for \p m_pLock (bit 0) + reference counter
+ lock_type * m_pLock; ///< Node-level lock
+
+ injection()
+                    : m_RefSpin( 0 )
+ , m_pLock( nullptr )
+ {}
+ };
+ //@endcond
+
+ public:
+ mutable injection m_Access; ///< injected data
+
+# ifdef CDS_CXX11_INHERITING_CTOR
+ using Node::Node;
+# else
+ // Inheriting ctor emulation
+ template <typename... Args>
+ node_injection( Args&&... args )
+ : Node( std::forward<Args>( args )... )
+ {}
+# endif
+ };
+
+        /// Initializes the monitor with a mutex pool of capacity 256
+ pool_monitor()
+ : m_Pool( 256 )
+ {}
+
+        /// Initializes the monitor with a mutex pool of capacity \p nPoolCapacity
+ pool_monitor( size_t nPoolCapacity )
+            : m_Pool( nPoolCapacity )
+ {}
+
+ /// Makes exclusive access to node \p p
+ template <typename Node>
+ void lock( Node const& p ) const
+ {
+ lock_type * pLock;
+
+ // try lock spin and increment reference counter
+ refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+ if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ {
+ back_off bkoff;
+ do {
+ bkoff();
+ cur &= ~c_nSpinBit;
+ } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
+                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+ }
+
+ // spin locked
+ // If the node has no lock, allocate it from pool
+ pLock = p.m_Access.m_pLock;
+ if ( !pLock )
+ pLock = p.m_Access.m_pLock = m_Pool.allocate( 1 );
+
+ // unlock spin
+ p.m_Access.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
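+            // note: cur has the spin bit cleared, so this store both publishes
+            // m_pLock and releases the spin-lock while keeping the incremented counter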
+
+ // lock the node
+ pLock->lock();
+ }
+
+ /// Unlocks the node \p p
+ template <typename Node>
+ void unlock( Node const& p ) const
+ {
+ lock_type * pLock = nullptr;
+
+ assert( p.m_Access.m_pLock != nullptr );
+ p.m_Access.m_pLock->unlock();
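+            // the node's mutex is released before acquiring the spin-lock below;
+            // the spin-lock protects only the reference counter and the m_pLock pointer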
+
+ // try lock spin
+ refspin_type cur = p.m_Access.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
+ if ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+ atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ {
+ back_off bkoff;
+ do {
+ bkoff();
+ cur &= ~c_nSpinBit;
+ } while ( !p.m_Access.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
+                    atomics::memory_order_acquire, atomics::memory_order_relaxed ));
+ }
+
+ // spin locked now
+
+ // If we are the unique owner - deallocate lock
+ if ( cur == c_nRefIncrement ) {
+ pLock = p.m_Access.m_pLock;
+ p.m_Access.m_pLock = nullptr;
+ }
+
+ // unlock spin
+ p.m_Access.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
+
+ // free pLock
+ if ( pLock )
+ m_Pool.deallocate( pLock, 1 );
+ }
+
+ /// Scoped lock
+ template <typename Node>
+ using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
+ };
+
+}} // namespace cds::sync
+
+#endif // #ifndef CDSLIB_SYNC_POOL_MONITOR_H
+
<ClInclude Include="..\..\..\cds\sync\injecting_monitor.h" />\r
<ClInclude Include="..\..\..\cds\sync\lock_array.h" />\r
<ClInclude Include="..\..\..\cds\sync\monitor.h" />\r
+ <ClInclude Include="..\..\..\cds\sync\pool_monitor.h" />\r
<ClInclude Include="..\..\..\cds\sync\spinlock.h" />\r
<ClInclude Include="..\..\..\cds\threading\details\cxx11.h" />\r
<ClInclude Include="..\..\..\cds\threading\details\cxx11_manager.h" />\r
<ClInclude Include="..\..\..\cds\sync\injecting_monitor.h">\r
<Filter>Header Files\cds\sync</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\cds\sync\pool_monitor.h">\r
+ <Filter>Header Files\cds\sync</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file