/projects/Win/vc14/.vs/cds/v14
/build/build-mingw-amd64.bat
/build/build-mingw-amd64.log
+/todo-2.1.txt
return base_class::statistics();
}
+ /// Returns reference to \p sync_monitor object
+ sync_monitor& monitor()
+ {
+ return base_class::monitor();
+ }
+ //@cond
+ sync_monitor const& monitor() const
+ {
+ return base_class::monitor();
+ }
+ //@endcond
+
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
return m_stat;
}
+ /// Returns reference to \p sync_monitor object
+ sync_monitor& monitor()
+ {
+ return m_Monitor;
+ }
+ //@cond
+ sync_monitor const& monitor() const
+ {
+ return m_Monitor;
+ }
+ //@endcond
+
/// Checks internal consistency (not atomic, not thread-safe)
/**
The debugging function to check internal consistency of the tree.
namespace cds { namespace sync {
+ //@cond
+ struct injecting_monitor_traits {
+ struct empty_stat
+ {};
+ };
+ //@endcond
+
/// @ref cds_sync_monitor "Monitor" that injects the lock into each node
/**
This simple monitor injects the lock object of type \p Lock into each node.
p.m_SyncMonitorInjection.m_Lock.unlock();
}
+ //@cond
+ injecting_monitor_traits::empty_stat statistics() const
+ {
+ return injecting_monitor_traits::empty_stat();
+ }
+ //@endcond
+
/// Scoped lock
template <typename Node>
using scoped_lock = monitor_scoped_lock< injecting_monitor, Node > ;
- \p sync::injecting_monitor injects the lock object into each node.
That mock monitor is designed for user-space locking primitive like
\ref sync::spin_lock "spin-lock".
- - \p sync::pool_monitor is the monitor that allocates the lock object
- for the node from the pool when needed. When the node is unlocked
+ - \p sync::pool_monitor is the monitor that allocates a lock object
+ for a node from the pool when needed. When the node is unlocked
the lock assigned to it is given back to the pool if no thread
references to that node.
#include <cds/sync/monitor.h>
#include <cds/algo/atomic.h>
#include <cds/algo/backoff_strategy.h>
+#include <cds/opt/options.h> // opt::none
namespace cds { namespace sync {
+ /// \p pool_monitor traits
+ struct pool_monitor_traits {
+
+ /// Dummy internal statistics if \p Stat template parameter is \p false
+ struct empty_stat
+ {
+ //@cond
+ void onLock() const {}
+ void onUnlock() const {}
+ void onLockContention() const {}
+ void onUnlockContention() const {}
+ void onLockAllocation() const {}
+ void onLockDeallocation() const {}
+ //@endcond
+ };
+
+ /// Monitor's internal statistics, used if \p Stat template parameter is \p true
+ template <typename Counter = cds::atomicity::event_counter >
+ struct stat
+ {
+ typedef Counter event_counter; ///< measure type
+
+ event_counter m_nLockCount; ///< Number of monitor \p lock() call
+            event_counter m_nUnlockCount;        ///< Number of monitor \p unlock() call
+            event_counter m_nLockContention;     ///< Number of \p lock() contention
+ event_counter m_nUnlockContention; ///< Number of \p unlock() contention
+ event_counter m_nLockAllocation; ///< Number of the lock allocation from the pool
+ event_counter m_nLockDeallocation; ///< Number of the lock deallocation
+
+ //@cond
+ void onLock() { ++m_nLockCount; }
+ void onUnlock() { ++m_nUnlockCount; }
+ void onLockContention() { ++m_nLockContention; }
+ void onUnlockContention() { ++m_nUnlockContention;}
+ void onLockAllocation() { ++m_nLockAllocation; }
+ void onLockDeallocation() { ++m_nLockDeallocation;}
+ //@endcond
+ };
+ };
+
+
/// @ref cds_sync_monitor "Monitor" that allocates node's lock when needed
/**
The monitor is intended for reducing the number of system mutexes for
- \p LockPool - the @ref cds_memory_pool "pool type". The pool must maintain
the objects of type \p std::mutex or similar. The access to the pool is not synchronized.
- \p BackOff - back-off strategy for spinning, default is \p cds::backoff::LockDefault
+ - \p Stat - enable (\p true) or disable (\p false, the default) monitor's internal statistics.
<b>How to use</b>
\code
typedef cds::sync::pool_monitor< pool_type > sync_monitor;
\endcode
*/
- template <class LockPool, typename BackOff = cds::backoff::LockDefault >
+ template <class LockPool, typename BackOff = cds::backoff::LockDefault, bool Stat = false >
class pool_monitor
{
public:
typedef LockPool pool_type; ///< Pool type
typedef typename pool_type::value_type lock_type; ///< node lock type
- typedef BackOff back_off; ///< back-off strategy for spinning
+ typedef typename std::conditional<
+ std::is_same< BackOff, cds::opt::none >::value,
+ cds::backoff::LockDefault,
+ BackOff
+ >::type back_off; ///< back-off strategy for spinning
typedef uint32_t refspin_type; ///< Reference counter + spin-lock bit
+ /// Internal statistics
+ typedef typename std::conditional<
+ Stat,
+ typename pool_monitor_traits::stat<>,
+ typename pool_monitor_traits::empty_stat
+ >::type internal_stat;
+
private:
//@cond
static CDS_CONSTEXPR refspin_type const c_nSpinBit = 1;
static CDS_CONSTEXPR refspin_type const c_nRefIncrement = 2;
- mutable pool_type m_Pool;
+ mutable pool_type m_Pool;
+ mutable internal_stat m_Stat;
//@endcond
public:
{
lock_type * pLock;
+ m_Stat.onLock();
+
// try lock spin and increment reference counter
refspin_type cur = p.m_SyncMonitorInjection.m_RefSpin.load( atomics::memory_order_relaxed ) & ~c_nSpinBit;
if ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
{
back_off bkoff;
do {
+ m_Stat.onLockContention();
bkoff();
cur &= ~c_nSpinBit;
} while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nRefIncrement + c_nSpinBit,
// spin locked
// If the node has no lock, allocate it from pool
pLock = p.m_SyncMonitorInjection.m_pLock;
- if ( !pLock )
+ if ( !pLock ) {
pLock = p.m_SyncMonitorInjection.m_pLock = m_Pool.allocate( 1 );
+ m_Stat.onLockAllocation();
+ }
// unlock spin
p.m_SyncMonitorInjection.m_RefSpin.store( cur + c_nRefIncrement, atomics::memory_order_release );
{
lock_type * pLock = nullptr;
+ m_Stat.onUnlock();
+
assert( p.m_SyncMonitorInjection.m_pLock != nullptr );
p.m_SyncMonitorInjection.m_pLock->unlock();
{
back_off bkoff;
do {
+ m_Stat.onUnlockContention();
bkoff();
cur &= ~c_nSpinBit;
} while ( !p.m_SyncMonitorInjection.m_RefSpin.compare_exchange_weak( cur, cur + c_nSpinBit,
p.m_SyncMonitorInjection.m_RefSpin.store( cur - c_nRefIncrement, atomics::memory_order_release );
// free pLock
- if ( pLock )
+ if ( pLock ) {
m_Pool.deallocate( pLock, 1 );
+ m_Stat.onLockDeallocation();
+ }
}
/// Scoped lock
template <typename Node>
using scoped_lock = monitor_scoped_lock< pool_monitor, Node >;
+
+ /// Returns the reference to internal statistics
+ /**
+ If class' template argument \p Stat is \p false,
+ the function returns \ref empty_stat "dummy statistics".
+ Otherwise, it returns the reference to monitor's internal statistics
+ of type \ref stat.
+ */
+ internal_stat const& statistics() const
+ {
+ return m_Stat;
+ }
};
}} // namespace cds::sync
..\..\..\tests\unit\print_segmentedqueue_stat.h = ..\..\..\tests\unit\print_segmentedqueue_stat.h\r
..\..\..\tests\unit\print_skip_list_stat.h = ..\..\..\tests\unit\print_skip_list_stat.h\r
..\..\..\tests\unit\print_split_list_stat.h = ..\..\..\tests\unit\print_split_list_stat.h\r
+ ..\..\..\tests\unit\print_sync_monitor_stat.h = ..\..\..\tests\unit\print_sync_monitor_stat.h\r
EndProjectSection\r
EndProject\r
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "unit-prerequisites", "unit-prerequisites.vcxproj", "{61179F2F-07E1-490D-B64D-D85A90B6EF81}"\r
protected:
static const size_t c_nItemCount = 10000;
- /*
- class data_array
- {
- int * pFirst;
- int * pLast;
-
- public:
- data_array()
- : pFirst( new int[c_nItemCount] )
- , pLast( pFirst + c_nItemCount )
- {
- int i = 0;
- for ( int * p = pFirst; p != pLast; ++p, ++i )
- *p = i;
-
- std::random_shuffle( pFirst, pLast );
- }
-
- ~data_array()
- {
- delete[] pFirst;
- }
-
- int operator[]( size_t i ) const
- {
- assert( i < size_t( pLast - pFirst ) );
- return pFirst[i];
- }
- };
- */
-
struct find_functor
{
void operator()( key_type, value_type& v ) const
CPPUNIT_ASSERT( s.empty() );
CPPUNIT_ASSERT( check_size( s, 0 ) );
-
-
PrintStat()(s);
}
#include <cds/memory/vyukov_queue_pool.h>
#include "unit/print_bronsonavltree_stat.h"
+#include "unit/print_sync_monitor_stat.h"
namespace tree {
namespace cc = cds::container;
struct print_stat {
template <typename Tree>
- void operator()( Tree const& t )
+ void operator()( Tree const& t ) const
{
std::cout << t.statistics();
+ std::cout << t.monitor().statistics();
}
};
cc::bronson_avltree::make_traits<
co::less< std::less<key_type> >
,co::stat< cc::bronson_avltree::stat<> >
- ,co::sync_monitor< cds::sync::pool_monitor<lazy_pool> >
+ ,co::sync_monitor< cds::sync::pool_monitor<lazy_pool, cds::opt::none, true >>
,cc::bronson_avltree::relaxed_insert< false >
>::type
{};
co::compare< compare >
,co::item_counter< cds::atomicity::item_counter >
,co::stat< cc::bronson_avltree::stat<> >
- ,co::sync_monitor< cds::sync::pool_monitor<bounded_pool> >
+ ,co::sync_monitor< cds::sync::pool_monitor<bounded_pool, cds::opt::none, true >>
>::type
{};
typedef cc::BronsonAVLTreeMap< rcu_type, key_type, value_type, traits > map_type;
,co::item_counter< cds::atomicity::item_counter >
,co::stat< cc::bronson_avltree::stat<> >
,co::back_off< cds::backoff::yield >
- ,co::sync_monitor< cds::sync::pool_monitor<lazy_pool> >
+ ,co::sync_monitor< cds::sync::pool_monitor<lazy_pool, cds::opt::none, true >>
>::type
{};
typedef cc::BronsonAVLTreeMap< rcu_type, key_type, value_type, traits > map_type;
co::less< std::less<key_type> >
,co::stat< cc::bronson_avltree::stat<> >
,cc::bronson_avltree::relaxed_insert< true >
- ,co::sync_monitor< cds::sync::pool_monitor<simple_pool> >
+ ,co::sync_monitor< cds::sync::pool_monitor<simple_pool, cds::opt::none, true >>
>::type
{};
typedef cc::BronsonAVLTreeMap< rcu_type, key_type, value_type, traits > map_type;
#include <cds/memory/vyukov_queue_pool.h>
#include "unit/print_bronsonavltree_stat.h"
+#include "unit/print_sync_monitor_stat.h"
namespace tree {
namespace cc = cds::container;
void operator()( Tree const& t )
{
std::cout << t.statistics();
+ std::cout << t.monitor().statistics();
}
};
#include <cds/memory/vyukov_queue_pool.h>
#include "unit/print_bronsonavltree_stat.h"
+#include "unit/print_sync_monitor_stat.h"
+
namespace tree {
namespace cc = cds::container;
void operator()( Tree const& t )
{
std::cout << t.statistics();
+ std::cout << t.monitor().statistics();
}
};
#include <cds/memory/vyukov_queue_pool.h>
#include "unit/print_bronsonavltree_stat.h"
+#include "unit/print_sync_monitor_stat.h"
namespace tree {
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
void operator()( Tree const& t )
{
std::cout << t.statistics();
+ std::cout << t.monitor().statistics();
}
};
#include <cds/memory/vyukov_queue_pool.h>
#include "unit/print_bronsonavltree_stat.h"
+#include "unit/print_sync_monitor_stat.h"
namespace tree {
#ifdef CDS_URCU_SIGNAL_HANDLING_ENABLED
void operator()( Tree const& t )
{
std::cout << t.statistics();
+ std::cout << t.monitor().statistics();
}
};
#include "print_skip_list_stat.h"
#include "print_ellenbintree_stat.h"
#include "print_bronsonavltree_stat.h"
+#include "print_sync_monitor_stat.h"
#include "ellen_bintree_update_desc_pool.h"
namespace map2 {
typedef cc::BronsonAVLTreeMap< rcu_shb, Key, Value, BronsonAVLTreeMap_less_pool_simple > BronsonAVLTreeMap_rcu_shb_less_pool_simple;
typedef cc::BronsonAVLTreeMap< rcu_sht, Key, Value, BronsonAVLTreeMap_less_pool_simple > BronsonAVLTreeMap_rcu_sht_less_pool_simple;
#endif
- struct BronsonAVLTreeMap_less_pool_simple_stat : public BronsonAVLTreeMap_less_pool_simple
+ struct BronsonAVLTreeMap_less_pool_simple_stat : public BronsonAVLTreeMap_less
{
typedef cc::bronson_avltree::stat<> stat;
+ typedef cds::sync::pool_monitor<BronsonAVLTreeMap_simple_pool, cds::opt::none, true > sync_monitor;
};
typedef cc::BronsonAVLTreeMap< rcu_gpi, Key, Value, BronsonAVLTreeMap_less_pool_simple_stat > BronsonAVLTreeMap_rcu_gpi_less_pool_simple_stat;
typedef cc::BronsonAVLTreeMap< rcu_gpb, Key, Value, BronsonAVLTreeMap_less_pool_simple_stat > BronsonAVLTreeMap_rcu_gpb_less_pool_simple_stat;
typedef cc::BronsonAVLTreeMap< rcu_shb, Key, Value, BronsonAVLTreeMap_less_pool_lazy > BronsonAVLTreeMap_rcu_shb_less_pool_lazy;
typedef cc::BronsonAVLTreeMap< rcu_sht, Key, Value, BronsonAVLTreeMap_less_pool_lazy > BronsonAVLTreeMap_rcu_sht_less_pool_lazy;
#endif
- struct BronsonAVLTreeMap_less_pool_lazy_stat : public BronsonAVLTreeMap_less_pool_lazy
+ struct BronsonAVLTreeMap_less_pool_lazy_stat : public BronsonAVLTreeMap_less
{
typedef cc::bronson_avltree::stat<> stat;
+ typedef cds::sync::pool_monitor<BronsonAVLTreeMap_lazy_pool, cds::opt::none, true > sync_monitor;
+ static CDS_CONSTEXPR bool const relaxed_insert = true;
};
typedef cc::BronsonAVLTreeMap< rcu_gpi, Key, Value, BronsonAVLTreeMap_less_pool_lazy_stat > BronsonAVLTreeMap_rcu_gpi_less_pool_lazy_stat;
typedef cc::BronsonAVLTreeMap< rcu_gpb, Key, Value, BronsonAVLTreeMap_less_pool_lazy_stat > BronsonAVLTreeMap_rcu_gpb_less_pool_lazy_stat;
typedef cc::BronsonAVLTreeMap< rcu_shb, Key, Value, BronsonAVLTreeMap_less_pool_bounded > BronsonAVLTreeMap_rcu_shb_less_pool_bounded;
typedef cc::BronsonAVLTreeMap< rcu_sht, Key, Value, BronsonAVLTreeMap_less_pool_bounded > BronsonAVLTreeMap_rcu_sht_less_pool_bounded;
#endif
- struct BronsonAVLTreeMap_less_pool_bounded_stat : public BronsonAVLTreeMap_less_pool_bounded
+ struct BronsonAVLTreeMap_less_pool_bounded_stat : public BronsonAVLTreeMap_less
{
typedef cc::bronson_avltree::stat<> stat;
+ typedef cds::sync::pool_monitor<BronsonAVLTreeMap_bounded_pool, cds::opt::none, true > sync_monitor;
+ static CDS_CONSTEXPR bool const relaxed_insert = true;
};
typedef cc::BronsonAVLTreeMap< rcu_gpi, Key, Value, BronsonAVLTreeMap_less_pool_bounded_stat > BronsonAVLTreeMap_rcu_gpi_less_pool_bounded_stat;
typedef cc::BronsonAVLTreeMap< rcu_gpb, Key, Value, BronsonAVLTreeMap_less_pool_bounded_stat > BronsonAVLTreeMap_rcu_gpb_less_pool_bounded_stat;
static inline void print_stat( cc::BronsonAVLTreeMap<GC, Key, T, Traits> const& m )
{
CPPUNIT_MSG( m.statistics() );
+ CPPUNIT_MSG( m.monitor().statistics() );
}
template <typename GC, typename Key, typename T, typename Traits>
--- /dev/null
+//$$CDS-header$$
+
+#if defined(CDSLIB_SYNC_INJECTING_MONITOR_H) && !defined(CDSUNIT_PRINT_INJECTING_MONITOR_STAT_H)
+#define CDSUNIT_PRINT_INJECTING_MONITOR_STAT_H
+
+namespace std {
+ static inline ostream& operator <<( ostream& o, cds::sync::injecting_monitor_traits::empty_stat const& /*s*/ )
+ {
+ return o;
+ }
+}
+#endif
+
+#if defined(CDSLIB_SYNC_POOL_MONITOR_H) && !defined(CDSUNIT_PRINT_POOL_MONITOR_STAT_H)
+#define CDSUNIT_PRINT_POOL_MONITOR_STAT_H
+
+namespace std {
+ static inline ostream& operator <<( ostream& o, cds::sync::pool_monitor_traits::empty_stat const& /*s*/ )
+ {
+ return o;
+ }
+
+ static inline ostream& operator <<( ostream& o, cds::sync::pool_monitor_traits::stat<> const& s )
+ {
+ return o << "cds::sync::pool_monitor statistics:\n"
+ << "\t\t m_nLockCount: " << s.m_nLockCount.get() << "\n"
+ << "\t\t m_nUnlockCount: " << s.m_nUnlockCount.get() << "\n"
+ << "\t\t m_nLockContention: " << s.m_nLockContention.get() << "\n"
+ << "\t\t m_nUnlockContention: " << s.m_nUnlockContention.get() << "\n"
+ << "\t\t m_nLockAllocation: " << s.m_nLockAllocation.get() << "\n"
+ << "\t\t m_nLockDeallocation: " << s.m_nLockDeallocation.get() << "\n";
+ }
+}
+#endif