private:
hp_allocator()
+#ifdef CDS_ENABLE_HPSTAT
+ : block_allocated_(0)
+#endif
{}
CDS_EXPORT_API ~hp_allocator();
private:
cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block
+#ifdef CDS_ENABLE_HPSTAT
+ public:
+ atomics::atomic<size_t> block_allocated_; ///< count of allocated blocks
+#endif
};
//@endcond
, extended_list_( nullptr )
, array_( arr )
, initial_capacity_( nSize )
+# ifdef CDS_ENABLE_HPSTAT
+ , alloc_guard_count_( 0 )
+ , free_guard_count_( 0 )
+ , extend_call_count_( 0 )
+# endif
{
// Initialize guards
new( arr ) guard[nSize];
guard* g = free_head_;
free_head_ = g->next_;
+ CDS_HPSTAT( ++alloc_guard_count_ );
return g;
}
g->clear();
g->next_ = free_head_;
free_head_ = g;
+ CDS_HPSTAT( ++free_guard_count_ );
}
}
arr.reset( i, free_head_ );
free_head_ = free_head_->next_;
}
+ CDS_HPSTAT( alloc_guard_count_ += Capacity );
return Capacity;
}
g->clear();
g->next_ = gList;
gList = g;
+ CDS_HPSTAT( ++free_guard_count_ );
}
}
free_head_ = gList;
block->next_ = extended_list_;
extended_list_ = block;
free_head_ = block->first();
+ CDS_HPSTAT( ++extend_call_count_ );
}
private:
guard_block* extended_list_; ///< Head of extended guard blocks allocated for the thread
guard* const array_; ///< initial HP array
size_t const initial_capacity_; ///< Capacity of \p array_
+# ifdef CDS_ENABLE_HPSTAT
+ public:
+ size_t alloc_guard_count_;
+ size_t free_guard_count_;
+ size_t extend_call_count_;
+# endif
};
//@endcond
private:
retired_allocator()
+#ifdef CDS_ENABLE_HPSTAT
+ : block_allocated_(0)
+#endif
{}
CDS_EXPORT_API ~retired_allocator();
private:
cds::intrusive::FreeListImpl free_list_; ///< list of free \p retired_block
+#ifdef CDS_ENABLE_HPSTAT
+ public:
+ atomics::atomic<size_t> block_allocated_; ///< Count of allocated blocks
+#endif
};
//@endcond
, list_head_( nullptr )
, list_tail_( nullptr )
, block_count_(0)
+# ifdef CDS_ENABLE_HPSTAT
+ , retire_call_count_( 0 )
+ , extend_call_count_( 0 )
+# endif
{}
retired_array( retired_array const& ) = delete;
//assert( &p != current_cell_ );
*current_cell_ = p;
+ CDS_HPSTAT( ++retire_call_count_ );
+
if ( ++current_cell_ == current_block_->last() ) {
// goto next block if exists
if ( current_block_->next_ ) {
return true;
}
- bool safe_push( retired_ptr* p ) CDS_NOEXCEPT
+ // Pushes a still-guarded retired pointer back into the array during scan().
+ // push() increments retire_call_count_, so the counter is decremented here
+ // to avoid counting the same pointer twice in the statistics.
+ bool repush( retired_ptr* p ) CDS_NOEXCEPT
{
bool ret = push( *p );
+ CDS_HPSTAT( --retire_call_count_ );
assert( ret );
return ret;
}
list_tail_ = list_tail_->next_ = block;
current_cell_ = block->first();
++block_count_;
+ CDS_HPSTAT( ++extend_call_count_ );
}
bool empty() const
retired_block* list_head_;
retired_block* list_tail_;
size_t block_count_;
+# ifdef CDS_ENABLE_HPSTAT
+ public:
+ size_t retire_call_count_;
+ size_t extend_call_count_;
+# endif
};
//@endcond
+ /// Internal statistics
+ struct stat {
+ // NOTE: counters are filled by smr::statistics() and are meaningful only
+ // when libcds is built with -DCDS_ENABLE_HPSTAT; otherwise they remain
+ // zero after clear().
+ size_t guard_allocated; ///< Count of allocated HP guards
+ size_t guard_freed; ///< Count of freed HP guards
+ size_t retired_count; ///< Count of retired pointers
+ size_t free_count; ///< Count of free pointers
+ size_t scan_count; ///< Count of \p scan() call
+ size_t help_scan_count; ///< Count of \p help_scan() call
+
+ size_t thread_rec_count; ///< Count of thread records
+
+ size_t hp_block_count; ///< Count of extended HP blocks allocated
+ size_t retired_block_count; ///< Count of retired blocks allocated
+ size_t hp_extend_count; ///< Count of hp array \p extend() call
+ size_t retired_extend_count; ///< Count of retired array \p extend() call
+
+ /// Default ctor
+ stat()
+ {
+ clear();
+ }
+
+ /// Clears all counters
+ void clear()
+ {
+ // A single chained assignment zeroes every member; keep this list
+ // in sync with the data members above when adding a new counter.
+ guard_allocated =
+ guard_freed =
+ retired_count =
+ free_count =
+ scan_count =
+ help_scan_count =
+ thread_rec_count =
+ hp_block_count =
+ retired_block_count =
+ hp_extend_count =
+ retired_extend_count = 0;
+ }
+ };
+
//@cond
/// Per-thread data
struct thread_data {
thread_hp_storage hazards_; ///< Hazard pointers private to the thread
retired_array retired_; ///< Retired data private to the thread
+# ifdef CDS_ENABLE_HPSTAT
+ size_t free_call_count_;
+ size_t scan_call_count_;
+ size_t help_scan_call_count_;
+# endif
+
char pad1_[cds::c_nCacheLineSize];
atomics::atomic<unsigned int> sync_; ///< dummy var to introduce synchronizes-with relationship between threads
char pad2_[cds::c_nCacheLineSize];
thread_data( guard* guards, size_t guard_count )
: hazards_( guards, guard_count )
, sync_( 0 )
+# ifdef CDS_ENABLE_HPSTAT
+ , free_call_count_(0)
+ , scan_call_count_(0)
+ , help_scan_call_count_(0)
+# endif
{}
thread_data() = delete;
static CDS_EXPORT_API void attach_thread();
static CDS_EXPORT_API void detach_thread();
+ /// Get internal statistics
+ void statistics( stat& st );
+
public: // for internal use only
/// The main garbage collecting function
CDS_EXPORT_API void scan( thread_data* pRec );
/// Atomic marked pointer
template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+ /// Internal statistics
+ typedef dhp::stat stat;
/// Dynamic Hazard Pointer guard
/**
{
scan();
}
+
+ /// Returns internal statistics
+ /**
+ The function clears \p st before gathering statistics.
+
+ @note Internal statistics is available only if you compile
+ \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
+ */
+ static void statistics( stat& st )
+ {
+ dhp::smr::instance().statistics( st );
+ }
+
+ /// Returns post-mortem statistics
+ /**
+ Post-mortem statistics is gathered in the \p %DHP object destructor
+ and remains accessible after the global \p %DHP object has been destroyed.
+
+ @note Internal statistics is available only if you compile
+ \p libcds and your program with \p -DCDS_ENABLE_HPSTAT key.
+
+ Usage:
+ \code
+ int main()
+ {
+ cds::Initialize();
+ {
+ // Initialize DHP SMR
+ cds::gc::DHP dhp;
+
+ // deal with DHP-based data structures
+ // ...
+ }
+
+ // DHP object destroyed
+ // Get total post-mortem statistics
+ cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();
+
+ printf( "DHP statistics:\n"
+ " thread count = %zu\n"
+ " guard allocated = %zu\n"
+ " guard freed = %zu\n"
+ " retired data count = %zu\n"
+ " free data count = %zu\n"
+ " scan() call count = %zu\n"
+ " help_scan() call count = %zu\n",
+ st.thread_rec_count,
+ st.guard_allocated, st.guard_freed,
+ st.retired_count, st.free_count,
+ st.scan_count, st.help_scan_count
+ );
+
+ cds::Terminate();
+ }
+ \endcode
+ */
+ static stat const& postmortem_statistics();
+
};
}} // namespace cds::gc
}
};
+ stat s_postmortem_stat;
} // namespace
/*static*/ CDS_EXPORT_API smr* smr::instance_ = nullptr;
// allocate new block
gb = new( s_alloc_memory( sizeof( guard_block ) + sizeof( guard ) * defaults::c_extended_guard_block_size )) guard_block;
new ( gb->first() ) guard[defaults::c_extended_guard_block_size];
+
+ CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
}
// links guards in the block
CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )
+ CDS_HPSTAT( statistics( s_postmortem_stat ) );
+
thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
thread_list_.store( nullptr, atomics::memory_order_relaxed );
// delete retired data
for ( retired_block* block = retired.list_head_; block && block != retired.current_block_; block = block->next_ ) {
- for ( retired_ptr* p = block->first(); p != block->last(); ++p )
+ for ( retired_ptr* p = block->first(); p != block->last(); ++p ) {
p->free();
+ CDS_HPSTAT( ++s_postmortem_stat.free_count );
+ }
}
if ( retired.current_block_ ) {
- for ( retired_ptr* p = retired.current_block_->first(); p != retired.current_cell_; ++p )
+ for ( retired_ptr* p = retired.current_block_->first(); p != retired.current_cell_; ++p ) {
p->free();
+ CDS_HPSTAT( ++s_postmortem_stat.free_count );
+ }
}
hprec->retired_.fini();
hprec->hazards_.clear();
for ( retired_ptr* p = block->first(), *end = p + block_size; p != end; ++p ) {
if ( cds_unlikely( std::binary_search( hp_begin, hp_end, p->m_p )))
- stg.safe_push( p );
+ stg.repush( p );
else {
p->free();
++count;
{
thread_record* pRec = static_cast<thread_record*>( pThreadRec );
+ CDS_HPSTAT( ++pRec->scan_call_count_ );
+
hp_vector plist;
size_t plist_size = last_plist_size_.load( std::memory_order_relaxed );
plist.reserve( plist_size );
if ( end_block )
break;
}
+ CDS_HPSTAT( pRec->free_call_count_ += free_count );
// If the count of freed elements is too small, increase retired array
if ( free_count == 0 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last() )
CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
{
assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+ CDS_HPSTAT( ++pThis->help_scan_call_count_ );
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
scan( pThis );
}
+ /// Accumulates per-thread HP statistics into \p st.
+ /**
+ \p st is cleared first; when the library is built without
+ \p CDS_ENABLE_HPSTAT all counters therefore stay zero.
+ The thread list is walked with plain atomic loads (no lock), so the
+ result is a best-effort snapshot if other threads are still active.
+ */
+ void smr::statistics( stat& st )
+ {
+ st.clear();
+# ifdef CDS_ENABLE_HPSTAT
+ for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+ {
+ ++st.thread_rec_count;
+ st.guard_allocated += hprec->hazards_.alloc_guard_count_;
+ st.guard_freed += hprec->hazards_.free_guard_count_;
+ st.hp_extend_count += hprec->hazards_.extend_call_count_;
+ st.retired_count += hprec->retired_.retire_call_count_;
+ st.retired_extend_count += hprec->retired_.extend_call_count_;
+ st.free_count += hprec->free_call_count_;
+ st.scan_count += hprec->scan_call_count_;
+ st.help_scan_count += hprec->help_scan_call_count_;
+ }
+
+ // Block counters live in the shared allocators, not in per-thread records.
+ st.hp_block_count = hp_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
+ st.retired_block_count = retired_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
+# endif
+ }
+
+
}}} // namespace cds::gc::dhp
+
+/// Returns the post-mortem statistics snapshot.
+/**
+ s_postmortem_stat is filled via CDS_HPSTAT( statistics( s_postmortem_stat ))
+ during SMR shutdown (see above), so the data remains readable after the
+ DHP SMR singleton has been destroyed.
+ NOTE(review): exact call site of that snapshot is outside this hunk — confirm
+ it runs in the smr destruction path before thread records are freed.
+*/
+/*static*/ cds::gc::DHP::stat const& cds::gc::DHP::postmortem_statistics()
+{
+ return cds::gc::dhp::s_postmortem_stat;
+}
+