/// Allocates uninitialized raw storage for \p nCount objects of \p value_type.
/// Memory comes from the SMR-wide raw allocator \p s_alloc_memory();
/// no constructors are run — the caller placement-constructs into it.
static T* allocate( size_t nCount )
{
    return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ));
}
/// Returns storage previously obtained from allocate() to \p s_free_memory().
/// The element count parameter is unused because the raw deallocator
/// does not need a size; it is kept to satisfy the allocator interface.
static void deallocate( T* p, size_t /*nCount*/ )
{
    s_free_memory( reinterpret_cast<void*>( p ));
}
};
/// Constructs a per-thread hazard-pointer record over externally supplied
/// guard and retired-pointer arrays (carved out of one raw allocation by
/// the caller).
/// The record starts unlinked (m_pNextNode == nullptr), unowned
/// (m_idOwner == cds::OS::c_NullThreadId) and not marked free — explicitly
/// initializing the two atomics avoids reading indeterminate values when
/// the record is first traversed or claimed.
thread_record( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity )
    : thread_data( guards, guard_count, retired_arr, retired_capacity )
    , m_pNextNode( nullptr )
    , m_idOwner( cds::OS::c_NullThreadId )
    , m_bFree( false )
{}
};
}
/// Constructs the global SMR (hazard-pointer) manager.
/// @param nHazardPtrCount    hazard pointers per thread; 0 selects the library default
/// @param nMaxThreadCount    maximum attached threads; 0 selects the library default
/// @param nMaxRetiredPtrCount retired-array capacity; adjusted via calc_retired_size()
///                           against the hazard-pointer and thread limits
/// @param nScanType          selects classic_scan or inplace_scan as the scan routine
///
/// thread_list_ is published as empty with an explicit release-ordered store
/// (rather than init-list initialization) so a thread that observes the list
/// head also observes the fully constructed manager state.
CDS_EXPORT_API smr::smr( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
    : hazard_ptr_count_( nHazardPtrCount == 0 ? defaults::c_nHazardPointerPerThread : nHazardPtrCount )
    , max_thread_count_( nMaxThreadCount == 0 ? defaults::c_nMaxThreadCount : nMaxThreadCount )
    , max_retired_ptr_count_( calc_retired_size( nMaxRetiredPtrCount, hazard_ptr_count_, max_thread_count_ ))
    , scan_type_( nScanType )
    , scan_func_( nScanType == classic ? &smr::classic_scan : &smr::inplace_scan )
{
    thread_list_.store( nullptr, atomics::memory_order_release );
}
CDS_EXPORT_API smr::~smr()
{
CDS_HPSTAT( statistics( s_postmortem_stat ));
thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
- thread_list_.store( nullptr, atomics::memory_order_relaxed );
+ thread_list_.store( nullptr, atomics::memory_order_release );
thread_record* pNext = nullptr;
for ( thread_record* hprec = pHead; hprec; hprec = pNext )
{
assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
- || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
- || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
- );
+ || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId );
retired_array& arr = hprec->retired_;
for ( retired_ptr* cur{ arr.first() }, *last{ arr.last() }; cur != last; ++cur ) {
+--------------------------+
*/
- char* mem = reinterpret_cast<char*>( s_alloc_memory( nSize ));
- return new( mem ) thread_record(
- reinterpret_cast<guard*>( mem + sizeof( thread_record )), get_hazard_ptr_count(),
- reinterpret_cast<retired_ptr*>( mem + sizeof( thread_record ) + guard_array_size ), get_max_retired_ptr_count()
+ uint8_t* mem = reinterpret_cast<uint8_t*>( s_alloc_memory( nSize ));
+
+ return new( mem ) thread_record(
+ reinterpret_cast<guard*>( mem + sizeof( thread_record )),
+ get_hazard_ptr_count(),
+ reinterpret_cast<retired_ptr*>( mem + sizeof( thread_record ) + guard_array_size ),
+ get_max_retired_ptr_count()
);
}
CDS_EXPORT_API smr::thread_record* smr::alloc_thread_data()
{
- //CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
-
thread_record * hprec;
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
// First try to reuse a free (non-active) HP record
- for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) ) {
+ for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_acquire )) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ))
continue;
hprec->m_bFree.store( false, atomics::memory_order_release );
return hprec;
thread_record* pOldHead = thread_list_.load( atomics::memory_order_relaxed );
do {
- hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
- } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ) );
+ hprec->m_pNextNode.store( pOldHead, atomics::memory_order_release );
+ } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ));
return hprec;
}
CDS_EXPORT_API void smr::free_thread_data( smr::thread_record* pRec )
{
assert( pRec != nullptr );
- //CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
pRec->hazards_.clear();
scan( pRec );
}
}
- CDS_HPSTAT( ++pRec->scan_count_ );
+ CDS_HPSTAT( ++pThreadRec->scan_count_ );
// Sort retired pointer array
std::sort( first_retired, last_retired, retired_ptr::less );
{
thread_record* pRec = static_cast<thread_record*>( pThreadRec );
- CDS_HPSTAT( ++pRec->scan_count_ );
+ CDS_HPSTAT( ++pThreadRec->scan_count_ );
std::vector< void*, allocator<void*>> plist;
plist.reserve( get_max_thread_count() * get_hazard_ptr_count());
}
// Sort plist to simplify search in
- std::sort( plist.begin(), plist.end() );
+ std::sort( plist.begin(), plist.end());
// Stage 2: Search plist
retired_array& retired = pRec->retired_;
auto itEnd = plist.end();
retired_ptr* insert_pos = first_retired;
for ( retired_ptr* it = first_retired; it != last_retired; ++it ) {
- if ( std::binary_search( itBegin, itEnd, first_retired->m_p ) ) {
+ if ( std::binary_search( itBegin, itEnd, first_retired->m_p )) {
if ( insert_pos != it )
*insert_pos = *it;
++insert_pos;
CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
{
- assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id() );
+ assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());
CDS_HPSTAT( ++pThis->help_scan_count_ );
const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
{
+ if ( hprec == static_cast<thread_record*>( pThis ))
+ continue;
+
// If m_bFree == true then hprec->retired_ is empty - we don't need to see it
if ( hprec->m_bFree.load( atomics::memory_order_acquire ))
continue;
// Several threads may work concurrently so we use atomic technique only.
{
cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
- if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
+ if ( curOwner == nullThreadId ) {
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
continue;
}
else
}
// We own the thread record successfully. Now, we can see whether it has retired pointers.
- // If it has ones then we move to pThis that is private for current thread.
+ // If it has ones then we move them to pThis that is private for current thread.
retired_array& src = hprec->retired_;
retired_array& dest = pThis->retired_;
- assert( !dest.full() );
+ assert( !dest.full());
retired_ptr* src_first = src.first();
retired_ptr* src_last = src.last();
scan( pThis );
}
- src.reset( 0 );
-
- hprec->m_bFree.store( true, atomics::memory_order_relaxed );
+ src.interthread_clear();
+ hprec->m_bFree.store( true, atomics::memory_order_release );
hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
scan( pThis );
{
st.clear();
# ifdef CDS_ENABLE_HPSTAT
- for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ) )
+ for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed ))
{
+ CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
++st.thread_rec_count;
st.guard_allocated += hprec->hazards_.alloc_guard_count_;
st.guard_freed += hprec->hazards_.free_guard_count_;
st.free_count += hprec->free_count_;
st.scan_count += hprec->scan_count_;
st.help_scan_count += hprec->help_scan_count_;
+ CDS_TSAN_ANNOTATE_IGNORE_READS_END;
}
# endif
}