</Link>\r
</ItemDefinitionGroup>\r
<ItemGroup>\r
+ <ClCompile Include="..\..\..\src\dhp_gc.cpp" />\r
<ClCompile Include="..\..\..\src\dllmain.cpp" />\r
- <ClCompile Include="..\..\..\src\hzp_gc.cpp" />\r
+ <ClCompile Include="..\..\..\src\hp_gc.cpp" />\r
<ClCompile Include="..\..\..\src\init.cpp" />\r
<ClCompile Include="..\..\..\src\michael_heap.cpp" />\r
- <ClCompile Include="..\..\..\src\ptb_gc.cpp" />\r
<ClCompile Include="..\..\..\src\topology_hpux.cpp" />\r
<ClCompile Include="..\..\..\src\topology_linux.cpp" />\r
<ClCompile Include="..\..\..\src\topology_osx.cpp" />\r
<ClInclude Include="..\..\..\cds\urcu\options.h" />\r
<ClInclude Include="..\..\..\cds\urcu\signal_buffered.h" />\r
<ClInclude Include="..\..\..\cds\urcu\signal_threaded.h" />\r
- <ClInclude Include="..\..\..\src\hzp_const.h" />\r
<ClInclude Include="..\..\..\cds\init.h" />\r
<ClInclude Include="..\..\..\cds\refcounter.h" />\r
<ClInclude Include="..\..\..\cds\version.h" />\r
<ClInclude Include="..\..\..\cds\container\details\make_lazy_list.h" />\r
<ClInclude Include="..\..\..\cds\container\details\make_michael_kvlist.h" />\r
<ClInclude Include="..\..\..\cds\container\details\make_michael_list.h" />\r
+ <ClInclude Include="..\..\..\src\hp_const.h" />\r
</ItemGroup>\r
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
<ImportGroup Label="ExtensionTargets">\r
<ClCompile Include="..\..\..\src\dllmain.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
- <ClCompile Include="..\..\..\src\hzp_gc.cpp">\r
- <Filter>Source Files</Filter>\r
- </ClCompile>\r
<ClCompile Include="..\..\..\src\init.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
<ClCompile Include="..\..\..\src\michael_heap.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
- <ClCompile Include="..\..\..\src\ptb_gc.cpp">\r
- <Filter>Source Files</Filter>\r
- </ClCompile>\r
<ClCompile Include="..\..\..\src\topology_hpux.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
<ClCompile Include="..\..\..\src\topology_osx.cpp">\r
<Filter>Source Files</Filter>\r
</ClCompile>\r
+ <ClCompile Include="..\..\..\src\hp_gc.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\..\src\dhp_gc.cpp">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
</ItemGroup>\r
<ItemGroup>\r
- <ClInclude Include="..\..\..\src\hzp_const.h">\r
- <Filter>Source Files</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\init.h">\r
<Filter>Header Files\cds</Filter>\r
</ClInclude>\r
<ClInclude Include="..\..\..\cds\gc\hp\hp_impl.h">\r
<Filter>Header Files\cds\gc\hp</Filter>\r
</ClInclude>\r
+ <ClInclude Include="..\..\..\src\hp_const.h">\r
+ <Filter>Source Files</Filter>\r
+ </ClInclude>\r
</ItemGroup>\r
</Project>
\ No newline at end of file
-CDS_SOURCES=src/hzp_gc.cpp \\r
+CDS_SOURCES= \\r
+ src/hp_gc.cpp \\r
src/init.cpp \\r
- src/ptb_gc.cpp \\r
+ src/dhp_gc.cpp \\r
src/urcu_gp.cpp \\r
src/urcu_sh.cpp \\r
src/michael_heap.cpp \\r
--- /dev/null
+//$$CDS-header$$
+
+// Dynamic Hazard Pointer (DHP, formerly Pass-the-Buck, PTB) memory manager implementation
+
+#include <algorithm> // std::fill
+#include <functional> // std::hash
+
+#include <cds/gc/dhp/dhp.h>
+#include <cds/algo/int_algo.h>
+
+namespace cds { namespace gc { namespace ptb {
+
+ namespace details {
+
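+        // Hash set of retired pointers used by liberate().
+        // Nodes with distinct pointer values are chained within a bucket through m_pNext;
+        // nodes that retire the same pointer value are additionally chained through m_pNextFree.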
+ class liberate_set {
+ typedef retired_ptr_node * item_type;
+ typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR> allocator_type;
+
+ size_t const m_nBucketCount;
+ item_type * m_Buckets;
+
+ item_type& bucket( retired_ptr_node& node )
+ {
+ return bucket( node.m_ptr.m_p );
+ }
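+            // m_nBucketCount is a power of two (asserted in the ctor), so masking
+            // with (m_nBucketCount - 1) maps the hash value to a bucket index.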
+ item_type& bucket( guard_data::guarded_ptr p )
+ {
+ return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
+ }
+
+ public:
+ liberate_set( size_t nBucketCount )
+ : m_nBucketCount( nBucketCount )
+ {
+ assert( nBucketCount > 0 );
+ assert( (nBucketCount & (nBucketCount - 1)) == 0 );
+
+ m_Buckets = allocator_type().NewArray( nBucketCount );
+ std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
+ }
+
+ ~liberate_set()
+ {
+ allocator_type().Delete( m_Buckets, m_nBucketCount );
+ }
+
+ void insert( retired_ptr_node& node )
+ {
+ node.m_pNext = nullptr;
+
+ item_type& refBucket = bucket( node );
+ if ( refBucket ) {
+ item_type p = refBucket;
+ do {
+ if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
+ assert( node.m_pNextFree == nullptr );
+
+ node.m_pNextFree = p->m_pNextFree;
+ p->m_pNextFree = &node;
+ return;
+ }
+ p = p->m_pNext;
+ } while ( p );
+
+ node.m_pNext = refBucket;
+ }
+ refBucket = &node;
+ }
+
+ item_type erase( guard_data::guarded_ptr ptr )
+ {
+ item_type& refBucket = bucket( ptr );
+ item_type p = refBucket;
+ item_type pPrev = nullptr;
+
+ while ( p ) {
+ if ( p->m_ptr.m_p == ptr ) {
+ if ( pPrev )
+ pPrev->m_pNext = p->m_pNext;
+ else
+ refBucket = p->m_pNext;
+ p->m_pNext = nullptr;
+ return p;
+ }
+ pPrev = p;
+ p = p->m_pNext;
+ }
+
+ return nullptr;
+ }
+
+ typedef std::pair<item_type, item_type> list_range;
+
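+            // Frees every retired pointer in the set and links all nodes into one
+            // list through m_pNextFree so the caller can return them to the allocator.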
+ list_range free_all()
+ {
+ item_type pTail = nullptr;
+ list_range ret = std::make_pair( pTail, pTail );
+
+ item_type const * pEndBucket = m_Buckets + m_nBucketCount;
+ for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
+ item_type pBucket = *ppBucket;
+ if ( pBucket ) {
+ if ( !ret.first )
+ ret.first = pBucket;
+ else
+ pTail->m_pNextFree = pBucket;
+
+ pTail = pBucket;
+ for (;;) {
+ item_type pNext = pTail->m_pNext;
+ pTail->m_ptr.free();
+ pTail->m_pNext = nullptr;
+
+ while ( pTail->m_pNextFree ) {
+ pTail = pTail->m_pNextFree;
+ pTail->m_ptr.free();
+ pTail->m_pNext = nullptr;
+ }
+
+ if ( pNext )
+ pTail = pTail->m_pNextFree = pNext;
+ else
+ break;
+ }
+ }
+ }
+
+ if ( pTail )
+ pTail->m_pNextFree = nullptr;
+ ret.second = pTail;
+ return ret;
+ }
+ };
+ }
+
+ GarbageCollector * GarbageCollector::m_pManager = nullptr;
+
+ void CDS_STDCALL GarbageCollector::Construct(
+ size_t nLiberateThreshold
+ , size_t nInitialThreadGuardCount
+ )
+ {
+ if ( !m_pManager ) {
+ m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
+ }
+ }
+
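+    // Usage sketch: Construct()/Destruct() manage the process-wide singleton and are
+    // expected to be called once at startup and shutdown (usually via a higher-level
+    // GC wrapper rather than directly):
+    //      cds::gc::ptb::GarbageCollector::Construct();    // default thresholds
+    //      // ... threads use guards and retire pointers ...
+    //      cds::gc::ptb::GarbageCollector::Destruct();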
+ void CDS_STDCALL GarbageCollector::Destruct()
+ {
+ if ( m_pManager ) {
+ delete m_pManager;
+ m_pManager = nullptr;
+ }
+ }
+
+ GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
+ : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
+ , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
+ //, m_nInLiberate(0)
+ {
+ }
+
+ GarbageCollector::~GarbageCollector()
+ {
+ liberate();
+
+#if 0
+ details::retired_ptr_node * pHead = nullptr;
+ details::retired_ptr_node * pTail = nullptr;
+
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
+ details::guard_data::handoff_ptr h = pGuard->pHandOff;
+ pGuard->pHandOff = nullptr;
+ while ( h ) {
+ details::guard_data::handoff_ptr pNext = h->m_pNextFree;
+ if ( h->m_ptr.m_p )
+ h->m_ptr.free();
+ if ( !pHead )
+ pTail = pHead = h;
+ else
+ pTail = pTail->m_pNextFree = h;
+ h = pNext;
+ }
+ }
+ if ( pHead )
+ m_RetiredAllocator.free_range( pHead, pTail );
+#endif
+ }
+
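+    // liberate() privatizes the global buffer of retired pointers, pushes every
+    // pointer that is still guarded back into the buffer, and frees the rest.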
+ void GarbageCollector::liberate()
+ {
+ details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
+ if ( retiredList.first ) {
+
+ size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
+ details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
+
+            // Insert the privatized list of retired pointers into the liberate set
+ details::retired_ptr_node * pHead = retiredList.first;
+ while ( pHead ) {
+ details::retired_ptr_node * pNext = pHead->m_pNext;
+ pHead->m_pNextFree = nullptr;
+ set.insert( *pHead );
+ pHead = pNext;
+ }
+
+ // Liberate cycle
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
+ {
+ // get guarded pointer
+ details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
+
+ if ( valGuarded ) {
+ details::retired_ptr_node * pRetired = set.erase( valGuarded );
+ if ( pRetired ) {
+                        // The retired pointer is still guarded.
+                        // pRetired heads a list of retired pointers whose m_ptr.m_p fields are equal;
+                        // the list is linked through the m_pNextFree field.
+
+ do {
+ details::retired_ptr_node * pNext = pRetired->m_pNextFree;
+ m_RetiredBuffer.push( *pRetired );
+ pRetired = pNext;
+ } while ( pRetired );
+ }
+ }
+ }
+
+ // Free all retired pointers
+ details::liberate_set::list_range range = set.free_all();
+
+ m_RetiredAllocator.inc_epoch();
+
+ if ( range.first ) {
+ assert( range.second != nullptr );
+ m_RetiredAllocator.free_range( range.first, range.second );
+ }
+ else {
+ // liberate cycle did not free any retired pointer - double liberate threshold
+ m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
+ }
+ }
+ }
+
+#if 0
+ void GarbageCollector::liberate( details::liberate_set& set )
+ {
+ details::guard_data::handoff_ptr const nullHandOff = nullptr;
+
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
+ {
+ // get guarded pointer
+ details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
+ details::guard_data::handoff_ptr h;
+
+ if ( valGuarded ) {
+ details::retired_ptr_node * pRetired = set.erase( valGuarded );
+ if ( pRetired ) {
+ // Retired pointer is being guarded
+
+ // pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
+ // List is linked on m_pNextFree field
+
+ // Now, try to set retired node pRetired as a hand-off node for the guard
+ cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+ if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
+ if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
+ h = nullHandOff ; //nullptr;
+ details::retired_ptr_node * pTail = pGuard->pHandOff;
+ while ( pTail->m_pNextFree )
+ pTail = pTail->m_pNextFree;
+ pTail->m_pNextFree = pRetired;
+ }
+ else {
+ // swap h and pGuard->pHandOff
+ h = pGuard->pHandOff;
+ pGuard->pHandOff = pRetired;
+ }
+ }
+ else
+ h = pRetired;
+ }
+ else {
+ cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+ h = pGuard->pHandOff;
+ if ( h ) {
+ if ( h->m_ptr.m_p != valGuarded )
+ pGuard->pHandOff = nullHandOff;
+ else
+ h = nullHandOff;
+ }
+ }
+ }
+ else {
+ cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
+ h = pGuard->pHandOff;
+ pGuard->pHandOff = nullHandOff;
+ }
+
+ // h is the head of a list linked on m_pNextFree field
+ if ( h ) {
+ set.insert( *h );
+ }
+ }
+ }
+#endif
+}}} // namespace cds::gc::ptb
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDSIMPL_HP_CONST_H
+#define __CDSIMPL_HP_CONST_H
+
+/*
+ File: hp_const.h
+
+ Michael's Hazard Pointer reclamation schema global constants
+ Gidenstam's reclamation schema global constants
+
+ Editions:
+ 2008.03.10 Maxim.Khiszinsky Created
+*/
+
+namespace cds { namespace gc {
+
+ //---------------------------------------------------------------
+ // Hazard Pointers reclamation schema constants
+ namespace hzp {
+ // Max number of threads expected
+ static const size_t c_nMaxThreadCount = 100;
+
+ // Number of Hazard Pointers per thread
+ static const size_t c_nHazardPointerPerThread = 8;
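+
+        // These constants are defaults only; actual values may be overridden
+        // via the GarbageCollector::Construct() arguments.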
+ } // namespace hzp
+
+} /* namespace gc */ } /* namespace cds */
+
+#endif // #ifndef __CDSIMPL_HP_CONST_H
--- /dev/null
+//$$CDS-header$$
+
+/*
+    File: hp_gc.cpp
+
+ Hazard Pointers memory reclamation strategy implementation
+
+ Editions:
+ 2008.02.10 Maxim.Khiszinsky Created
+*/
+
+#include <cds/gc/hp/hp.h>
+
+#include <algorithm> // std::sort
+#include "hp_const.h"
+
+#define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
+
+namespace cds { namespace gc {
+ namespace hzp {
+
+ /// Max array size of retired pointers
+ static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
+
+ GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
+
+ void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
+ {
+ if ( !m_pHZPManager ) {
+ m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
+ }
+ }
+
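+        // Usage sketch: the singleton must be constructed before any guarded
+        // operation and destroyed at shutdown, e.g.:
+        //      cds::gc::hzp::GarbageCollector::Construct();    // default sizes
+        //      // ... threads attach, use hazard pointers, detach ...
+        //      cds::gc::hzp::GarbageCollector::Destruct( true );
+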
+ void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
+ {
+ if ( m_pHZPManager ) {
+ if ( bDetachAll )
+ m_pHZPManager->detachAllThread();
+
+ delete m_pHZPManager;
+ m_pHZPManager = nullptr;
+ }
+ }
+
+ GarbageCollector::GarbageCollector(
+ size_t nHazardPtrCount,
+ size_t nMaxThreadCount,
+ size_t nMaxRetiredPtrCount,
+ scan_type nScanType
+ )
+ : m_pListHead( nullptr )
+ ,m_bStatEnabled( true )
+ ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
+ ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
+ ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
+ ,m_nScanType( nScanType )
+ {}
+
+ GarbageCollector::~GarbageCollector()
+ {
+ CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
+ CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
+
+ hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
+ m_pListHead.store( nullptr, atomics::memory_order_relaxed );
+
+ hplist_node * pNext = nullptr;
+ for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
+ assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+ || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+ || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
+ );
+ details::retired_vector& vect = hprec->m_arrRetired;
+ details::retired_vector::iterator itRetired = vect.begin();
+ details::retired_vector::iterator itRetiredEnd = vect.end();
+ while ( itRetired != itRetiredEnd ) {
+ DeletePtr( *itRetired );
+ ++itRetired;
+ }
+ vect.clear();
+ pNext = hprec->m_pNextNode;
+ hprec->m_bFree.store( true, atomics::memory_order_relaxed );
+ DeleteHPRec( hprec );
+ }
+ }
+
+ inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
+ return new hplist_node( *this );
+ }
+
+ inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
+ assert( pNode->m_arrRetired.size() == 0 );
+ delete pNode;
+ }
+
+ inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
+ p.free();
+ }
+
+ details::HPRec * GarbageCollector::AllocateHPRec()
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
+
+ hplist_node * hprec;
+ const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+ const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+
+ // First try to reuse a retired (non-active) HP record
+ for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+ cds::OS::ThreadId thId = nullThreadId;
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
+ continue;
+ hprec->m_bFree.store( false, atomics::memory_order_release );
+ return hprec;
+ }
+
+ // No HP records available for reuse
+ // Allocate and push a new HP record
+ hprec = NewHPRec();
+ hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+ hprec->m_bFree.store( false, atomics::memory_order_relaxed );
+
+ atomics::atomic_thread_fence( atomics::memory_order_release );
+
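+            // Link the new record into the global list with a lock-free push
+            // onto the list head, retrying the CAS until it succeeds.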
+ hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
+ do {
+ hprec->m_pNextNode = pOldHead;
+ } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
+
+ return hprec;
+ }
+
+ void GarbageCollector::RetireHPRec( details::HPRec * pRec )
+ {
+ assert( pRec != nullptr );
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
+
+ pRec->clear();
+ Scan( pRec );
+ hplist_node * pNode = static_cast<hplist_node *>( pRec );
+ pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
+ }
+
+ void GarbageCollector::detachAllThread()
+ {
+ hplist_node * pNext = nullptr;
+ const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
+ pNext = hprec->m_pNextNode;
+ if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
+ RetireHPRec( hprec );
+ }
+ }
+ }
+
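+        // Classic two-stage scan: collect all non-null hazard pointers into plist,
+        // then free each retired pointer that is not found in plist.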
+ void GarbageCollector::classic_scan( details::HPRec * pRec )
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+
+ std::vector< void * > plist;
+ plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
+ assert( plist.size() == 0 );
+
+ // Stage 1: Scan HP list and insert non-null values in plist
+
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+
+ while ( pNode ) {
+ for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
+ void * hptr = pNode->m_hzp[i];
+ if ( hptr )
+ plist.push_back( hptr );
+ }
+ pNode = pNode->m_pNextNode;
+ }
+
+            // Sort plist to allow binary search in Stage 2
+ std::sort( plist.begin(), plist.end() );
+
+ // Stage 2: Search plist
+ details::retired_vector& arrRetired = pRec->m_arrRetired;
+
+ details::retired_vector::iterator itRetired = arrRetired.begin();
+ details::retired_vector::iterator itRetiredEnd = arrRetired.end();
+ // arrRetired is not a std::vector!
+            // clear() just resets the item counter to zero; the items themselves are not destroyed
+ arrRetired.clear();
+
+ std::vector< void * >::iterator itBegin = plist.begin();
+ std::vector< void * >::iterator itEnd = plist.end();
+ while ( itRetired != itRetiredEnd ) {
+ if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
+ arrRetired.push( *itRetired );
+ }
+ else
+ DeletePtr( *itRetired );
+ ++itRetired;
+ }
+ }
+
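+        // In-place variant of Scan: instead of a separate plist, guarded entries
+        // are marked directly in the retired array via the pointer's LSB.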
+ void GarbageCollector::inplace_scan( details::HPRec * pRec )
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
+
+            // The in-place scan algorithm uses the LSB of each retired pointer as an internal mark.
+            // This is correct only if all retired pointers are at least 2-byte aligned (LSB is zero);
+            // otherwise we fall back to the classic scan algorithm.
+
+            // Check that all retired pointers have a zero LSB.
+            // The LSB marks pointers that cannot be deleted yet.
+ details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
+ details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
+ for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
+ if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
+                    // found a pointer with its LSB set - fall back to classic_scan
+ classic_scan( pRec );
+ return;
+ }
+ }
+
+ // Sort retired pointer array
+ std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
+
+ // Search guarded pointers in retired array
+
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
+
+ while ( pNode ) {
+ for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
+ void * hptr = pNode->m_hzp[i];
+ if ( hptr ) {
+ details::retired_ptr dummyRetired;
+ dummyRetired.m_p = hptr;
+ details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
+ if ( it != itRetiredEnd && it->m_p == hptr ) {
+ // Mark retired pointer as guarded
+ it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) | 1);
+ }
+ }
+ }
+ pNode = pNode->m_pNextNode;
+ }
+
+ // Move all marked pointers to head of array
+ details::retired_vector::iterator itInsert = itRetired;
+ for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
+ if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
+ it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) & ~1);
+ *itInsert = *it;
+ ++itInsert;
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
+ }
+ else {
+ // Retired pointer may be freed
+ DeletePtr( *it );
+ }
+ }
+ pRec->m_arrRetired.size( itInsert - itRetired );
+ }
+
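+        // HelpScan: adopt the retired pointers of HP records abandoned by dead
+        // threads into pThis, then mark those records free for reuse.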
+ void GarbageCollector::HelpScan( details::HPRec * pThis )
+ {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
+
+ assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+
+ const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
+ const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+
+                // If m_bFree == true then hprec->m_arrRetired is empty - we can skip this record
+ if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
+ continue;
+
+                // Take ownership of hprec if it is not owned by a live thread.
+                // Several threads may run HelpScan concurrently, so only atomic operations are used.
+ {
+ cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
+ if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ continue;
+ }
+ else {
+ curOwner = nullThreadId;
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ continue;
+ }
+ }
+
+                // We have acquired hprec. Now check whether it has retired pointers;
+                // if so, move them to pThis, which is private to the current thread.
+ details::retired_vector& src = hprec->m_arrRetired;
+ details::retired_vector& dest = pThis->m_arrRetired;
+ assert( !dest.isFull());
+ details::retired_vector::iterator itRetired = src.begin();
+ details::retired_vector::iterator itRetiredEnd = src.end();
+ while ( itRetired != itRetiredEnd ) {
+ dest.push( *itRetired );
+ if ( dest.isFull()) {
+ CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
+ Scan( pThis );
+ }
+ ++itRetired;
+ }
+ src.clear();
+
+ hprec->m_bFree.store(true, atomics::memory_order_release);
+ hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
+ }
+ }
+
+ GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
+ {
+ stat.nHPCount = m_nHazardPointerCount;
+ stat.nMaxThreadCount = m_nMaxThreadCount;
+ stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
+ stat.nHPRecSize = sizeof( hplist_node )
+ + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
+
+ stat.nHPRecAllocated =
+ stat.nHPRecUsed =
+ stat.nTotalRetiredPtrCount =
+ stat.nRetiredPtrInFreeHPRecs = 0;
+
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ ++stat.nHPRecAllocated;
+ stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
+
+ if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
+ // Free HP record
+ stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
+ }
+ else {
+ // Used HP record
+ ++stat.nHPRecUsed;
+ }
+ }
+
+ // Events
+ stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
+ stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
+ stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
+ stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
+
+ stat.evcScanCall = m_Stat.m_ScanCallCount;
+ stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
+ stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
+
+ stat.evcDeletedNode = m_Stat.m_DeletedNode;
+ stat.evcDeferredNode = m_Stat.m_DeferredNode;
+
+ return stat;
+ }
+
+
+ } //namespace hzp
+}} // namespace cds::gc
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDSIMPL_HZP_CONST_H
-#define __CDSIMPL_HZP_CONST_H
-
-/*
- File: hzp_const.h
-
- Michael's Hazard Pointer reclamation schema global constants
- Gidenstam's reclamation schema global constants
-
- Editions:
- 2008.03.10 Maxim.Khiszinsky Created
-*/
-
-namespace cds { namespace gc {
-
- //---------------------------------------------------------------
- // Hazard Pointers reclamation schema constants
- namespace hzp {
- // Max number of threads expected
- static const size_t c_nMaxThreadCount = 100;
-
- // Number of Hazard Pointers per thread
- static const size_t c_nHazardPointerPerThread = 8;
- } // namespace hzp
-
-} /* namespace gc */ } /* namespace cds */
-
-#endif // #ifndef __CDSIMPL_HZP_CONST_H
+++ /dev/null
-//$$CDS-header$$
-
-/*
- File: hzp_gc.cpp
-
- Hazard Pointers memory reclamation strategy implementation
-
- Editions:
- 2008.02.10 Maxim.Khiszinsky Created
-*/
-
-#include <cds/gc/hp/hp.h>
-
-#include <algorithm> // std::sort
-#include "hzp_const.h"
-
-#define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
-
-namespace cds { namespace gc {
- namespace hzp {
-
- /// Max array size of retired pointers
- static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
-
- GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
-
- void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
- {
- if ( !m_pHZPManager ) {
- m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
- }
- }
-
- void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
- {
- if ( m_pHZPManager ) {
- if ( bDetachAll )
- m_pHZPManager->detachAllThread();
-
- delete m_pHZPManager;
- m_pHZPManager = nullptr;
- }
- }
-
- GarbageCollector::GarbageCollector(
- size_t nHazardPtrCount,
- size_t nMaxThreadCount,
- size_t nMaxRetiredPtrCount,
- scan_type nScanType
- )
- : m_pListHead( nullptr )
- ,m_bStatEnabled( true )
- ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
- ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
- ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
- ,m_nScanType( nScanType )
- {}
-
- GarbageCollector::~GarbageCollector()
- {
- CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
- CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
-
- hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
- m_pListHead.store( nullptr, atomics::memory_order_relaxed );
-
- hplist_node * pNext = nullptr;
- for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
- assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
- || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
- || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
- );
- details::retired_vector& vect = hprec->m_arrRetired;
- details::retired_vector::iterator itRetired = vect.begin();
- details::retired_vector::iterator itRetiredEnd = vect.end();
- while ( itRetired != itRetiredEnd ) {
- DeletePtr( *itRetired );
- ++itRetired;
- }
- vect.clear();
- pNext = hprec->m_pNextNode;
- hprec->m_bFree.store( true, atomics::memory_order_relaxed );
- DeleteHPRec( hprec );
- }
- }
-
- inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
- return new hplist_node( *this );
- }
-
- inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
- assert( pNode->m_arrRetired.size() == 0 );
- delete pNode;
- }
-
- inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
- p.free();
- }
-
- details::HPRec * GarbageCollector::AllocateHPRec()
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
-
- hplist_node * hprec;
- const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
-
- // First try to reuse a retired (non-active) HP record
- for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
- cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
- continue;
- hprec->m_bFree.store( false, atomics::memory_order_release );
- return hprec;
- }
-
- // No HP records available for reuse
- // Allocate and push a new HP record
- hprec = NewHPRec();
- hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
- hprec->m_bFree.store( false, atomics::memory_order_relaxed );
-
- atomics::atomic_thread_fence( atomics::memory_order_release );
-
- hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
- do {
- hprec->m_pNextNode = pOldHead;
- } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
-
- return hprec;
- }
-
- void GarbageCollector::RetireHPRec( details::HPRec * pRec )
- {
- assert( pRec != nullptr );
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
-
- pRec->clear();
- Scan( pRec );
- hplist_node * pNode = static_cast<hplist_node *>( pRec );
- pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
- }
-
- void GarbageCollector::detachAllThread()
- {
- hplist_node * pNext = nullptr;
- const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
- pNext = hprec->m_pNextNode;
- if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
- RetireHPRec( hprec );
- }
- }
- }
-
- void GarbageCollector::classic_scan( details::HPRec * pRec )
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
-
- std::vector< void * > plist;
- plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
- assert( plist.size() == 0 );
-
- // Stage 1: Scan HP list and insert non-null values in plist
-
- hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
-
- while ( pNode ) {
- for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
- void * hptr = pNode->m_hzp[i];
- if ( hptr )
- plist.push_back( hptr );
- }
- pNode = pNode->m_pNextNode;
- }
-
- // Sort plist to simplify search in
- std::sort( plist.begin(), plist.end() );
-
- // Stage 2: Search plist
- details::retired_vector& arrRetired = pRec->m_arrRetired;
-
- details::retired_vector::iterator itRetired = arrRetired.begin();
- details::retired_vector::iterator itRetiredEnd = arrRetired.end();
- // arrRetired is not a std::vector!
- // clear is just set up item counter to 0, the items is not destroying
- arrRetired.clear();
-
- std::vector< void * >::iterator itBegin = plist.begin();
- std::vector< void * >::iterator itEnd = plist.end();
- while ( itRetired != itRetiredEnd ) {
- if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
- arrRetired.push( *itRetired );
- }
- else
- DeletePtr( *itRetired );
- ++itRetired;
- }
- }
-
- void GarbageCollector::inplace_scan( details::HPRec * pRec )
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
-
- // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
- // It is correct if all retired pointers are ar least 2-byte aligned (LSB is zero).
- // If it is wrong, we use classic scan algorithm
-
- // Check if all retired pointers has zero LSB
- // LSB is used for marking pointers that cannot be deleted yet
- details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
- details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
- for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
- if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
- // found a pointer with LSB bit set - use classic_scan
- classic_scan( pRec );
- return;
- }
- }
-
- // Sort retired pointer array
- std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
-
- // Search guarded pointers in retired array
-
- hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
-
- while ( pNode ) {
- for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
- void * hptr = pNode->m_hzp[i];
- if ( hptr ) {
- details::retired_ptr dummyRetired;
- dummyRetired.m_p = hptr;
- details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
- if ( it != itRetiredEnd && it->m_p == hptr ) {
- // Mark retired pointer as guarded
- it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) | 1);
- }
- }
- }
- pNode = pNode->m_pNextNode;
- }
-
- // Move all marked pointers to head of array
- details::retired_vector::iterator itInsert = itRetired;
- for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
- if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
- it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) & ~1);
- *itInsert = *it;
- ++itInsert;
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
- }
- else {
- // Retired pointer may be freed
- DeletePtr( *it );
- }
- }
- pRec->m_arrRetired.size( itInsert - itRetired );
- }
-
- void GarbageCollector::HelpScan( details::HPRec * pThis )
- {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
-
- assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
-
- const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
-
- // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
- if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
- continue;
-
- // Owns hprec if it is empty.
- // Several threads may work concurrently so we use atomic technique only.
- {
- cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
- if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
- continue;
- }
- else {
- curOwner = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
- continue;
- }
- }
-
- // We own the thread successfully. Now, we can see whether HPRec has retired pointers.
- // If it has ones then we move to pThis that is private for current thread.
- details::retired_vector& src = hprec->m_arrRetired;
- details::retired_vector& dest = pThis->m_arrRetired;
- assert( !dest.isFull());
- details::retired_vector::iterator itRetired = src.begin();
- details::retired_vector::iterator itRetiredEnd = src.end();
- while ( itRetired != itRetiredEnd ) {
- dest.push( *itRetired );
- if ( dest.isFull()) {
- CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
- Scan( pThis );
- }
- ++itRetired;
- }
- src.clear();
-
- hprec->m_bFree.store(true, atomics::memory_order_release);
- hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
- }
- }
-
- GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
- {
- stat.nHPCount = m_nHazardPointerCount;
- stat.nMaxThreadCount = m_nMaxThreadCount;
- stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
- stat.nHPRecSize = sizeof( hplist_node )
- + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
-
- stat.nHPRecAllocated =
- stat.nHPRecUsed =
- stat.nTotalRetiredPtrCount =
- stat.nRetiredPtrInFreeHPRecs = 0;
-
- for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
- ++stat.nHPRecAllocated;
- stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
-
- if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
- // Free HP record
- stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
- }
- else {
- // Used HP record
- ++stat.nHPRecUsed;
- }
- }
-
- // Events
- stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
- stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
- stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
- stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
-
- stat.evcScanCall = m_Stat.m_ScanCallCount;
- stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
- stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
-
- stat.evcDeletedNode = m_Stat.m_DeletedNode;
- stat.evcDeferredNode = m_Stat.m_DeferredNode;
-
- return stat;
- }
-
-
- } //namespace hzp
-}} // namespace cds::gc
+++ /dev/null
-//$$CDS-header$$
-
-// Pass The Buck (PTB) Memory manager implementation
-
-#include <algorithm> // std::fill
-#include <functional> // std::hash
-
-#include <cds/gc/dhp/dhp.h>
-#include <cds/algo/int_algo.h>
-
-namespace cds { namespace gc { namespace ptb {
-
- namespace details {
-
- class liberate_set {
- typedef retired_ptr_node * item_type;
- typedef cds::details::Allocator<item_type, CDS_DEFAULT_ALLOCATOR> allocator_type;
-
- size_t const m_nBucketCount;
- item_type * m_Buckets;
-
- item_type& bucket( retired_ptr_node& node )
- {
- return bucket( node.m_ptr.m_p );
- }
- item_type& bucket( guard_data::guarded_ptr p )
- {
- return m_Buckets[ std::hash<guard_data::guarded_ptr>()( p ) & (m_nBucketCount - 1) ];
- }
-
- public:
- liberate_set( size_t nBucketCount )
- : m_nBucketCount( nBucketCount )
- {
- assert( nBucketCount > 0 );
- assert( (nBucketCount & (nBucketCount - 1)) == 0 );
-
- m_Buckets = allocator_type().NewArray( nBucketCount );
- std::fill( m_Buckets, m_Buckets + nBucketCount, nullptr );
- }
-
- ~liberate_set()
- {
- allocator_type().Delete( m_Buckets, m_nBucketCount );
- }
-
- void insert( retired_ptr_node& node )
- {
- node.m_pNext = nullptr;
-
- item_type& refBucket = bucket( node );
- if ( refBucket ) {
- item_type p = refBucket;
- do {
- if ( p->m_ptr.m_p == node.m_ptr.m_p ) {
- assert( node.m_pNextFree == nullptr );
-
- node.m_pNextFree = p->m_pNextFree;
- p->m_pNextFree = &node;
- return;
- }
- p = p->m_pNext;
- } while ( p );
-
- node.m_pNext = refBucket;
- }
- refBucket = &node;
- }
-
- item_type erase( guard_data::guarded_ptr ptr )
- {
- item_type& refBucket = bucket( ptr );
- item_type p = refBucket;
- item_type pPrev = nullptr;
-
- while ( p ) {
- if ( p->m_ptr.m_p == ptr ) {
- if ( pPrev )
- pPrev->m_pNext = p->m_pNext;
- else
- refBucket = p->m_pNext;
- p->m_pNext = nullptr;
- return p;
- }
- pPrev = p;
- p = p->m_pNext;
- }
-
- return nullptr;
- }
-
- typedef std::pair<item_type, item_type> list_range;
-
- list_range free_all()
- {
- item_type pTail = nullptr;
- list_range ret = std::make_pair( pTail, pTail );
-
- item_type const * pEndBucket = m_Buckets + m_nBucketCount;
- for ( item_type * ppBucket = m_Buckets; ppBucket < pEndBucket; ++ppBucket ) {
- item_type pBucket = *ppBucket;
- if ( pBucket ) {
- if ( !ret.first )
- ret.first = pBucket;
- else
- pTail->m_pNextFree = pBucket;
-
- pTail = pBucket;
- for (;;) {
- item_type pNext = pTail->m_pNext;
- pTail->m_ptr.free();
- pTail->m_pNext = nullptr;
-
- while ( pTail->m_pNextFree ) {
- pTail = pTail->m_pNextFree;
- pTail->m_ptr.free();
- pTail->m_pNext = nullptr;
- }
-
- if ( pNext )
- pTail = pTail->m_pNextFree = pNext;
- else
- break;
- }
- }
- }
-
- if ( pTail )
- pTail->m_pNextFree = nullptr;
- ret.second = pTail;
- return ret;
- }
- };
- }
-
- GarbageCollector * GarbageCollector::m_pManager = nullptr;
-
- void CDS_STDCALL GarbageCollector::Construct(
- size_t nLiberateThreshold
- , size_t nInitialThreadGuardCount
- )
- {
- if ( !m_pManager ) {
- m_pManager = new GarbageCollector( nLiberateThreshold, nInitialThreadGuardCount );
- }
- }
-
- void CDS_STDCALL GarbageCollector::Destruct()
- {
- if ( m_pManager ) {
- delete m_pManager;
- m_pManager = nullptr;
- }
- }
-
- GarbageCollector::GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount )
- : m_nLiberateThreshold( nLiberateThreshold ? nLiberateThreshold : 1024 )
- , m_nInitialThreadGuardCount( nInitialThreadGuardCount ? nInitialThreadGuardCount : 8 )
- //, m_nInLiberate(0)
- {
- }
-
- GarbageCollector::~GarbageCollector()
- {
- liberate();
-
-#if 0
- details::retired_ptr_node * pHead = nullptr;
- details::retired_ptr_node * pTail = nullptr;
-
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
- details::guard_data::handoff_ptr h = pGuard->pHandOff;
- pGuard->pHandOff = nullptr;
- while ( h ) {
- details::guard_data::handoff_ptr pNext = h->m_pNextFree;
- if ( h->m_ptr.m_p )
- h->m_ptr.free();
- if ( !pHead )
- pTail = pHead = h;
- else
- pTail = pTail->m_pNextFree = h;
- h = pNext;
- }
- }
- if ( pHead )
- m_RetiredAllocator.free_range( pHead, pTail );
-#endif
- }
-
- void GarbageCollector::liberate()
- {
- details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
- if ( retiredList.first ) {
-
- size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
- details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
-
- // Get list of retired pointers
- details::retired_ptr_node * pHead = retiredList.first;
- while ( pHead ) {
- details::retired_ptr_node * pNext = pHead->m_pNext;
- pHead->m_pNextFree = nullptr;
- set.insert( *pHead );
- pHead = pNext;
- }
-
- // Liberate cycle
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
- {
- // get guarded pointer
- details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
-
- if ( valGuarded ) {
- details::retired_ptr_node * pRetired = set.erase( valGuarded );
- if ( pRetired ) {
- // Retired pointer is being guarded
- // pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
- // List is linked on m_pNextFree field
-
- do {
- details::retired_ptr_node * pNext = pRetired->m_pNextFree;
- m_RetiredBuffer.push( *pRetired );
- pRetired = pNext;
- } while ( pRetired );
- }
- }
- }
-
- // Free all retired pointers
- details::liberate_set::list_range range = set.free_all();
-
- m_RetiredAllocator.inc_epoch();
-
- if ( range.first ) {
- assert( range.second != nullptr );
- m_RetiredAllocator.free_range( range.first, range.second );
- }
- else {
- // liberate cycle did not free any retired pointer - double liberate threshold
- m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
- }
- }
- }
-
-#if 0
- void GarbageCollector::liberate( details::liberate_set& set )
- {
- details::guard_data::handoff_ptr const nullHandOff = nullptr;
-
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
- {
- // get guarded pointer
- details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
- details::guard_data::handoff_ptr h;
-
- if ( valGuarded ) {
- details::retired_ptr_node * pRetired = set.erase( valGuarded );
- if ( pRetired ) {
- // Retired pointer is being guarded
-
- // pRetired is the head of retired pointers list for which the m_ptr.m_p field is equal
- // List is linked on m_pNextFree field
-
- // Now, try to set retired node pRetired as a hand-off node for the guard
- cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
- if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
- if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
- h = nullHandOff ; //nullptr;
- details::retired_ptr_node * pTail = pGuard->pHandOff;
- while ( pTail->m_pNextFree )
- pTail = pTail->m_pNextFree;
- pTail->m_pNextFree = pRetired;
- }
- else {
- // swap h and pGuard->pHandOff
- h = pGuard->pHandOff;
- pGuard->pHandOff = pRetired;
- }
- }
- else
- h = pRetired;
- }
- else {
- cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
- h = pGuard->pHandOff;
- if ( h ) {
- if ( h->m_ptr.m_p != valGuarded )
- pGuard->pHandOff = nullHandOff;
- else
- h = nullHandOff;
- }
- }
- }
- else {
- cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
- h = pGuard->pHandOff;
- pGuard->pHandOff = nullHandOff;
- }
-
- // h is the head of a list linked on m_pNextFree field
- if ( h ) {
- set.insert( *h );
- }
- }
- }
-#endif
-}}} // namespace cds::gc::ptb