6 Hazard Pointers memory reclamation strategy implementation
9 2008.02.10 Maxim.Khiszinsky Created
12 #include <cds/gc/details/hp.h>
14 #include <algorithm> // std::sort
// Execute the statistics statement _x only when statistics gathering is
// enabled on the enclosing GarbageCollector (reads member m_bStatEnabled,
// so the macro is usable only inside GarbageCollector member functions).
17 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
19 namespace cds { namespace gc {
22 /// Max array size of retired pointers
// Default capacity bound: one slot per hazard pointer per thread, with a
// safety factor of 2 (see the constructor, which takes max(requested, this)).
23 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Singleton instance of the Hazard Pointer manager; created by Construct()
// and reset to nullptr by Destruct().
25 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
27 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
// Lazily create the singleton; a second call is a no-op.
// NOTE(review): the check-then-new sequence is not itself synchronized -
// presumably the first Construct() call is expected to happen before any
// concurrent use; confirm against the library's initialization contract.
29 if ( !m_pHZPManager ) {
30 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
34 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
// Destroy the singleton. detachAllThread() releases HP records still owned
// by threads; the control flow tying it to bDetachAll is elided in this view
// - presumably the call is conditional on bDetachAll (confirm in full source).
36 if ( m_pHZPManager ) {
38 m_pHZPManager->detachAllThread();
// Clear the singleton pointer so a later Construct() may re-create it.
41 m_pHZPManager = nullptr;
// Construct the collector. A zero count argument selects the compiled-in
// default; the retired-pointer capacity is clamped up to the default bound.
45 GarbageCollector::GarbageCollector(
46 size_t nHazardPtrCount,
47 size_t nMaxThreadCount,
48 size_t nMaxRetiredPtrCount,
51 : m_pListHead( nullptr )
52 ,m_bStatEnabled( false )
// 0 means "use default" for both tunables below.
53 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
54 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
// Effective capacity = max( nMaxRetiredPtrCount, c_nMaxRetireNodeCount ).
55 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
56 ,m_nScanType( nScanType )
59 GarbageCollector::~GarbageCollector()
// Tear down the global HP-record list. The asserts document the invariant:
// every record is unowned, owned by the destroying thread, or owned by a
// thread that is no longer alive.
61 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
62 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
// Detach the whole list first (relaxed is fine: destructor implies no
// concurrent access).
64 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
65 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
67 hplist_node * pNext = nullptr;
68 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
69 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
70 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
71 || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
// Drain any retired pointers still pending in this record.
73 details::retired_vector& vect = hprec->m_arrRetired;
74 details::retired_vector::iterator itRetired = vect.begin();
75 details::retired_vector::iterator itRetiredEnd = vect.end();
76 while ( itRetired != itRetiredEnd ) {
// Save the next link before the record itself is released below.
81 pNext = hprec->m_pNextNode;
82 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
87 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
// Allocate a brand-new HP record node bound to this collector instance.
89 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
90 return new hplist_node( *this );
93 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
// Destroy an HP record; precondition: its retired array has been drained.
95 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
96 assert( pNode->m_arrRetired.size() == 0 );
100 details::hp_record * GarbageCollector::alloc_hp_record()
// Attach an HP record to the calling thread. Strategy: first try to claim a
// free record already on the global list by CAS-ing its owner id from null
// to the current thread; if none can be claimed, allocate a new record and
// push it onto the lock-free singly-linked list head.
102 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
105 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
106 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
108 // First try to reuse a retired (non-active) HP record
109 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
110 cds::OS::ThreadId thId = nullThreadId;
// CAS null -> curThreadId claims the record; failure means someone owns it.
111 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
113 hprec->m_bFree.store( false, atomics::memory_order_release );
117 // No HP records available for reuse
118 // Allocate and push a new HP record
120 hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
121 hprec->m_bFree.store( false, atomics::memory_order_release );
// Standard lock-free push onto the list head (CAS-retry loop).
123 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
125 hprec->m_pNextNode = pOldHead;
126 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
131 void GarbageCollector::free_hp_record( details::hp_record * pRec )
// Detach the record from the calling thread. The record is NOT removed from
// the global list; releasing ownership (storing the null thread id) makes it
// claimable again by alloc_hp_record() in any thread.
133 assert( pRec != nullptr );
134 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
139 hplist_node * pNode = static_cast<hplist_node *>( pRec );
140 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
143 void GarbageCollector::detachAllThread()
// Forcibly release every HP record that is still owned by some thread.
// Called from Destruct(); see free_hp_record() for the release semantics.
145 hplist_node * pNext = nullptr;
146 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
147 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
148 pNext = hprec->m_pNextNode;
// Only currently-owned records need detaching.
149 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
150 free_hp_record( hprec );
155 void GarbageCollector::classic_scan( details::hp_record * pRec )
// Classic scan: gather every currently-held hazard pointer into a sorted
// list, then keep (re-push) only those retired pointers that are still
// guarded. Entries absent from the list are reclaimable.
157 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
159 std::vector< void * > plist;
// Worst case: every thread holds all of its hazard pointers.
160 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
161 assert( plist.size() == 0 );
163 // Stage 1: Scan HP list and insert non-null values in plist
165 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
168 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
170 void * hptr = pNode->m_hzp[i];
172 plist.push_back( hptr );
174 pNode = pNode->m_pNextNode;
177 // Sort plist so Stage 2 can use binary search
178 std::sort( plist.begin(), plist.end() );
180 // Stage 2: Search plist
181 details::retired_vector& arrRetired = pRec->m_arrRetired;
183 details::retired_vector::iterator itRetired = arrRetired.begin();
184 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
185 // arrRetired is not a std::vector!
186 // clear() just resets the item counter to 0; the items are not destroyed
190 std::vector< void * >::iterator itBegin = plist.begin();
191 std::vector< void * >::iterator itEnd = plist.end();
192 size_t nDeferredCount = 0;
193 while ( itRetired != itRetiredEnd ) {
// Still guarded by some thread: defer by re-pushing into the (cleared) array.
194 if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
195 arrRetired.push( *itRetired );
202 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
203 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
207 void GarbageCollector::inplace_scan( details::hp_record * pRec )
// In-place scan variant: sorts the retired array itself and marks guarded
// entries via the pointer's LSB, avoiding the temporary plist that
// classic_scan() allocates.
209 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
211 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
212 // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
213 // If it is wrong, we use classic scan algorithm
215 // Check if all retired pointers have zero LSB
216 // LSB is used for marking pointers that cannot be deleted yet
217 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
218 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
219 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
221 // found a pointer with LSB bit set - fall back to classic_scan
222 classic_scan( pRec );
227 // Sort retired pointer array
228 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
// After sorting, adjacent equal pointers mean the same node was retired twice.
235 while ( ++it != itRetiredEnd ) {
236 if ( it->m_p == itPrev->m_p )
237 throw std::runtime_error( "Double free" );
243 // Search guarded pointers in retired array
244 hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
247 details::retired_ptr dummyRetired;
// Skip records marked free; their retired arrays are irrelevant here.
249 if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
250 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
252 void * hptr = pNode->m_hzp[i];
254 dummyRetired.m_p = hptr;
// Binary search the sorted retired array for this hazard pointer.
255 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
256 if ( it != itRetiredEnd && it->m_p == hptr ) {
257 // Mark retired pointer as guarded
263 pNode = pNode->m_pNextNode;
267 // Move all marked (still guarded) pointers to head of array
269 auto itInsert = itRetired;
270 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
273 if ( itInsert != it )
278 // Retired pointer may be freed
// Deferred entries remain at the head; shrink the array to that count.
282 const size_t nDeferred = itInsert - itRetired;
283 pRec->m_arrRetired.size( nDeferred );
284 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
285 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
289 void GarbageCollector::HelpScan( details::hp_record * pThis )
// "Help scan": adopt retired pointers abandoned by dead or detached threads.
// Walks the global HP list, claims ownerless records via CAS, and moves their
// retired pointers into pThis (the calling thread's private record).
291 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
// Precondition: pThis is owned by the calling thread.
293 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id() );
295 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
296 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
297 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
299 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
300 if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
303 // Claim hprec if it is ownerless (or its owner thread is dead).
304 // Several threads may work concurrently so we use atomic technique only.
306 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
307 if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
// CAS the observed (null or dead) owner id to ourselves.
308 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
// NOTE(review): control flow around this second CAS attempt is elided in
// this view; it apparently retries expecting a null owner - confirm
// against the full source before relying on this path.
312 curOwner = nullThreadId;
313 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
318 // We own the record successfully. Now we can see whether it has retired pointers.
319 // If it has some, we move them to pThis which is private for the current thread.
320 details::retired_vector& src = hprec->m_arrRetired;
321 details::retired_vector& dest = pThis->m_arrRetired;
322 assert( !dest.isFull());
323 details::retired_vector::iterator itRetired = src.begin();
325 // TSan can issue a warning here:
326 // read src.m_nSize in src.end()
327 // write src.m_nSize in src.clear()
328 // This is a false positive since we own hprec
329 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
330 details::retired_vector::iterator itRetiredEnd = src.end();
331 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
333 while ( itRetired != itRetiredEnd ) {
334 dest.push( *itRetired );
// When our private array fills up, a scan is triggered to make room.
335 if ( dest.isFull()) {
336 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
342 // TSan: write src.m_nSize, see the comment above
343 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
345 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
// Release the adopted record back to the free pool.
347 hprec->m_bFree.store(true, atomics::memory_order_release);
348 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
354 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
// Fill 'stat' with a snapshot of the configuration, HP-list occupancy, and
// event counters. The list walk is unsynchronized, so the occupancy numbers
// are approximate while other threads are active.
356 stat.nHPCount = m_nHazardPointerCount;
357 stat.nMaxThreadCount = m_nMaxThreadCount;
358 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
// Per-record footprint: the node itself plus its retired-pointer array.
359 stat.nHPRecSize = sizeof( hplist_node )
360 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
// Zero the counters that the list walk below accumulates (chained assignment).
362 stat.nHPRecAllocated =
364 stat.nTotalRetiredPtrCount =
365 stat.nRetiredPtrInFreeHPRecs = 0;
367 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
368 ++stat.nHPRecAllocated;
369 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
// Free records' pending retired pointers are tallied separately.
371 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
373 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Copy the internal event counters into the public structure.
382 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
383 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
384 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
385 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
387 stat.evcScanCall = m_Stat.m_ScanCallCount;
388 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
389 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
391 stat.evcDeletedNode = m_Stat.m_DeletedNode;
392 stat.evcDeferredNode = m_Stat.m_DeferredNode;
399 }} // namespace cds::gc