// Hazard Pointers memory reclamation strategy implementation
// 2008.02.10 Maxim.Khiszinsky: Created
12 #include <cds/gc/details/hp.h>
14 #include <algorithm> // std::sort
17 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
19 namespace cds { namespace gc {
22 /// Max array size of retired pointers
23 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
25 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
27 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
29 if ( !m_pHZPManager ) {
30 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
34 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
36 if ( m_pHZPManager ) {
38 m_pHZPManager->detachAllThread();
41 m_pHZPManager = nullptr;
45 GarbageCollector::GarbageCollector(
46 size_t nHazardPtrCount,
47 size_t nMaxThreadCount,
48 size_t nMaxRetiredPtrCount,
51 : m_pListHead( nullptr )
52 ,m_bStatEnabled( true )
53 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
54 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
55 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
56 ,m_nScanType( nScanType )
59 GarbageCollector::~GarbageCollector()
61 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
62 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
64 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
65 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
67 hplist_node * pNext = nullptr;
68 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
69 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
70 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
71 || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
73 details::retired_vector& vect = hprec->m_arrRetired;
74 details::retired_vector::iterator itRetired = vect.begin();
75 details::retired_vector::iterator itRetiredEnd = vect.end();
76 while ( itRetired != itRetiredEnd ) {
77 DeletePtr( *itRetired );
81 pNext = hprec->m_pNextNode;
82 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
87 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
89 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
90 return new hplist_node( *this );
93 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
95 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
96 assert( pNode->m_arrRetired.size() == 0 );
100 inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
102 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
106 details::hp_record * GarbageCollector::alloc_hp_record()
108 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
111 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
112 const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
114 // First try to reuse a retired (non-active) HP record
115 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
116 cds::OS::ThreadId thId = nullThreadId;
117 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
119 hprec->m_bFree.store( false, atomics::memory_order_release );
123 // No HP records available for reuse
124 // Allocate and push a new HP record
126 hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
127 hprec->m_bFree.store( false, atomics::memory_order_relaxed );
129 atomics::atomic_thread_fence( atomics::memory_order_release );
131 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
133 hprec->m_pNextNode = pOldHead;
134 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
139 void GarbageCollector::free_hp_record( details::hp_record * pRec )
141 assert( pRec != nullptr );
142 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
146 hplist_node * pNode = static_cast<hplist_node *>( pRec );
147 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
150 void GarbageCollector::detachAllThread()
152 hplist_node * pNext = nullptr;
153 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
154 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
155 pNext = hprec->m_pNextNode;
156 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
157 free_hp_record( hprec );
162 void GarbageCollector::classic_scan( details::hp_record * pRec )
164 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
166 std::vector< void * > plist;
167 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
168 assert( plist.size() == 0 );
170 // Stage 1: Scan HP list and insert non-null values in plist
172 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
175 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
176 void * hptr = pNode->m_hzp[i];
178 plist.push_back( hptr );
180 pNode = pNode->m_pNextNode;
183 // Sort plist to simplify search in
184 std::sort( plist.begin(), plist.end() );
186 // Stage 2: Search plist
187 details::retired_vector& arrRetired = pRec->m_arrRetired;
189 details::retired_vector::iterator itRetired = arrRetired.begin();
190 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
191 // arrRetired is not a std::vector!
192 // clear is just set up item counter to 0, the items is not destroying
195 std::vector< void * >::iterator itBegin = plist.begin();
196 std::vector< void * >::iterator itEnd = plist.end();
197 while ( itRetired != itRetiredEnd ) {
198 if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
199 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
200 arrRetired.push( *itRetired );
203 DeletePtr( *itRetired );
208 void GarbageCollector::inplace_scan( details::hp_record * pRec )
210 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
212 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
213 // It is correct if all retired pointers are ar least 2-byte aligned (LSB is zero).
214 // If it is wrong, we use classic scan algorithm
216 // Check if all retired pointers has zero LSB
217 // LSB is used for marking pointers that cannot be deleted yet
218 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
219 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
220 for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
221 if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
222 // found a pointer with LSB bit set - use classic_scan
223 classic_scan( pRec );
228 // Sort retired pointer array
229 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
231 // Search guarded pointers in retired array
233 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
236 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
237 void * hptr = pNode->m_hzp[i];
239 details::retired_ptr dummyRetired;
240 dummyRetired.m_p = hptr;
241 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
242 if ( it != itRetiredEnd && it->m_p == hptr ) {
243 // Mark retired pointer as guarded
244 it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) | 1);
248 pNode = pNode->m_pNextNode;
251 // Move all marked pointers to head of array
252 details::retired_vector::iterator itInsert = itRetired;
253 for ( details::retired_vector::iterator it = itRetired; it != itRetiredEnd; ++it ) {
254 if ( reinterpret_cast<ptr_atomic_t>(it->m_p) & 1 ) {
255 it->m_p = reinterpret_cast<void *>(reinterpret_cast<ptr_atomic_t>(it->m_p ) & ~1);
258 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
261 // Retired pointer may be freed
265 pRec->m_arrRetired.size( itInsert - itRetired );
268 void GarbageCollector::HelpScan( details::hp_record * pThis )
270 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
272 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
274 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
275 const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
276 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
278 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
279 if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
282 // Owns hprec if it is empty.
283 // Several threads may work concurrently so we use atomic technique only.
285 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
286 if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
287 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
291 curOwner = nullThreadId;
292 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
297 // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
298 // If it has ones then we move to pThis that is private for current thread.
299 details::retired_vector& src = hprec->m_arrRetired;
300 details::retired_vector& dest = pThis->m_arrRetired;
301 assert( !dest.isFull());
302 details::retired_vector::iterator itRetired = src.begin();
303 details::retired_vector::iterator itRetiredEnd = src.end();
304 while ( itRetired != itRetiredEnd ) {
305 dest.push( *itRetired );
306 if ( dest.isFull()) {
307 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
314 hprec->m_bFree.store(true, atomics::memory_order_release);
315 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
319 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
321 stat.nHPCount = m_nHazardPointerCount;
322 stat.nMaxThreadCount = m_nMaxThreadCount;
323 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
324 stat.nHPRecSize = sizeof( hplist_node )
325 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
327 stat.nHPRecAllocated =
329 stat.nTotalRetiredPtrCount =
330 stat.nRetiredPtrInFreeHPRecs = 0;
332 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
333 ++stat.nHPRecAllocated;
334 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
336 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
338 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
347 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
348 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
349 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
350 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
352 stat.evcScanCall = m_Stat.m_ScanCallCount;
353 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
354 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
356 stat.evcDeletedNode = m_Stat.m_DeletedNode;
357 stat.evcDeferredNode = m_Stat.m_DeferredNode;
364 }} // namespace cds::gc