// Hazard Pointers memory reclamation strategy implementation
// 2008.02.10 Maxim.Khiszinsky Created
12 #include <cds/gc/details/hp.h>
14 #include <algorithm> // std::sort
// Statistic helper: evaluates _x only when event counting (m_bStatEnabled) is on.
17 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
19 namespace cds { namespace gc {
22 /// Max array size of retired pointers (default capacity when the user passes a smaller value to the ctor)
23 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Process-wide singleton instance; created by Construct(), cleared by Destruct().
25 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
/// Lazily creates the process-wide GarbageCollector singleton (idempotent — does
/// nothing if the manager already exists).
// @param nHazardPtrCount     hazard pointers per thread (0 selects the default; see ctor)
// @param nMaxThreadCount     max number of threads (0 selects the default; see ctor)
// @param nMaxRetiredPtrCount capacity of the retired-pointer array (clamped up in ctor)
// @param nScanType           selects the liberation algorithm (classic_scan / inplace_scan below)
27 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
29 if ( !m_pHZPManager ) {
30 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
/// Destroys the singleton manager.
// NOTE(review): this chunk is truncated — presumably detachAllThread() is guarded by
// bDetachAll and the manager is deleted before the pointer reset; verify against full source.
34 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
36 if ( m_pHZPManager ) {
38 m_pHZPManager->detachAllThread();
// Reset the singleton pointer so a subsequent Construct() can recreate the manager.
41 m_pHZPManager = nullptr;
/// Constructs the manager. Zero-valued count arguments select library defaults;
/// the retired-pointer capacity is clamped up to at least c_nMaxRetireNodeCount.
45 GarbageCollector::GarbageCollector(
46 size_t nHazardPtrCount,
47 size_t nMaxThreadCount,
48 size_t nMaxRetiredPtrCount,
51 : m_pListHead( nullptr )
52 ,m_bStatEnabled( true )
53 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
54 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
55 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
56 ,m_nScanType( nScanType )
/// Destructor: detaches the HP record list, frees every still-pending retired
/// pointer of each record, and marks each record free. The debug asserts show it
/// expects to run when no other thread owns a record.
59 GarbageCollector::~GarbageCollector()
61 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
62 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
// Detach the whole list first so no new record can be linked behind our back.
64 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
65 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
67 hplist_node * pNext = nullptr;
68 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
// Every record must be unowned, owned by the destroying thread, or owned by a dead thread.
69 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
70 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
71 || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
// Free every retired pointer still waiting in this record.
// NOTE(review): iterator advance and the record-deletion call are not visible in this chunk.
73 details::retired_vector& vect = hprec->m_arrRetired;
74 details::retired_vector::iterator itRetired = vect.begin();
75 details::retired_vector::iterator itRetiredEnd = vect.end();
76 while ( itRetired != itRetiredEnd ) {
77 DeletePtr( *itRetired );
// Save the successor before the record is recycled, then mark the record free.
81 pNext = hprec->m_pNextNode;
82 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
/// Allocates a fresh hplist_node bound to this manager (statistic: AllocNewHPRec).
87 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
89 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec );
90 return new hplist_node( *this );
/// Deletes an hplist_node; the record must have no pending retired pointers.
// NOTE(review): the actual `delete pNode` statement is not visible in this chunk.
93 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
95 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec );
96 assert( pNode->m_arrRetired.size() == 0 );
/// Reclaims one retired pointer (statistic: DeletedNode).
// NOTE(review): the line invoking the retired pointer's disposer/free function
// is not visible in this chunk — verify against full source.
100 inline void GarbageCollector::DeletePtr( details::retired_ptr& p )
102 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeletedNode );
/// Allocates a hazard-pointer record for the calling thread.
/// Strategy: (1) try to claim a retired record from the global list by CAS-ing its
/// owner id from null to the current thread id; (2) if none is available, build a
/// new record and push it onto the lock-free list head.
106 details::hp_record * GarbageCollector::alloc_hp_record()
108 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec );
111 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
112 const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
114 // First try to reuse a retired (non-active) HP record
115 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
116 cds::OS::ThreadId thId = nullThreadId;
// Claim ownership atomically; CAS failure means another thread owns this record.
117 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
// Claimed: mark the record as in use (presumably followed by `return hprec;` — chunk truncated).
119 hprec->m_bFree.store( false, atomics::memory_order_release );
123 // No HP records available for reuse
124 // Allocate and push a new HP record
// NOTE(review): the `hprec = NewHPRec()` assignment is not visible in this chunk.
126 hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
127 hprec->m_bFree.store( false, atomics::memory_order_relaxed );
// Publish the fully initialized record before linking it into the shared list.
129 atomics::atomic_thread_fence( atomics::memory_order_release );
131 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
133 hprec->m_pNextNode = pOldHead;
// Lock-free push onto the list head: retry until the CAS succeeds.
134 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
/// Retires the calling thread's HP record: releasing the owner id (store below)
/// makes the record claimable again by alloc_hp_record() in another thread.
139 void GarbageCollector::free_hp_record( details::hp_record * pRec )
141 assert( pRec != nullptr );
142 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec );
146 hplist_node * pNode = static_cast<hplist_node *>( pRec );
// Release-store so the record's final state is visible before ownership is dropped.
147 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
/// Forcibly releases every HP record that is still owned by some thread
/// (called from Destruct() to detach all threads before teardown).
150 void GarbageCollector::detachAllThread()
152 hplist_node * pNext = nullptr;
153 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
154 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
// Save the successor up front; only records with a live owner id need freeing.
155 pNext = hprec->m_pNextNode;
156 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
157 free_hp_record( hprec );
/// Classic scan: snapshots all hazard (guarded) pointers of all threads into a
/// sorted array `plist`, then walks pRec's retired list — entries found in plist
/// are still guarded and are deferred (re-pushed); the rest are reclaimed.
162 void GarbageCollector::classic_scan( details::hp_record * pRec )
164 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
166 std::vector< void * > plist;
// Upper bound on the number of hazard pointers in the whole system — one reserve, no reallocs.
167 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
168 assert( plist.size() == 0 );
170 // Stage 1: Scan HP list and insert non-null values in plist
172 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
175 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
176 void * hptr = pNode->m_hzp[i];
// NOTE(review): the null-check guarding this push_back is not visible in this chunk.
178 plist.push_back( hptr );
180 pNode = pNode->m_pNextNode;
183 // Sort plist to enable the binary search below
184 std::sort( plist.begin(), plist.end() );
186 // Stage 2: Search plist for each retired pointer
187 details::retired_vector& arrRetired = pRec->m_arrRetired;
189 details::retired_vector::iterator itRetired = arrRetired.begin();
190 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
191 // arrRetired is not a std::vector!
192 // clear() just resets the item counter to 0; the items are not destroyed
195 std::vector< void * >::iterator itBegin = plist.begin();
196 std::vector< void * >::iterator itEnd = plist.end();
197 while ( itRetired != itRetiredEnd ) {
// Still guarded by some thread: keep it for a later scan pass.
198 if ( std::binary_search( itBegin, itEnd, itRetired->m_p) ) {
199 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
200 arrRetired.push( *itRetired );
// Not guarded by anyone: safe to reclaim now.
203 DeletePtr( *itRetired );
/// In-place scan variant: instead of building a separate plist, sorts the retired
/// array itself, marks entries still referenced by a hazard pointer (using the
/// pointer LSB as the mark), compacts marked entries to the array head and frees
/// the rest. Falls back to classic_scan() if any retired pointer already has a
/// non-zero LSB (i.e. is not at least 2-byte aligned).
208 void GarbageCollector::inplace_scan( details::hp_record * pRec )
210 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount );
212 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
213 // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
214 // If it is wrong, we use classic scan algorithm
216 // Check that all retired pointers have zero LSB
217 // LSB is used for marking pointers that cannot be deleted yet
218 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
219 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
220 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
// NOTE(review): the LSB test inside this loop is not visible in this chunk.
222 // found a pointer with LSB bit set - use classic_scan
223 classic_scan( pRec );
228 // Sort retired pointer array
229 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
231 // Search guarded pointers in retired array
232 hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
// Probe key reused for every lower_bound lookup below.
235 details::retired_ptr dummyRetired;
237 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
238 void * hptr = pNode->m_hzp[i];
240 dummyRetired.m_p = hptr;
// Binary search: the retired array is sorted, so lower_bound finds a match if any.
241 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
242 if ( it != itRetiredEnd && it->m_p == hptr ) {
// Mark retired pointer as guarded
// NOTE(review): the statement setting the LSB mark is not visible in this chunk.
248 pNode = pNode->m_pNextNode;
252 // Move all marked (still guarded) pointers to head of array; free the rest
254 auto itInsert = itRetired;
255 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
258 if ( itInsert != it )
261 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeferredNode );
264 // Retired pointer may be freed
// Shrink the logical size to the deferred (still guarded) prefix.
268 pRec->m_arrRetired.size( itInsert - itRetired );
/// Help-scan: the calling thread (owner of pThis) adopts retired pointers left
/// behind in HP records of dead or detached threads, moving them into its own
/// record so they are eventually reclaimed by a regular scan.
272 void GarbageCollector::HelpScan( details::hp_record * pThis )
274 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
// pThis must belong to the calling thread.
276 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
278 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
279 const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
280 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
282 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
283 if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
286 // Claim hprec only if it is unowned or its owner thread is dead.
287 // Several threads may work concurrently so we use atomic technique only.
289 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
290 if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
// Two CAS attempts: one from the observed (dead) owner id, one from null.
// NOTE(review): the control flow between them is not fully visible in this chunk.
291 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
295 curOwner = nullThreadId;
296 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
301 // We claimed the record successfully. Now, see whether it has retired pointers;
302 // if so, move them to pThis, which is private to the current thread.
303 details::retired_vector& src = hprec->m_arrRetired;
304 details::retired_vector& dest = pThis->m_arrRetired;
305 assert( !dest.isFull());
306 details::retired_vector::iterator itRetired = src.begin();
307 details::retired_vector::iterator itRetiredEnd = src.end();
308 while ( itRetired != itRetiredEnd ) {
309 dest.push( *itRetired );
// Our own array overflowed while adopting: trigger a scan to make room
// (NOTE(review): the Scan invocation itself is not visible in this chunk).
310 if ( dest.isFull()) {
311 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan );
// Adoption done: mark the source record empty/free and release ownership.
318 hprec->m_bFree.store(true, atomics::memory_order_release);
319 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
/// Fills `stat` with a snapshot of the manager's configuration, per-record usage
/// counters (by walking the HP record list), and accumulated event counters;
/// returns the same object. Counters are read relaxed — this is a best-effort
/// snapshot, not a synchronized one.
323 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
325 stat.nHPCount = m_nHazardPointerCount;
326 stat.nMaxThreadCount = m_nMaxThreadCount;
327 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
// Approximate per-record footprint: node header plus its retired-pointer array.
328 stat.nHPRecSize = sizeof( hplist_node )
329 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
331 stat.nHPRecAllocated =
333 stat.nTotalRetiredPtrCount =
334 stat.nRetiredPtrInFreeHPRecs = 0;
// Walk the record list to count records and pending retired pointers.
336 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
337 ++stat.nHPRecAllocated;
338 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
340 if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
342 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Copy accumulated event counters into the snapshot.
351 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
352 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
353 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
354 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
356 stat.evcScanCall = m_Stat.m_ScanCallCount;
357 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
358 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
360 stat.evcDeletedNode = m_Stat.m_DeletedNode;
361 stat.evcDeferredNode = m_Stat.m_DeferredNode;
368 }} // namespace cds::gc