/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
    Hazard Pointers memory reclamation strategy implementation

    Editions:
        2008.02.10    Maxim.Khiszinsky    Created
*/
40 #include <cds/gc/details/hp.h>
42 #include <algorithm> // std::sort
45 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
47 namespace cds { namespace gc {
50 /// Max array size of retired pointers
51 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
53 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
55 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
57 if ( !m_pHZPManager ) {
58 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
62 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
64 if ( m_pHZPManager ) {
66 m_pHZPManager->detachAllThread();
69 m_pHZPManager = nullptr;
73 GarbageCollector::GarbageCollector(
74 size_t nHazardPtrCount,
75 size_t nMaxThreadCount,
76 size_t nMaxRetiredPtrCount,
79 : m_pListHead( nullptr )
80 ,m_bStatEnabled( false )
81 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
82 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
83 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
84 ,m_nScanType( nScanType )
87 GarbageCollector::~GarbageCollector()
89 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
90 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
92 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
93 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
95 hplist_node * pNext = nullptr;
96 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
97 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
98 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
99 || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
101 details::retired_vector& vect = hprec->m_arrRetired;
102 details::retired_vector::iterator itRetired = vect.begin();
103 details::retired_vector::iterator itRetiredEnd = vect.end();
104 while ( itRetired != itRetiredEnd ) {
109 pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
110 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
111 DeleteHPRec( hprec );
115 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec( OS::ThreadId owner )
117 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
118 return new hplist_node( *this, owner );
121 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
123 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
124 assert( pNode->m_arrRetired.size() == 0 );
128 details::hp_record * GarbageCollector::alloc_hp_record()
130 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
133 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
134 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
136 // First try to reuse a retired (non-active) HP record
137 for ( hprec = m_pListHead.load( atomics::memory_order_relaxed ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
138 cds::OS::ThreadId thId = nullThreadId;
139 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
141 hprec->m_bFree.store( false, atomics::memory_order_relaxed );
145 // No HP records available for reuse
146 // Allocate and push a new HP record
147 hprec = NewHPRec( curThreadId );
149 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_relaxed );
151 hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
152 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_acq_rel, atomics::memory_order_acquire ));
// Releases the calling thread's HP record back to the global list so another
// thread can reclaim it via alloc_hp_record().
// NOTE(review): this extraction is missing several source lines here (orig.
// lines 158, 161-164) — presumably the record's hazard slots are cleared and a
// final Scan/HelpScan pass is run before ownership is dropped; confirm against
// the upstream file before relying on this text.
157 void GarbageCollector::free_hp_record( details::hp_record * pRec )
159 assert( pRec != nullptr );
160 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
// Releasing ownership: the release store pairs with the acquire CAS in
// alloc_hp_record(), publishing the record's state to the next owner.
165 hplist_node * pNode = static_cast<hplist_node *>( pRec );
166 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
169 void GarbageCollector::detachAllThread()
171 hplist_node * pNext = nullptr;
172 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
173 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = pNext ) {
174 pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
175 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
176 free_hp_record( hprec );
181 void GarbageCollector::classic_scan( details::hp_record * pRec )
183 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
185 std::vector< void * > plist;
186 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
187 assert( plist.size() == 0 );
189 // Stage 1: Scan HP list and insert non-null values in plist
191 hplist_node * pNode = m_pListHead.load(atomics::memory_order_relaxed);
194 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
196 void * hptr = pNode->m_hzp[i].get();
198 plist.push_back( hptr );
200 pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
203 // Sort plist to simplify search in
204 std::sort( plist.begin(), plist.end());
206 // Stage 2: Search plist
207 details::retired_vector& arrRetired = pRec->m_arrRetired;
209 details::retired_vector::iterator itRetired = arrRetired.begin();
210 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
211 // arrRetired is not a std::vector!
212 // clear() is just set up item counter to 0, the items is not destroyed
216 std::vector< void * >::iterator itBegin = plist.begin();
217 std::vector< void * >::iterator itEnd = plist.end();
218 size_t nDeferredCount = 0;
219 while ( itRetired != itRetiredEnd ) {
220 if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
221 arrRetired.push( *itRetired );
228 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
229 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
233 void GarbageCollector::inplace_scan( details::hp_record * pRec )
235 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
237 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
238 // It is correct if all retired pointers are ar least 2-byte aligned (LSB is zero).
239 // If it is wrong, we use classic scan algorithm
241 // Check if all retired pointers has zero LSB
242 // LSB is used for marking pointers that cannot be deleted yet
243 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
244 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
245 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
247 // found a pointer with LSB bit set - use classic_scan
248 classic_scan( pRec );
253 // Sort retired pointer array
254 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
261 while ( ++it != itRetiredEnd ) {
262 if ( it->m_p == itPrev->m_p )
263 throw std::runtime_error( "Double free" );
269 // Search guarded pointers in retired array
270 hplist_node * pNode = m_pListHead.load( atomics::memory_order_relaxed );
273 details::retired_ptr dummyRetired;
275 if ( !pNode->m_bFree.load( atomics::memory_order_relaxed )) {
276 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
278 void * hptr = pNode->m_hzp[i].get();
280 dummyRetired.m_p = hptr;
281 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
282 if ( it != itRetiredEnd && it->m_p == hptr ) {
283 // Mark retired pointer as guarded
289 pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
293 // Move all marked pointers to head of array
295 auto itInsert = itRetired;
296 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
299 if ( itInsert != it )
304 // Retired pointer may be freed
308 const size_t nDeferred = itInsert - itRetired;
309 pRec->m_arrRetired.size( nDeferred );
310 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
311 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
315 void GarbageCollector::HelpScan( details::hp_record * pThis )
317 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
319 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());
321 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
322 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
323 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
325 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
326 if ( hprec->m_bFree.load(atomics::memory_order_relaxed))
329 // Owns hprec if it is empty.
330 // Several threads may work concurrently so we use atomic technique only.
332 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_relaxed);
333 if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner ) ) {
334 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ) )
340 // curOwner = nullThreadId;
341 // if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
346 // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
347 // If it has ones then we move to pThis that is private for current thread.
348 details::retired_vector& src = hprec->m_arrRetired;
349 details::retired_vector& dest = pThis->m_arrRetired;
350 assert( !dest.isFull());
351 details::retired_vector::iterator itRetired = src.begin();
353 // TSan can issue a warning here:
354 // read src.m_nSize in src.end()
355 // write src.m_nSize in src.clear()
356 // This is false positive since we own hprec
357 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
358 details::retired_vector::iterator itRetiredEnd = src.end();
359 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
361 while ( itRetired != itRetiredEnd ) {
362 dest.push( *itRetired );
363 if ( dest.isFull()) {
364 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
370 // TSan: write src.m_nSize, see a comment above
371 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
373 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
375 hprec->m_bFree.store(true, atomics::memory_order_relaxed);
376 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
382 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
384 stat.nHPCount = m_nHazardPointerCount;
385 stat.nMaxThreadCount = m_nMaxThreadCount;
386 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
387 stat.nHPRecSize = sizeof( hplist_node )
388 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
390 stat.nHPRecAllocated =
392 stat.nTotalRetiredPtrCount =
393 stat.nRetiredPtrInFreeHPRecs = 0;
395 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_relaxed); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
396 ++stat.nHPRecAllocated;
397 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
399 if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
401 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
410 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
411 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
412 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
413 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
415 stat.evcScanCall = m_Stat.m_ScanCallCount;
416 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
417 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
419 stat.evcDeletedNode = m_Stat.m_DeletedNode;
420 stat.evcDeferredNode = m_Stat.m_DeferredNode;
427 }} // namespace cds::gc