2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 Hazard Pointers memory reclamation strategy implementation
37 2008.02.10 Maxim.Khiszinsky Created
40 #include <cds/gc/details/hp.h>
42 #include <algorithm> // std::sort
// NOTE(review): every line in this listing carries a stray leading number (the
// original file's line number) - extraction artifact; gaps mark elided lines.
// Statistics helper: executes _x only when internal statistics are enabled.
45 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
47 namespace cds { namespace gc {
50 /// Max array size of retired pointers
// Default upper bound for the per-thread retired-pointer array capacity.
51 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Singleton GC instance: created by Construct(), destroyed by Destruct().
53 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
55 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
57 if ( !m_pHZPManager ) {
58 m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
62 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
64 if ( m_pHZPManager ) {
66 m_pHZPManager->detachAllThread();
69 m_pHZPManager = nullptr;
73 GarbageCollector::GarbageCollector(
74 size_t nHazardPtrCount,
75 size_t nMaxThreadCount,
76 size_t nMaxRetiredPtrCount,
79 : m_pListHead( nullptr )
80 ,m_bStatEnabled( false )
81 ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
82 ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
83 ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
84 ,m_nScanType( nScanType )
// Destructor: tears down the whole HP-record list. Runs single-threaded, hence
// the relaxed atomics throughout.
// NOTE(review): original lines 91, 94, 100, 105-108 and 112-113 are elided in
// this listing (numbering gaps); the while-loop body that drains each record's
// retired pointers is among them - confirm against upstream hp.cpp.
87 GarbageCollector::~GarbageCollector()
// Debug-only thread ids used by the ownership assertion below.
89 CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
90 CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
// Detach the list head so no record is reachable during teardown.
92 hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
93 m_pListHead.store( nullptr, atomics::memory_order_relaxed );
95 hplist_node * pNext = nullptr;
96 for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
// Each record must be unowned, owned by the main (current) thread, or owned
// by a thread that is no longer alive.
97 assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
98 || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
99 || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
// Drain the record's retired-pointer vector (loop body elided here;
// presumably frees each retired pointer - TODO confirm).
101 details::retired_vector& vect = hprec->m_arrRetired;
102 details::retired_vector::iterator itRetired = vect.begin();
103 details::retired_vector::iterator itRetiredEnd = vect.end();
104 while ( itRetired != itRetiredEnd ) {
// Remember the successor, mark the record free, and physically delete it.
109 pNext = hprec->m_pNextNode;
110 hprec->m_bFree.store( true, atomics::memory_order_relaxed );
111 DeleteHPRec( hprec );
115 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
117 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
118 return new hplist_node( *this );
121 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
123 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
124 assert( pNode->m_arrRetired.size() == 0 );
128 details::hp_record * GarbageCollector::alloc_hp_record()
130 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
133 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
134 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
136 // First try to reuse a retired (non-active) HP record
137 for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
138 cds::OS::ThreadId thId = nullThreadId;
139 if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ))
141 hprec->m_bFree.store( false, atomics::memory_order_release );
145 // No HP records available for reuse
146 // Allocate and push a new HP record
148 hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
149 hprec->m_bFree.store( false, atomics::memory_order_release );
151 hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
153 hprec->m_pNextNode = pOldHead;
154 CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &( hprec->m_pNextNode ));
155 } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Releases the calling thread's HP record back to the free pool: the record is
// not unlinked from the global list, only its owner id is reset so another
// thread can reuse it via alloc_hp_record().
// NOTE(review): original lines 164-167 are elided in this listing (numbering
// gap); presumably they clear the record and scan its retired pointers before
// release - confirm against upstream hp.cpp.
160 void GarbageCollector::free_hp_record( details::hp_record * pRec )
162 assert( pRec != nullptr );
163 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
// Release-store of the null owner id publishes the record as reusable.
168 hplist_node * pNode = static_cast<hplist_node *>( pRec );
169 pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
172 void GarbageCollector::detachAllThread()
174 hplist_node * pNext = nullptr;
175 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
176 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
177 pNext = hprec->m_pNextNode;
178 if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
179 free_hp_record( hprec );
// Classic two-stage Scan from Michael's hazard-pointer algorithm:
// stage 1 snapshots every thread's hazard pointers into plist, stage 2 frees
// each retired pointer not present in that snapshot and re-pushes (defers)
// the ones that are still guarded.
// NOTE(review): this listing elides several original lines (e.g. 191, 193,
// 195-205 partially, 216-218, 225-230) - the loop bodies that walk the node
// list, free unguarded pointers, and the clear() between stages are among
// them; confirm against upstream hp.cpp.
184 void GarbageCollector::classic_scan( details::hp_record * pRec )
186 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
// Snapshot buffer sized for the worst case: every thread's every hazard slot.
188 std::vector< void * > plist;
189 plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
190 assert( plist.size() == 0 );
192 // Stage 1: Scan HP list and insert non-null values in plist
194 hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
197 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
199 void * hptr = pNode->m_hzp[i].get();
201 plist.push_back( hptr );
203 pNode = pNode->m_pNextNode;
206 // Sort plist to simplify the binary search in stage 2
207 std::sort( plist.begin(), plist.end());
209 // Stage 2: Search plist
210 details::retired_vector& arrRetired = pRec->m_arrRetired;
212 details::retired_vector::iterator itRetired = arrRetired.begin();
213 details::retired_vector::iterator itRetiredEnd = arrRetired.end();
214 // arrRetired is not a std::vector!
215 // clear() just resets the item counter to 0; the items are not destroyed
219 std::vector< void * >::iterator itBegin = plist.begin();
220 std::vector< void * >::iterator itEnd = plist.end();
221 size_t nDeferredCount = 0;
222 while ( itRetired != itRetiredEnd ) {
// Still guarded by some thread: defer by re-pushing into the (cleared) array.
223 if ( std::binary_search( itBegin, itEnd, itRetired->m_p )) {
224 arrRetired.push( *itRetired );
225 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
232 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
// In-place variant of Scan: instead of a separate snapshot vector it sorts the
// retired array itself, marks guarded entries via the pointer's LSB, compacts
// the marked (deferred) entries to the array head, and frees the rest.
// NOTE(review): this listing elides many original lines (e.g. 249, 252-255,
// 258-263, 267-271, 287-291, 293-295, 300-310) - the LSB set/clear operations,
// the free step, and several closing braces are among them; confirm against
// upstream hp.cpp.
236 void GarbageCollector::inplace_scan( details::hp_record * pRec )
238 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
240 // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
241 // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
242 // If that does not hold, we fall back to the classic scan algorithm
244 // Check that all retired pointers have a zero LSB
245 // LSB is used for marking pointers that cannot be deleted yet
246 details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
247 details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
248 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
250 // found a pointer with LSB bit set - use classic_scan
251 classic_scan( pRec );
256 // Sort retired pointer array
257 std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
// Debug-only duplicate check: two identical retired pointers mean double free.
264 while ( ++it != itRetiredEnd ) {
265 if ( it->m_p == itPrev->m_p )
266 throw std::runtime_error( "Double free" );
272 // Search guarded pointers in retired array
273 hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
276 details::retired_ptr dummyRetired;
// Skip free records: their hazard slots guard nothing.
278 if ( !pNode->m_bFree.load( atomics::memory_order_acquire )) {
279 for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
281 void * hptr = pNode->m_hzp[i].get();
// Binary search the sorted retired array for this hazard pointer.
283 dummyRetired.m_p = hptr;
284 details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
285 if ( it != itRetiredEnd && it->m_p == hptr ) {
286 // Mark retired pointer as guarded
292 pNode = pNode->m_pNextNode;
296 // Move all marked pointers to head of array
298 auto itInsert = itRetired;
299 for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
302 if ( itInsert != it )
307 // Retired pointer may be freed
// Shrink the array to the deferred (still-guarded) entries only.
311 const size_t nDeferred = itInsert - itRetired;
312 pRec->m_arrRetired.size( nDeferred );
313 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
314 CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
// HelpScan: the calling thread (owner of pThis) adopts HP records abandoned by
// dead or detached threads, moving their retired pointers into its own record
// so they can be reclaimed by a later Scan.
// NOTE(review): this listing elides several original lines (e.g. 330-331,
// 338-340, 343-346, 366-370, 373, 378-380) - among them the `continue`
// branches after the failed CASes, the Scan() call when dest fills up, and
// the src.clear(); confirm against upstream hp.cpp.
318 void GarbageCollector::HelpScan( details::hp_record * pThis )
320 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
// pThis must be owned by the calling thread.
322 assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id());
324 const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
325 const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
326 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
328 // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
329 if ( hprec->m_bFree.load(atomics::memory_order_acquire))
332 // Owns hprec if it is empty.
333 // Several threads may work concurrently so we use atomic technique only.
// Claim records that are unowned or whose owner thread died; a failed CAS
// means another helper got there first.
335 cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
336 if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
337 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
// Retry the claim from the null owner id (the first CAS updated curOwner).
341 curOwner = nullThreadId;
342 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
347 // We own the record successfully. Now, we can see whether hp_record has retired pointers.
348 // If it has some then we move them to pThis, which is private to the current thread.
349 details::retired_vector& src = hprec->m_arrRetired;
350 details::retired_vector& dest = pThis->m_arrRetired;
351 assert( !dest.isFull());
352 details::retired_vector::iterator itRetired = src.begin();
354 // TSan can issue a warning here:
355 // read src.m_nSize in src.end()
356 // write src.m_nSize in src.clear()
357 // This is false positive since we own hprec
358 CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
359 details::retired_vector::iterator itRetiredEnd = src.end();
360 CDS_TSAN_ANNOTATE_IGNORE_READS_END;
// Move each retired pointer into our private record; if it fills up, a Scan
// is triggered (elided lines) to make room.
362 while ( itRetired != itRetiredEnd ) {
363 dest.push( *itRetired );
364 if ( dest.isFull()) {
365 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
371 // TSan: write src.m_nSize, see a comment above
372 CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
374 CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
// Release the adopted record back to the free pool.
376 hprec->m_bFree.store(true, atomics::memory_order_release);
377 hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
// Fills the caller-supplied InternalState structure with configuration,
// per-record counters gathered by walking the global list, and (when
// statistics are enabled) the accumulated event counters.
// NOTE(review): this listing elides several original lines (e.g. 390, 392,
// 395, 399, 403-410, 415, 419, 422-427) - among them closing braces, some
// free-record counters, and the final `return stat;`; confirm upstream.
383 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
// Static configuration of this GC instance.
385 stat.nHPCount = m_nHazardPointerCount;
386 stat.nMaxThreadCount = m_nMaxThreadCount;
387 stat.nMaxRetiredPtrCount = m_nMaxRetiredPtrCount;
388 stat.nHPRecSize = sizeof( hplist_node )
389 + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
// Counters accumulated over the record list; zeroed before the walk.
391 stat.nHPRecAllocated =
393 stat.nTotalRetiredPtrCount =
394 stat.nRetiredPtrInFreeHPRecs = 0;
396 for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
397 ++stat.nHPRecAllocated;
398 stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
400 if ( hprec->m_bFree.load(atomics::memory_order_relaxed)) {
402 stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Event counters (meaningful only when statistics are enabled).
411 stat.evcAllocHPRec = m_Stat.m_AllocHPRec;
412 stat.evcRetireHPRec = m_Stat.m_RetireHPRec;
413 stat.evcAllocNewHPRec= m_Stat.m_AllocNewHPRec;
414 stat.evcDeleteHPRec = m_Stat.m_DeleteHPRec;
416 stat.evcScanCall = m_Stat.m_ScanCallCount;
417 stat.evcHelpScanCall = m_Stat.m_HelpScanCallCount;
418 stat.evcScanFromHelpScan= m_Stat.m_CallScanFromHelpScan;
420 stat.evcDeletedNode = m_Stat.m_DeletedNode;
421 stat.evcDeferredNode = m_Stat.m_DeferredNode;
428 }} // namespace cds::gc