2 This file is a part of libcds - Concurrent Data Structures library
4 (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016
6 Source code repo: http://github.com/khizmax/libcds/
7 Download: http://sourceforge.net/projects/libcds/files/
9 Redistribution and use in source and binary forms, with or without
10 modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 list of conditions and the following disclaimer.
15 * Redistributions in binary form must reproduce the above copyright notice,
16 this list of conditions and the following disclaimer in the documentation
17 and/or other materials provided with the distribution.
19 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23 FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25 SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 Hazard Pointers memory reclamation strategy implementation
37 2008.02.10 Maxim.Khiszinsky Created
40 #include <cds/gc/details/hp.h>
42 #include <algorithm> // std::sort
// Event counters are updated only when m_bStatEnabled is set;
// when statistics are disabled the macro argument is not evaluated at all.
45 #define CDS_HAZARDPTR_STATISTIC( _x ) if ( m_bStatEnabled ) { _x; }
47 namespace cds { namespace gc {
50 /// Max array size of retired pointers
51 static const size_t c_nMaxRetireNodeCount = c_nHazardPointerPerThread * c_nMaxThreadCount * 2;
// Singleton instance of the Hazard Pointer manager.
// Created by Construct(), reset to nullptr by Destruct().
53 GarbageCollector * GarbageCollector::m_pHZPManager = nullptr;
// Creates the GarbageCollector singleton if it does not exist yet.
// NOTE(review): the null-check/new pair is not synchronized here — presumably
// the library requires Construct() to be called before threads are spawned; confirm.
55 void CDS_STDCALL GarbageCollector::Construct( size_t nHazardPtrCount, size_t nMaxThreadCount, size_t nMaxRetiredPtrCount, scan_type nScanType )
57     if ( !m_pHZPManager ) {
58         m_pHZPManager = new GarbageCollector( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType );
// Destroys the singleton. When bDetachAll is set, all per-thread HP records
// are released first via detachAllThread().
62 void CDS_STDCALL GarbageCollector::Destruct( bool bDetachAll )
64     if ( m_pHZPManager ) {
66             m_pHZPManager->detachAllThread();
// Reset the singleton pointer so a subsequent Construct() can re-create it.
69         m_pHZPManager = nullptr;
// Constructor: zero-valued size arguments fall back to the compile-time
// defaults; the retired-pointer capacity is clamped up to at least
// c_nMaxRetireNodeCount (i.e. max(nMaxRetiredPtrCount, c_nMaxRetireNodeCount)).
73 GarbageCollector::GarbageCollector(
74     size_t nHazardPtrCount,
75     size_t nMaxThreadCount,
76     size_t nMaxRetiredPtrCount,
79     : m_pListHead( nullptr )
80     ,m_bStatEnabled( false )
81     ,m_nHazardPointerCount( nHazardPtrCount == 0 ? c_nHazardPointerPerThread : nHazardPtrCount )
82     ,m_nMaxThreadCount( nMaxThreadCount == 0 ? c_nMaxThreadCount : nMaxThreadCount )
83     ,m_nMaxRetiredPtrCount( nMaxRetiredPtrCount > c_nMaxRetireNodeCount ? nMaxRetiredPtrCount : c_nMaxRetireNodeCount )
84     ,m_nScanType( nScanType )
// Destructor: detaches the whole HP record list, drains each record's
// retired-pointer vector, and deletes every record.
// Runs single-threaded by assumption: the debug asserts below check that each
// record is either unowned, owned by the current (main) thread, or owned by a
// thread that is no longer alive.
87 GarbageCollector::~GarbageCollector()
89     CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
90     CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id() ;)
// Unlink the entire list first; relaxed order is enough since no other
// thread is expected to touch the list during destruction.
92     hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
93     m_pListHead.store( nullptr, atomics::memory_order_relaxed );
95     hplist_node * pNext = nullptr;
96     for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
97         assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
98             || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
99             || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
// Free every retired pointer still pending in this record.
101         details::retired_vector& vect = hprec->m_arrRetired;
102         details::retired_vector::iterator itRetired = vect.begin();
103         details::retired_vector::iterator itRetiredEnd = vect.end();
104         while ( itRetired != itRetiredEnd ) {
// Save the next pointer before deleting the record itself.
109         pNext = hprec->m_pNextNode;
110         hprec->m_bFree.store( true, atomics::memory_order_relaxed );
111         DeleteHPRec( hprec );
// Allocates a brand-new HP record node bound to this GarbageCollector
// and counts the allocation in the statistics.
115 inline GarbageCollector::hplist_node * GarbageCollector::NewHPRec()
117     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocNewHPRec )
118     return new hplist_node( *this );
// Destroys an HP record node. The record's retired-pointer array must have
// been fully drained by the caller (asserted below).
121 inline void GarbageCollector::DeleteHPRec( hplist_node * pNode )
123     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_DeleteHPRec )
124     assert( pNode->m_arrRetired.size() == 0 );
// Attaches the calling thread to an HP record.
// First pass: try to claim an unowned record from the global list by CAS-ing
// its owner id from null to the current thread id. If none is available,
// allocate a new record and push it onto the list head with a CAS loop.
128 details::hp_record * GarbageCollector::alloc_hp_record()
130     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_AllocHPRec )
133     const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
134     const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
136     // First try to reuse a retired (non-active) HP record
137     for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
138         cds::OS::ThreadId thId = nullThreadId;
// CAS failure means some other thread owns (or just claimed) this record.
139         if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
141         hprec->m_bFree.store( false, atomics::memory_order_release );
145     // No HP records available for reuse
146     // Allocate and push a new HP record
148     hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
149     hprec->m_bFree.store( false, atomics::memory_order_release );
// Standard lock-free push onto the head of the singly-linked record list.
151     hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
153         hprec->m_pNextNode = pOldHead;
154     } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Detaches the calling thread from its HP record: the record stays on the
// global list and is returned to the "unowned" pool by clearing its owner id,
// so alloc_hp_record() can hand it to another thread later.
159 void GarbageCollector::free_hp_record( details::hp_record * pRec )
161     assert( pRec != nullptr );
162     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_RetireHPRec )
167     hplist_node * pNode = static_cast<hplist_node *>( pRec );
// Release order publishes all prior writes to the record before it becomes claimable.
168     pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
// Walks the whole HP record list and releases every record that still has an
// owner thread. Called from Destruct( true ) to force-detach all threads.
171 void GarbageCollector::detachAllThread()
173     hplist_node * pNext = nullptr;
174     const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
175     for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
176         pNext = hprec->m_pNextNode;
177         if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
178             free_hp_record( hprec );
// Classic two-stage Scan from Michael's Hazard Pointers algorithm:
//   Stage 1 collects every non-null hazard pointer of every thread into plist,
//   Stage 2 re-pushes into the retired array only those retired pointers that
//   are still guarded (found in plist via binary search); the rest are freed.
183 void GarbageCollector::classic_scan( details::hp_record * pRec )
185     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
187     std::vector< void * > plist;
// Reserve worst-case capacity up front so push_back never reallocates.
188     plist.reserve( m_nMaxThreadCount * m_nHazardPointerCount );
189     assert( plist.size() == 0 );
191     // Stage 1: Scan HP list and insert non-null values in plist
193     hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
196         for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
198             void * hptr = pNode->m_hzp[i].get();
200                 plist.push_back( hptr );
202         pNode = pNode->m_pNextNode;
205     // Sort plist so that std::binary_search below can be used
206     std::sort( plist.begin(), plist.end() );
208     // Stage 2: Search plist
209     details::retired_vector& arrRetired = pRec->m_arrRetired;
211     details::retired_vector::iterator itRetired = arrRetired.begin();
212     details::retired_vector::iterator itRetiredEnd = arrRetired.end();
213     // arrRetired is not a std::vector!
214     // clear() just sets the item counter to 0; the items are not destroyed
218     std::vector< void * >::iterator itBegin = plist.begin();
219     std::vector< void * >::iterator itEnd = plist.end();
220     size_t nDeferredCount = 0;
221     while ( itRetired != itRetiredEnd ) {
// Still guarded by some thread's hazard pointer: keep it for the next scan.
222         if ( std::binary_search( itBegin, itEnd, itRetired->m_p ) ) {
223             arrRetired.push( *itRetired );
230     CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferredCount )
231     CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - arrRetired.begin()) - nDeferredCount )
// In-place variant of Scan: instead of building an auxiliary plist it sorts
// the retired array itself and marks guarded entries using the pointers' LSB,
// then compacts the still-guarded entries to the front of the array.
235 void GarbageCollector::inplace_scan( details::hp_record * pRec )
237     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_ScanCallCount )
239     // In-place scan algo uses LSB of retired ptr as a mark for internal purposes.
240     // It is correct if all retired pointers are at least 2-byte aligned (LSB is zero).
241     // If it is wrong, we use classic scan algorithm
243     // Check that all retired pointers have a zero LSB
244     // LSB is used for marking pointers that cannot be deleted yet
245     details::retired_vector::iterator itRetired = pRec->m_arrRetired.begin();
246     details::retired_vector::iterator itRetiredEnd = pRec->m_arrRetired.end();
247     for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
249             // found a pointer with LSB bit set - fall back to classic_scan
250             classic_scan( pRec );
255     // Sort retired pointer array
256     std::sort( itRetired, itRetiredEnd, cds::gc::details::retired_ptr::less );
// Adjacent duplicates after sorting mean the same pointer was retired twice.
263         while ( ++it != itRetiredEnd ) {
264             if ( it->m_p == itPrev->m_p )
265                 throw std::runtime_error( "Double free" );
271     // Search guarded pointers in retired array
272     hplist_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
275     details::retired_ptr dummyRetired;
// Records marked free hold no hazard pointers worth checking.
277         if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
278             for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
280                 void * hptr = pNode->m_hzp[i].get();
// Binary-search the sorted retired array for this hazard pointer.
282                     dummyRetired.m_p = hptr;
283                     details::retired_vector::iterator it = std::lower_bound( itRetired, itRetiredEnd, dummyRetired, cds::gc::details::retired_ptr::less );
284                     if ( it != itRetiredEnd && it->m_p == hptr ) {
285                         // Mark retired pointer as guarded
291         pNode = pNode->m_pNextNode;
295     // Move all marked (still guarded) pointers to the head of the array
297     auto itInsert = itRetired;
298     for ( auto it = itRetired; it != itRetiredEnd; ++it ) {
301             if ( itInsert != it )
306             // Retired pointer is unguarded and may be freed
// Shrink the retired array to only the deferred (still guarded) entries.
310     const size_t nDeferred = itInsert - itRetired;
311     pRec->m_arrRetired.size( nDeferred );
312     CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeferredNode += nDeferred )
313     CDS_HAZARDPTR_STATISTIC( m_Stat.m_DeletedNode += (itRetiredEnd - itRetired) - nDeferred )
// HelpScan (Michael's algorithm): the current thread adopts the retired
// pointers of HP records whose owner thread has terminated, moving them into
// its own record (pThis) so they can eventually be freed by Scan.
317 void GarbageCollector::HelpScan( details::hp_record * pThis )
319     CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount )
// pThis must be the record owned by the calling thread.
321     assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::get_current_thread_id() );
323     const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
324     const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
325     for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
327         // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it
328         if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
331         // Owns hprec if it is empty.
332         // Several threads may work concurrently so we use atomic technique only.
334             cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
// Claim the record only if it is unowned or its owner thread is dead.
// NOTE(review): two CAS branches appear here — presumably alternatives from a
// conditional-compilation block (with/without is_thread_alive support); confirm
// against the original source.
335             if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
336                 if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
340             curOwner = nullThreadId;
341             if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
346         // We own the thread successfully. Now, we can see whether hp_record has retired pointers.
347         // If it has ones then we move to pThis that is private for current thread.
348         details::retired_vector& src = hprec->m_arrRetired;
349         details::retired_vector& dest = pThis->m_arrRetired;
350         assert( !dest.isFull());
351         details::retired_vector::iterator itRetired = src.begin();
353         // TSan can issue a warning here:
354         //      read src.m_nSize in src.end()
355         //      write src.m_nSize in src.clear()
356         // This is false positive since we own hprec
357         CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN;
358         details::retired_vector::iterator itRetiredEnd = src.end();
359         CDS_TSAN_ANNOTATE_IGNORE_READS_END;
// Move each adopted retired pointer into our own record; if our record fills
// up, run Scan to drain it before continuing.
361         while ( itRetired != itRetiredEnd ) {
362             dest.push( *itRetired );
363             if ( dest.isFull()) {
364                 CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_CallScanFromHelpScan )
370         // TSan: write src.m_nSize, see a comment above
371         CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN;
373         CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;
// Release the adopted record back to the unowned pool.
375         hprec->m_bFree.store(true, atomics::memory_order_release);
376         hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
// Fills the caller-supplied InternalState structure with configuration
// values, a snapshot of the HP record list (counts of records and pending
// retired pointers), and the accumulated event counters. Returns stat.
382 GarbageCollector::InternalState& GarbageCollector::getInternalState( GarbageCollector::InternalState& stat) const
384     stat.nHPCount                = m_nHazardPointerCount;
385     stat.nMaxThreadCount         = m_nMaxThreadCount;
386     stat.nMaxRetiredPtrCount     = m_nMaxRetiredPtrCount;
// Approximate per-record footprint: node header plus the retired array.
387     stat.nHPRecSize              = sizeof( hplist_node )
388                                     + sizeof(details::retired_ptr) * m_nMaxRetiredPtrCount;
390     stat.nHPRecAllocated         =
392     stat.nTotalRetiredPtrCount   =
393     stat.nRetiredPtrInFreeHPRecs = 0;
// Walk the record list and accumulate live counters.
395     for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
396         ++stat.nHPRecAllocated;
397         stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
399         if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
401             stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
// Copy out the event counters gathered via CDS_HAZARDPTR_STATISTIC.
410     stat.evcAllocHPRec        = m_Stat.m_AllocHPRec;
411     stat.evcRetireHPRec       = m_Stat.m_RetireHPRec;
412     stat.evcAllocNewHPRec     = m_Stat.m_AllocNewHPRec;
413     stat.evcDeleteHPRec       = m_Stat.m_DeleteHPRec;
415     stat.evcScanCall          = m_Stat.m_ScanCallCount;
416     stat.evcHelpScanCall      = m_Stat.m_HelpScanCallCount;
417     stat.evcScanFromHelpScan  = m_Stat.m_CallScanFromHelpScan;
419     stat.evcDeletedNode       = m_Stat.m_DeletedNode;
420     stat.evcDeferredNode      = m_Stat.m_DeferredNode;
427 }} // namespace cds::gc