/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <vector>

#include <cds/gc/dhp.h>
#include <cds/os/thread.h>

namespace cds { namespace gc { namespace dhp {

    namespace {

        void* default_alloc_memory( size_t size )
        {
            return new uintptr_t[( size + sizeof( uintptr_t ) - 1 ) / sizeof( uintptr_t )];
        }

        void default_free_memory( void* p )
        {
            delete[] reinterpret_cast<uintptr_t*>( p );
        }

        struct defaults {
            static size_t const c_extended_guard_block_size = 16;
        };

        void* ( *s_alloc_memory )( size_t size ) = default_alloc_memory;
        void ( *s_free_memory )( void* p ) = default_free_memory;

        template <typename T>
        class allocator
        {
        public:
            typedef T value_type;

            allocator() {}
            allocator( allocator const& ) {}
            template <class U>
            explicit allocator( allocator<U> const& ) {}

            static T* allocate( size_t nCount )
            {
                return reinterpret_cast<T*>( s_alloc_memory( sizeof( value_type ) * nCount ));
            }

            static void deallocate( T* p, size_t /*nCount*/ )
            {
                s_free_memory( reinterpret_cast<void*>( p ));
            }
        };

        stat s_postmortem_stat;

    } // namespace

    /*static*/ CDS_EXPORT_API smr* smr::instance_ = nullptr;
    thread_local thread_data* tls_ = nullptr;

    CDS_EXPORT_API hp_allocator::~hp_allocator()
    {
        while ( guard_block* gp = static_cast<guard_block*>( free_list_.get())) {
            gp->~guard_block();
            s_free_memory( gp );
        }
    }

    CDS_EXPORT_API guard_block* hp_allocator::alloc()
    {
        guard_block* gb;
        auto block = free_list_.get();
        if ( block )
            gb = static_cast<guard_block*>( block );
        else {
            // allocate a new block
            gb = new( s_alloc_memory( sizeof( guard_block ) + sizeof( guard ) * defaults::c_extended_guard_block_size )) guard_block;
            new( gb->first()) guard[defaults::c_extended_guard_block_size];

            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
        }

        // link the guards in the block into a free list
        guard* p = gb->first();
        for ( guard* last = p + defaults::c_extended_guard_block_size - 1; p != last; ++p ) {
            p->clear( atomics::memory_order_relaxed );
            p->next_ = p + 1;
        }
        p->next_ = nullptr;
        p->clear( atomics::memory_order_relaxed );

        return gb;
    }

    CDS_EXPORT_API retired_allocator::~retired_allocator()
    {
        while ( retired_block* rb = static_cast<retired_block*>( free_list_.get())) {
            rb->~retired_block();
            s_free_memory( rb );
        }
    }

    CDS_EXPORT_API retired_block* retired_allocator::alloc()
    {
        retired_block* rb;
        auto block = free_list_.get();
        if ( block )
            rb = static_cast<retired_block*>( block );
        else {
            // allocate a new block
            rb = new( s_alloc_memory( sizeof( retired_block ) + sizeof( retired_ptr ) * retired_block::c_capacity )) retired_block;
            new( rb->first()) retired_ptr[retired_block::c_capacity];
            CDS_HPSTAT( block_allocated_.fetch_add( 1, atomics::memory_order_relaxed ));
        }

        rb->next_ = nullptr;
        return rb;
    }

    struct smr::thread_record: thread_data
    {
        atomics::atomic<thread_record*>    m_pNextNode; ///< next hazard ptr record in list
        atomics::atomic<cds::OS::ThreadId> m_idOwner;   ///< owner thread id; 0 - the record is free (not owned)
        atomics::atomic<bool>              m_bFree;     ///< true if the record is free (not owned)

        thread_record( guard* guards, size_t guard_count )
            : thread_data( guards, guard_count )
        {}
    };

    /*static*/ CDS_EXPORT_API thread_data* smr::tls()
    {
        assert( tls_ != nullptr );
        return tls_;
    }

    /*static*/ CDS_EXPORT_API void smr::set_memory_allocator(
        void* ( *alloc_func )( size_t size ),
        void ( *free_func )( void* p )
    )
    {
        // The memory allocation functions may be set only BEFORE initializing DHP SMR
        assert( instance_ == nullptr );

        s_alloc_memory = alloc_func;
        s_free_memory = free_func;
    }

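    /*
        Usage sketch (illustrative only; my_alloc/my_free are hypothetical user
        functions): installing a custom allocator. The hook must be installed
        before the DHP singleton is constructed - the assert above enforces that.

            void* my_alloc( size_t size ) { return ::malloc( size ); }
            void  my_free( void* p )      { ::free( p ); }

            // before constructing cds::gc::DHP:
            cds::gc::dhp::smr::set_memory_allocator( my_alloc, my_free );
    */
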
    /*static*/ CDS_EXPORT_API void smr::construct( size_t nInitialHazardPtrCount )
    {
        assert( instance_ == nullptr );
        instance_ = new( s_alloc_memory( sizeof( smr ))) smr( nInitialHazardPtrCount );
    }

    /*static*/ CDS_EXPORT_API void smr::destruct( bool bDetachAll )
    {
        if ( instance_ ) {
            if ( bDetachAll )
                instance_->detach_all_thread();

            instance_->~smr();
            s_free_memory( instance_ );
            instance_ = nullptr;
        }
    }

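    /*
        Typical lifecycle sketch (a minimal example; construct()/destruct() are
        normally invoked through the public cds::gc::DHP wrapper rather than
        called directly):

            #include <cds/init.h>
            #include <cds/gc/dhp.h>

            int main() {
                cds::Initialize();
                {
                    cds::gc::DHP dhpGC;   // ctor calls smr::construct()
                    // ... create and use DHP-based containers ...
                }                         // ~DHP() calls smr::destruct( true )
                cds::Terminate();
                return 0;
            }
    */
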
    CDS_EXPORT_API smr::smr( size_t nInitialHazardPtrCount )
        : thread_list_( nullptr )
        , initial_hazard_count_( nInitialHazardPtrCount < 4 ? 16 : nInitialHazardPtrCount )
        , last_plist_size_( initial_hazard_count_ * 64 )
    {}

    CDS_EXPORT_API smr::~smr()
    {
        CDS_DEBUG_ONLY( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
        CDS_DEBUG_ONLY( const cds::OS::ThreadId mainThreadId = cds::OS::get_current_thread_id(); )

        CDS_HPSTAT( statistics( s_postmortem_stat ));

        thread_record* pHead = thread_list_.load( atomics::memory_order_relaxed );
        thread_list_.store( nullptr, atomics::memory_order_relaxed );

        thread_record* pNext = nullptr;
        for ( thread_record* hprec = pHead; hprec; hprec = pNext ) {
            assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
                || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
                || !cds::OS::is_thread_alive( hprec->m_idOwner.load( atomics::memory_order_relaxed ))
            );

            retired_array& retired = hprec->retired_;

            // free retired data
            for ( retired_block* block = retired.list_head_; block && block != retired.current_block_; block = block->next_ ) {
                for ( retired_ptr* p = block->first(); p != block->last(); ++p ) {
                    p->free();
                    CDS_HPSTAT( ++s_postmortem_stat.free_count );
                }
            }
            if ( retired.current_block_ ) {
                for ( retired_ptr* p = retired.current_block_->first(); p != retired.current_cell_; ++p ) {
                    p->free();
                    CDS_HPSTAT( ++s_postmortem_stat.free_count );
                }
            }
            hprec->retired_.fini();
            hprec->hazards_.clear();

            pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
            hprec->m_bFree.store( true, atomics::memory_order_relaxed );
            destroy_thread_data( hprec );
        }
    }

    /*static*/ CDS_EXPORT_API void smr::attach_thread()
    {
        if ( tls_ == nullptr )
            tls_ = instance().alloc_thread_data();
    }

    /*static*/ CDS_EXPORT_API void smr::detach_thread()
    {
        thread_data* rec = tls_;
        if ( rec ) {
            tls_ = nullptr;
            instance().free_thread_data( static_cast<thread_record*>( rec ));
        }
    }

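    /*
        Every thread that touches DHP-protected data must be attached to the SMR
        infrastructure. A minimal sketch using the libcds threading manager,
        which invokes attach_thread()/detach_thread() under the hood:

            #include <cds/threading/model.h>

            void worker() {
                cds::threading::Manager::attachThread();
                // ... work with DHP-based containers ...
                cds::threading::Manager::detachThread();
            }
    */
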
    CDS_EXPORT_API void smr::detach_all_thread()
    {
        thread_record* pNext = nullptr;
        const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;

        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_relaxed ); hprec; hprec = pNext ) {
            pNext = hprec->m_pNextNode.load( atomics::memory_order_relaxed );
            if ( hprec->m_idOwner.load( atomics::memory_order_relaxed ) != nullThreadId )
                free_thread_data( hprec );
        }
    }

    CDS_EXPORT_API smr::thread_record* smr::create_thread_data()
    {
        size_t const guard_array_size = sizeof( guard ) * initial_hazard_count_;

        /*
            The memory is allocated as one contiguous block:

            +--------------------------+
            | thread_record            |
            |   (hazards_, retired_)   |
            |--------------------------|
            | guard[]                  |
            |   initial hazard pointer |
            |   array                  |
            +--------------------------+
        */
        char* mem = reinterpret_cast<char*>( s_alloc_memory( sizeof( thread_record ) + guard_array_size ));
        return new( mem ) thread_record(
            reinterpret_cast<guard*>( mem + sizeof( thread_record )), initial_hazard_count_
        );
    }

    /*static*/ CDS_EXPORT_API void smr::destroy_thread_data( thread_record* pRec )
    {
        // all retired pointers must be freed by this point
        pRec->~thread_record();
        s_free_memory( pRec );
    }

    CDS_EXPORT_API smr::thread_record* smr::alloc_thread_data()
    {
        thread_record* hprec = nullptr;
        const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
        const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();

        // First try to reuse a free (non-active) DHP record
        for ( hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
            cds::OS::ThreadId thId = nullThreadId;
            if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_relaxed, atomics::memory_order_relaxed ))
                continue;
            hprec->m_bFree.store( false, atomics::memory_order_release );
            break;
        }

        if ( hprec == nullptr ) {
            // No records are available for reuse:
            // allocate a new record and push it onto the thread list
            hprec = create_thread_data();
            hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );

            thread_record* pOldHead = thread_list_.load( atomics::memory_order_relaxed );
            do {
                hprec->m_pNextNode.store( pOldHead, atomics::memory_order_relaxed );
            } while ( !thread_list_.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_acquire ));
        }

        hprec->hazards_.init();
        hprec->retired_.init();

        return hprec;
    }

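    /*
        The publication above is the classic lock-free stack (Treiber) push:
        link the new record to the current head, then swing the head with CAS.
        A generic sketch of the same pattern (illustrative only):

            template <typename Node>
            void push( atomics::atomic<Node*>& head, Node* node ) {
                Node* old_head = head.load( atomics::memory_order_relaxed );
                do {
                    node->next.store( old_head, atomics::memory_order_relaxed );
                } while ( !head.compare_exchange_weak( old_head, node,
                    atomics::memory_order_release, atomics::memory_order_acquire ));
            }

        Records are never unlinked from thread_list_: a record released by its
        owner is only marked free and is reused by the next attaching thread.
    */
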
    CDS_EXPORT_API void smr::free_thread_data( thread_record* pRec )
    {
        assert( pRec != nullptr );

        pRec->hazards_.clear();
        scan( pRec );
        help_scan( pRec );

        if ( pRec->retired_.empty() ) {
            pRec->retired_.fini();
            pRec->m_bFree.store( true, std::memory_order_release );
        }
        else {
            // free all empty blocks
            retired_block* free_block = pRec->retired_.current_block_->next_;
            if ( free_block ) {
                pRec->retired_.current_block_->next_ = nullptr;
                while ( free_block ) {
                    retired_block* next = free_block->next_;
                    retired_allocator_.free( free_block );
                    free_block = next;
                    --pRec->retired_.block_count_;
                }
            }
        }

        pRec->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
    }

    namespace {

        typedef std::vector<void*, allocator<void*>> hp_vector;

        inline void copy_hazards( hp_vector& vect, guard const* arr, size_t size )
        {
            for ( guard const* end = arr + size; arr != end; ++arr ) {
                void* hp = arr->get();
                if ( hp )
                    vect.push_back( hp );
            }
        }

        inline size_t retire_data( hp_vector const& plist, retired_array& stg, retired_block* block, size_t block_size )
        {
            auto hp_begin = plist.begin();
            auto hp_end = plist.end();
            size_t count = 0;

            for ( retired_ptr* p = block->first(), *end = p + block_size; p != end; ++p ) {
                if ( cds_unlikely( std::binary_search( hp_begin, hp_end, p->m_p )))
                    stg.safe_push( p );  // the pointer is still guarded - keep it in the retired array
                else {
                    p->free();
                    ++count;
                }
            }

            return count;
        }

    } // namespace

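    /*
        retire_data() relies on std::binary_search(), which requires a sorted
        range; scan() sorts plist before calling it, so each membership test
        costs O(log H) instead of O(H), where H is the total hazard pointer
        count. A minimal illustration of the test:

            std::sort( plist.begin(), plist.end());
            bool is_protected = std::binary_search( plist.begin(), plist.end(), p );
    */
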
    CDS_EXPORT_API void smr::scan( thread_data* pThreadRec )
    {
        thread_record* pRec = static_cast<thread_record*>( pThreadRec );

        CDS_HPSTAT( ++pRec->scan_call_count_ );

        hp_vector plist;
        size_t plist_size = last_plist_size_.load( std::memory_order_relaxed );
        plist.reserve( plist_size );

        // Stage 1: scan the HP list and insert non-null values into plist
        thread_record* pNode = thread_list_.load( atomics::memory_order_acquire );
        while ( pNode ) {
            if ( pNode->m_idOwner.load( std::memory_order_relaxed ) != cds::OS::c_NullThreadId ) {
                copy_hazards( plist, pNode->hazards_.array_, pNode->hazards_.initial_capacity_ );

                for ( guard_block* block = pNode->hazards_.extended_list_; block; block = block->next_ )
                    copy_hazards( plist, block->first(), defaults::c_extended_guard_block_size );
            }

            pNode = pNode->m_pNextNode.load( atomics::memory_order_relaxed );
        }

        // Store the plist size for the next scan() call (vector reallocation optimization)
        if ( plist.size() > plist_size )
            last_plist_size_.compare_exchange_weak( plist_size, plist.size(), std::memory_order_relaxed, std::memory_order_relaxed );

        // Sort plist so that retire_data() can use binary search
        std::sort( plist.begin(), plist.end());

        // Stage 2: search plist for each retired pointer
        size_t free_count = 0;
        retired_block* last_block = pRec->retired_.current_block_;
        retired_ptr* last_block_cell = pRec->retired_.current_cell_;

        pRec->retired_.current_block_ = pRec->retired_.list_head_;
        pRec->retired_.current_cell_ = pRec->retired_.current_block_->first();

        for ( retired_block* block = pRec->retired_.list_head_; block; block = block->next_ ) {
            bool const end_block = block == last_block;
            size_t const size = end_block ? last_block_cell - block->first() : retired_block::c_capacity;

            free_count += retire_data( plist, pRec->retired_, block, size );

            if ( end_block )
                break;
        }
        CDS_HPSTAT( pRec->free_call_count_ += free_count );

        // If nothing could be freed and the retired array is full, extend it
        if ( free_count == 0 && last_block == pRec->retired_.list_tail_ && last_block_cell == last_block->last())
            pRec->retired_.extend();
    }

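    /*
        scan() is normally triggered through the public API when a thread's
        retired array fills up. A hedged usage sketch (Foo and foo_disposer are
        hypothetical):

            struct Foo { int payload; };
            struct foo_disposer {
                void operator()( Foo* p ) const { delete p; }
            };

            // defer reclamation of pFoo; when the retired array is full,
            // the current thread runs scan()
            cds::gc::DHP::retire<foo_disposer>( pFoo );
    */
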
    CDS_EXPORT_API void smr::help_scan( thread_data* pThis )
    {
        assert( static_cast<thread_record*>( pThis )->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::get_current_thread_id());
        CDS_HPSTAT( ++pThis->help_scan_call_count_ );

        const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
        const cds::OS::ThreadId curThreadId = cds::OS::get_current_thread_id();
        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
            if ( hprec == static_cast<thread_record*>( pThis ))
                continue;

            // If m_bFree == true then hprec->retired_ is empty - there is nothing to scan
            if ( hprec->m_bFree.load( atomics::memory_order_acquire )) {
                assert( hprec->retired_.empty());
                continue;
            }

            // Try to become the owner of hprec.
            // Several threads may run help_scan() concurrently, so ownership is claimed with CAS
            {
                cds::OS::ThreadId curOwner = hprec->m_idOwner.load( atomics::memory_order_relaxed );
                if ( curOwner == nullThreadId || !cds::OS::is_thread_alive( curOwner )) {
                    if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        continue;
                }
                else
                    continue;
            }

            // We own the thread record. If it has retired pointers,
            // move them to pThis, which is private to the current thread.
            retired_array& src = hprec->retired_;
            retired_array& dest = pThis->retired_;

            for ( retired_block* block = src.list_head_; block; block = block->next_ ) {
                retired_ptr* last = block == src.current_block_ ? src.current_cell_ : block->last();
                for ( retired_ptr* p = block->first(); p != last; ++p ) {
                    if ( !dest.push( *p ))
                        scan( pThis );
                }

                if ( block == src.current_block_ )
                    break;
            }
            src.fini();

            hprec->m_bFree.store( true, atomics::memory_order_relaxed );
            hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );

            scan( pThis );
        }
    }

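    /*
        The liveness check above is platform-specific. On POSIX systems, for
        example, cds::OS::is_thread_alive() is implemented roughly as (sketch):

            bool is_thread_alive( pthread_t id ) {
                // signal 0 performs error checking only; no signal is sent
                return pthread_kill( id, 0 ) == 0;
            }
    */
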
    CDS_EXPORT_API void smr::statistics( stat& st )
    {
        st.clear();
#   ifdef CDS_ENABLE_HPSTAT
        for ( thread_record* hprec = thread_list_.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode.load( atomics::memory_order_relaxed )) {
            ++st.thread_rec_count;
            st.guard_allocated      += hprec->hazards_.alloc_guard_count_;
            st.guard_freed          += hprec->hazards_.free_guard_count_;
            st.hp_extend_count      += hprec->hazards_.extend_call_count_;
            st.retired_count        += hprec->retired_.retire_call_count_;
            st.retired_extend_count += hprec->retired_.extend_call_count_;
            st.free_count           += hprec->free_call_count_;
            st.scan_count           += hprec->scan_call_count_;
            st.help_scan_count      += hprec->help_scan_call_count_;
        }

        st.hp_block_count = hp_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
        st.retired_block_count = retired_allocator_.block_allocated_.load( atomics::memory_order_relaxed );
#   endif
    }

}}} // namespace cds::gc::dhp

/*static*/ cds::gc::DHP::stat const& cds::gc::DHP::postmortem_statistics()
{
    return cds::gc::dhp::s_postmortem_stat;
}
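
/*
    Usage sketch: when CDS_ENABLE_HPSTAT is defined, post-mortem statistics are
    collected in ~smr() and can be inspected after the DHP singleton has been
    destroyed:

        {
            cds::gc::DHP dhpGC;
            // ... run the workload ...
        }
        cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics();
        std::cout << "retired: " << st.retired_count
                  << ", freed: " << st.free_count << '\n';
*/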