3 #ifndef __CDS_GC_DETAILS_DHP_H
4 #define __CDS_GC_DETAILS_DHP_H
6 #include <mutex> // unique_lock
7 #include <cds/cxx11_atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/lock/spinlock.h>
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 # pragma warning(push)
15 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
19 namespace cds { namespace gc {
21 /// Dynamic Hazard Pointer reclamation schema
23 The cds::gc::dhp namespace and its members are the internal representation of the GC and should not be used directly.
24 Use the cds::gc::DHP class in your code.
26 The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is the
27 GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
28 by constructing a cds::gc::DHP object at the beginning of your main().
29 See the cds::gc::DHP class for an explanation.
31 \par Implementation issues
32 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
33 It seems that this solution should not introduce a significant performance bottleneck, because each thread has its own set
34 of guards allocated from the global list of free guards, and the global list is accessed only when
35 all of the thread's guards are busy. In this case the thread allocates the next block of guards from the global list.
36 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
40 // Forward declarations
42 template <size_t Count> class GuardArray;
44 class GarbageCollector;
46 /// Retired pointer type
47 typedef cds::gc::details::retired_ptr retired_ptr;
49 using cds::gc::details::free_retired_ptr_func;
51 /// Details of Dynamic Hazard Pointer algorithm
54 // Forward declaration
57 /// Retired pointer buffer node
/**
    Wraps one retired pointer together with two intrusive links:
    \p m_pNext chains nodes inside \p retired_ptr_buffer, while
    \p m_pNextFree chains nodes inside the free lists of \p retired_ptr_pool.
*/
58 struct retired_ptr_node {
59 retired_ptr m_ptr ; ///< retired pointer
60 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
61 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
64 /// Internal guard representation
66 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
67 typedef void * guarded_ptr ; ///< type of value guarded
69 atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
71 atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
72 atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
74 guard_data * pThreadNext ; ///< next item of thread's local list of guards
/// Constructs a free guard: all links are null, no pointer is guarded
76 guard_data() CDS_NOEXCEPT
78 , pGlobalNext( nullptr )
79 , pNextFree( nullptr )
80 , pThreadNext( nullptr )
/// Resets the guard to the free state (no pointer guarded)
83 void init() CDS_NOEXCEPT
85 pPost.store( nullptr, atomics::memory_order_relaxed );
88 /// Checks if the guard is free, that is, it does not contain any pointer guarded
89 bool isFree() const CDS_NOEXCEPT
91 return pPost.load( atomics::memory_order_acquire ) == nullptr;
96 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
99 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
101 atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
102 atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
103 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
106 Unfortunately, access to the list of free guards is lock-based.
107 Lock-free manipulations with guard free-list are ABA-prone.
108 TODO: working with m_FreeGuardList in lock-free manner.
112 /// Allocates new guard from the heap. The function uses aligned allocator
113 guard_data * allocNew()
115 //TODO: the allocator should make block allocation
117 details::guard_data * pGuard = m_GuardAllocator.New();
119 // Link guard to the list
120 // m_GuardList is accumulated list and it cannot support concurrent deletion,
121 // so, ABA problem is impossible for it
122 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
124 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
125 // pHead is changed by compare_exchange_weak
126 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
/// Constructs an empty allocator: both the global guard list and the free list are empty
134 guard_allocator() CDS_NOEXCEPT
135 : m_GuardList( nullptr )
136 , m_FreeGuardList( nullptr )
// Walks the global list of allocated guards and deletes every guard
143 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
144 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
145 m_GuardAllocator.Delete( pData );
149 /// Allocates a guard from free list or from heap if free list is empty
152 // Try to pop a guard from free-list
153 details::guard_data * pGuard;
156 std::unique_lock<SpinLock> al( m_freeListLock );
157 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
159 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
168 /// Frees guard \p pGuard
170 The function places the guard \p pGuard into free-list
172 void free( guard_data * pGuard ) CDS_NOEXCEPT
174 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
176 std::unique_lock<SpinLock> al( m_freeListLock );
177 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
178 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
181 /// Allocates a list of guards
183 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
185 cds::gc::dhp::ThreadGC supporting method
187 guard_data * allocList( size_t nCount )
189 assert( nCount != 0 );
197 // The guard list allocated is private for the thread,
198 // so, we can use relaxed memory order
200 guard_data * p = alloc();
201 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
// Terminate the list: the tail guard has no successor
205 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
210 /// Frees list of guards
212 The list \p pList is linked by guard's \p pThreadNext field.
214 cds::gc::dhp::ThreadGC supporting method
216 void freeList( guard_data * pList ) CDS_NOEXCEPT
218 assert( pList != nullptr );
// Clear every guard in the list and re-link it via pNextFree, then splice
// the whole list onto the head of the global free list under the spin-lock
220 guard_data * pLast = pList;
221 while ( pLast->pThreadNext ) {
222 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
224 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
228 std::unique_lock<SpinLock> al( m_freeListLock );
229 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
230 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
233 /// Returns the list's head of guards allocated
234 guard_data * begin() CDS_NOEXCEPT
236 return m_GuardList.load(atomics::memory_order_acquire);
240 /// Retired pointer buffer
242 The buffer of retired nodes ready for liberating.
243 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
246 class retired_ptr_buffer
248 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
249 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
252 CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
// Dtor: the buffer must already have been drained via privatize()
257 ~retired_ptr_buffer() CDS_NOEXCEPT
259 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
262 /// Pushes new node into the buffer. Returns current buffer size
263 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
265 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
267 node.m_pNext = pHead;
268 // pHead is changed by compare_exchange_weak
269 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
271 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
274 /// Result of \ref dhp_gc_privatve "privatize" function.
276 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
278 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
280 /// Gets current list of retired pointer and clears the list
281 /**@anchor dhp_gc_privatve
283 privatize_result privatize() CDS_NOEXCEPT
285 privatize_result res;
// Detach the whole list atomically; subsequent push() calls start a fresh list
286 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
288 // Item counter is needed only as a threshold for \p scan() function
289 // So, we may clear the item counter without synchronization with m_pHead
290 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
294 /// Returns current size of buffer (approximate)
295 size_t size() const CDS_NOEXCEPT
297 return m_nItemCount.load(atomics::memory_order_relaxed);
301 /// Pool of retired pointers
303 The class acts as an allocator of retired node.
304 Retired pointers are linked in the lock-free list.
306 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
307 class retired_ptr_pool {
309 typedef retired_ptr_node item;
311 /// Count of items in block
312 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
316 block * pNext ; ///< next block
317 item items[m_nItemPerBlock] ; ///< item array
320 atomics::atomic<block *> m_pBlockListHead ; ///< head of allocated block list
322 // To solve ABA problem we use epoch-based approach
323 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
324 atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
325 atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
326 atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
328 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
333 // allocate new block
334 block * pNew = m_BlockAllocator.New();
336 // link items within the block
337 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
338 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
339 pItem->m_pNextFree = pItem + 1;
340 CDS_STRICT_DO( pItem->m_pNext = nullptr );
343 // link new block to block list
345 block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
348 // pHead is changed by compare_exchange_weak
349 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
352 // link block's items to free list
354 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
356 pLastItem->m_pNextFree = pHead;
357 // pHead is changed by compare_exchange_weak
358 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Maps the monotonically growing epoch counter to an index in m_pEpochFree
// (c_nEpochCount is a power of two, so the mask is a fast modulo)
362 unsigned int current_epoch() const CDS_NOEXCEPT
364 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
// Index of the previous epoch's free list; free_range() pushes there
367 unsigned int next_epoch() const CDS_NOEXCEPT
369 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
374 : m_pBlockListHead( nullptr )
376 , m_pGlobalFreeHead( nullptr )
378 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
379 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
// Dtor body: delete every allocated block
387 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
389 m_BlockAllocator.Delete( pBlock );
393 /// Increments current epoch
394 void inc_epoch() CDS_NOEXCEPT
396 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
399 /// Allocates new retired pointer
400 retired_ptr_node& alloc()
405 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
408 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
412 // Epoch free list is empty
413 // Alloc from global free list
415 pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
421 // pItem is changed by compare_exchange_weak
422 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
425 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
429 /// Allocates and initializes new retired pointer
430 retired_ptr_node& alloc( const retired_ptr& p )
432 retired_ptr_node& node = alloc();
437 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
439 The list is linked on the m_pNextFree field
441 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
443 assert( pHead != nullptr );
444 assert( pTail != nullptr );
449 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
450 pTail->m_pNextFree = pCurHead;
451 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
455 /// Uninitialized guard
458 friend class ThreadGC;
460 details::guard_data * m_pGuard ; ///< Pointer to guard data
462 /// Initialize empty guard.
463 CDS_CONSTEXPR guard() CDS_NOEXCEPT
464 : m_pGuard( nullptr )
467 /// The object is not copy-constructible
468 guard( guard const& ) = delete;
470 /// Object destructor, does nothing
471 ~guard() CDS_NOEXCEPT
474 /// Guards pointer \p p
475 void set( void * p ) CDS_NOEXCEPT
477 assert( m_pGuard != nullptr );
478 m_pGuard->pPost.store( p, atomics::memory_order_release );
479 //CDS_COMPILER_RW_BARRIER;
/// Clears the guard (no pointer guarded afterwards)
483 void clear() CDS_NOEXCEPT
485 assert( m_pGuard != nullptr );
486 m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
487 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
490 /// Guards pointer \p p
491 template <typename T>
492 T * operator =(T * p) CDS_NOEXCEPT
494 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
498 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
504 public: // for ThreadGC.
506 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
507 the compiler produces error:
'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard'
is protected
508 despite the fact that ThreadGC is declared as friend for guard class.
509 We would not like to declare the m_pGuard member as a public one.
510 Therefore, we have to add set_guard/get_guard public functions
513 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
515 assert( m_pGuard == nullptr );
519 /// Get current guard data
520 details::guard_data * get_guard() CDS_NOEXCEPT
524 /// Get current guard data
525 details::guard_data * get_guard() const CDS_NOEXCEPT
531 } // namespace details
535 This class represents auto guard: ctor allocates a guard from guard pool,
536 dtor returns the guard back to the pool of free guards.
538 class Guard: public details::guard
540 typedef details::guard base_class;
541 friend class ThreadGC;
543 ThreadGC& m_gc ; ///< ThreadGC object of current thread
545 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
546 Guard( ThreadGC& gc ) CDS_NOEXCEPT;
548 /// Returns guard allocated back to pool of free guards
549 ~Guard() CDS_NOEXCEPT; // inline after GarbageCollector
551 /// Returns DHP GC object
552 ThreadGC& getGC() CDS_NOEXCEPT
557 /// Guards pointer \p p
558 template <typename T>
559 T * operator =(T * p) CDS_NOEXCEPT
561 return base_class::operator =<T>( p );
/// Assigns \p nullptr — forwards to details::guard::operator=(std::nullptr_t)
564 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
566 return base_class::operator =(nullptr);
572 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
573 dtor returns the guards allocated back to the pool.
575 template <size_t Count>
578 details::guard m_arr[Count] ; ///< array of guards
579 ThreadGC& m_gc ; ///< ThreadGC object of current thread
580 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
583 /// Rebind array for other size \p OtherCount
584 template <size_t OtherCount>
586 typedef GuardArray<OtherCount> other ; ///< rebinding result
590 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
591 GuardArray( ThreadGC& gc ) CDS_NOEXCEPT; // inline below
593 /// The object is not default-constructible
594 GuardArray() = delete;
596 /// The object is not copy-constructible
597 GuardArray( GuardArray const& ) = delete;
599 /// Returns guards allocated back to pool
600 ~GuardArray() CDS_NOEXCEPT; // inline below
602 /// Returns the capacity of array
603 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
608 /// Returns DHP ThreadGC object
609 ThreadGC& getGC() CDS_NOEXCEPT
614 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
615 details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
617 assert( nIndex < capacity() );
618 return m_arr[nIndex];
621 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
622 const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
624 assert( nIndex < capacity() );
625 return m_arr[nIndex];
628 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
629 template <typename T>
630 void set( size_t nIndex, T * p ) CDS_NOEXCEPT
632 assert( nIndex < capacity() );
633 m_arr[nIndex].set( p );
636 /// Clears (sets to \p nullptr) the guard \p nIndex
637 void clear( size_t nIndex ) CDS_NOEXCEPT
639 assert( nIndex < capacity() );
640 m_arr[nIndex].clear();
643 /// Clears all guards in the array
644 void clearAll() CDS_NOEXCEPT
646 for ( size_t i = 0; i < capacity(); ++i )
651 /// Memory manager (Garbage collector)
652 class CDS_EXPORT_API GarbageCollector
655 friend class ThreadGC;
657 /// Internal GC statistics
660 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
661 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guards
665 , m_nFreeGuardCount(0)
670 /// Exception "No GarbageCollector object is created"
671 CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
673 /// Internal GC statistics
676 size_t m_nGuardCount ; ///< Total guard count
677 size_t m_nFreeGuardCount ; ///< Count of free guards
681 , m_nFreeGuardCount(0)
// Copies the atomic counters of internal_stat into this plain-value snapshot
684 InternalState& operator =( internal_stat const& s )
686 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
687 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
694 static GarbageCollector * m_pManager ; ///< GC global instance
696 details::guard_allocator<> m_GuardPool ; ///< Guard pool
697 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
698 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
700 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call \p scan()
701 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
703 internal_stat m_stat ; ///< Internal statistics
704 bool m_bStatEnabled ; ///< Internal Statistics enabled
707 /// Initializes DHP memory manager singleton
709 This member function creates and initializes DHP global object.
710 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
711 this member function is called in the \p main() function. See cds::gc::dhp for example.
712 After calling of this function you may use CDS data structures based on cds::gc::DHP.
715 \li \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
716 the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
717 If \p nLiberateThreshold <= 1, \p scan() would be called after each \ref dhp_gc_retirePtr "retirePtr" call.
718 \li \p nInitialThreadGuardCount - initial count of guards allocated for ThreadGC. When a thread
719 is initialized the GC allocates local guard pool for the thread from common guard pool.
720 If needed, the local thread's guard pool is grown automatically from common pool.
721 When the thread terminates, its guard pool is returned to the common GC pool.
724 static void CDS_STDCALL Construct(
725 size_t nLiberateThreshold = 1024
726 , size_t nInitialThreadGuardCount = 8
729 /// Destroys DHP memory manager
731 The member function destroys DHP global object. After calling of this function you may \b NOT
732 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
733 at the end of your \p main(). See cds::gc::dhp for example.
735 static void CDS_STDCALL Destruct();
737 /// Returns pointer to GarbageCollector instance
739 If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
741 static GarbageCollector& instance()
743 if ( m_pManager == nullptr )
744 throw DHPManagerEmpty();
748 /// Checks if global GC object is constructed and may be used
749 static bool isUsed() CDS_NOEXCEPT
751 return m_pManager != nullptr;
756 /// Internal interface
758 /// Allocates a guard
759 details::guard_data * allocGuard()
761 return m_GuardPool.alloc();
764 /// Frees guard \p pGuard for reusing in future
765 void freeGuard(details::guard_data * pGuard )
767 m_GuardPool.free( pGuard );
770 /// Allocates guard list for a thread.
771 details::guard_data * allocGuardList( size_t nCount )
773 return m_GuardPool.allocList( nCount );
776 /// Frees thread's guard list pointed by \p pList
777 void freeGuardList( details::guard_data * pList )
779 m_GuardPool.freeList( pList );
782 /// Places retired pointer \p p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
783 /**@anchor dhp_gc_retirePtr
785 template <typename T>
786 void retirePtr( T * p, void (* pFunc)(T *) )
788 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
791 /// Places retired pointer \p p into thread's array of retired pointer for deferred reclamation
792 void retirePtr( retired_ptr const& p )
794 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
799 /// Liberate function
800 /** @anchor dhp_gc_liberate
801 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
802 trapped by any guard.
808 /// Get internal statistics
809 InternalState& getInternalState(InternalState& stat) const
811 return stat = m_stat;
814 /// Checks if internal statistics enabled
815 bool isStatisticsEnabled() const
817 return m_bStatEnabled;
820 /// Enables/disables internal statistics
821 bool enableStatistics( bool bEnable )
823 bool bEnabled = m_bStatEnabled;
824 m_bStatEnabled = bEnable;
829 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
835 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
836 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
837 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
838 \ref cds_threading "cds::threading::Manager::detachThread()".
840 The ThreadGC object maintains two lists:
841 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
842 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
843 Free guard list is a subset of thread guard list.
847 GarbageCollector& m_gc ; ///< reference to GC singleton
848 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
849 details::guard_data * m_pFree ; ///< The list of free guards from m_pList
852 /// Default constructor
854 : m_gc( GarbageCollector::instance() )
859 /// The object is not copy-constructible
860 ThreadGC( ThreadGC const& ) = delete;
862 /// Dtor calls fini()
868 /// Initialization. Repeat call is available
873 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
877 /// Finalization. Repeat call is available
881 m_gc.freeGuardList( m_pList );
888 /// Initializes guard \p g
889 void allocGuard( Guard& g )
891 assert( m_pList != nullptr );
// Fast path: take a guard from the thread-local free list
893 g.m_pGuard = m_pFree;
894 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// Slow path: free list is empty — get a guard from the global pool
// and link it into the thread-local guard list
897 g.m_pGuard = m_gc.allocGuard();
898 g.m_pGuard->pThreadNext = m_pList;
899 m_pList = g.m_pGuard;
/// Frees guard \p g: clears it and pushes it onto the thread-local free list
904 void freeGuard( Guard& g )
906 assert( m_pList != nullptr );
907 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
908 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
909 m_pFree = g.m_pGuard;
912 /// Initializes guard array \p arr
913 template <size_t Count>
914 void allocGuard( GuardArray<Count>& arr )
916 assert( m_pList != nullptr );
// First take as many guards as possible from the thread-local free list
919 while ( m_pFree && nCount < Count ) {
920 arr[nCount].set_guard( m_pFree );
921 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// Then get the remainder from the global pool, linking each into m_pList
925 while ( nCount < Count ) {
926 details::guard& g = arr[nCount++];
927 g.set_guard( m_gc.allocGuard() );
928 g.get_guard()->pThreadNext = m_pList;
929 m_pList = g.get_guard();
933 /// Frees guard array \p arr
934 template <size_t Count>
935 void freeGuard( GuardArray<Count>& arr )
937 assert( m_pList != nullptr );
// Clear all guards and chain them via pNextFree, then prepend the chain
// to the thread-local free list
939 details::guard_data * pGuard;
940 for ( size_t i = 0; i < Count - 1; ++i ) {
941 pGuard = arr[i].get_guard();
942 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
943 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
945 pGuard = arr[Count-1].get_guard();
946 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
947 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
948 m_pFree = arr[0].get_guard();
951 /// Places retired pointer \p p and its deleter \p pFunc into list of retired pointer for deferred reclamation
952 template <typename T>
953 void retirePtr( T * p, void (* pFunc)(T *) )
955 m_gc.retirePtr( p, pFunc );
964 //////////////////////////////////////////////////////////
// Acquires a guard from the current thread's ThreadGC on construction
967 inline Guard::Guard(ThreadGC& gc)
970 getGC().allocGuard( *this );
// Returns the guard to the thread-local free list on destruction
972 inline Guard::~Guard()
974 getGC().freeGuard( *this );
// Acquires \p Count guards from the current thread's ThreadGC on construction
977 template <size_t Count>
978 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
981 getGC().allocGuard( *this );
// Returns all \p Count guards to the thread-local free list on destruction
983 template <size_t Count>
984 inline GuardArray<Count>::~GuardArray()
986 getGC().freeGuard( *this );
990 }} // namespace cds::gc
993 #if CDS_COMPILER == CDS_COMPILER_MSVC
994 # pragma warning(pop)
997 #endif // #ifndef __CDS_GC_DETAILS_DHP_H