3 #ifndef CDSLIB_GC_DETAILS_DHP_H
4 #define CDSLIB_GC_DETAILS_DHP_H
6 #include <mutex> // unique_lock
7 #include <cds/algo/atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/sync/spinlock.h>
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 # pragma warning(push)
15 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
19 namespace cds { namespace gc {
21 /// Dynamic Hazard Pointer reclamation schema
23 The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
24 Use cds::gc::DHP class in your code.
26 Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
27 GC class and its nested classes. Before use any DHP-related class you must initialize DHP garbage collector
28 by constructing a cds::gc::DHP object at the beginning of your main().
29 See cds::gc::DHP class for explanation.
31 \par Implementation issues
32 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
33 It seems that this solution should not introduce a significant performance bottleneck, because each thread has its own set
34 of guards allocated from the global list of free guards, and the global list is accessed only when
35 all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
36 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
40 // Forward declarations
42 template <size_t Count> class GuardArray;
44 class GarbageCollector;
46 /// Retired pointer type
47 typedef cds::gc::details::retired_ptr retired_ptr;
// Deleter function-pointer type invoked when a retired pointer is finally reclaimed
// (see GarbageCollector::retirePtr, which casts user deleters to this type)
49 using cds::gc::details::free_retired_ptr_func;
51 /// Details of Dynamic Hazard Pointer algorithm
54 // Forward declaration
57 /// Retired pointer buffer node
// Node wrapping one retired pointer. The same node participates in two intrusive
// lists: the retired-pointer buffer (linked via m_pNext) and the free-item lists
// of retired_ptr_pool (linked via m_pNextFree).
58 struct retired_ptr_node {
59 retired_ptr m_ptr ; ///< retired pointer
60 atomics::atomic<retired_ptr_node *> m_pNext ; ///< next retired pointer in buffer
61 atomics::atomic<retired_ptr_node *> m_pNextFree ; ///< next item in free list of \p retired_ptr_node
// NOTE(review): the closing brace of this struct is not visible in this extract.
64 /// Internal guard representation
// One guard cell. Presumably, while pPost is non-null the pointed-to object must not
// be reclaimed (standard hazard-pointer semantics) — the scan()/liberate code that
// honors it is not visible in this extract; confirm there.
// NOTE(review): the struct declaration line itself is missing from this extract.
66 typedef void * guarded_ptr; ///< type of value guarded
68 atomics::atomic<guarded_ptr> pPost; ///< pointer guarded
69 atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
70 atomics::atomic<guard_data *> pNextFree; ///< pointer to the next item in global or thread-local free-list
72 guard_data * pThreadNext; ///< next item of thread's local list of guards
// Default ctor: all visible links start as nullptr (the pPost initializer line is
// not visible in this extract)
74 guard_data() CDS_NOEXCEPT
76 , pGlobalNext( nullptr )
77 , pNextFree( nullptr )
78 , pThreadNext( nullptr )
// Re-initializes the guard: drops the guarded pointer
81 void init() CDS_NOEXCEPT
83 pPost.store( nullptr, atomics::memory_order_relaxed );
86 /// Checks if the guard is free, that is, it does not contain any pointer guarded
87 bool isFree() const CDS_NOEXCEPT
// acquire load — presumably pairs with the release store done by guard::set();
// TODO confirm against the scan() implementation
89 return pPost.load( atomics::memory_order_acquire ) == nullptr;
94 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
// Allocator of guard_data cells. Maintains one global accumulating list of every
// guard ever allocated (m_GuardList, never shrinks until dtor) plus a
// spin-lock-protected free list (m_FreeGuardList) for reuse.
// NOTE(review): the class declaration line is missing from this extract.
97 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
99 atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
100 atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
101 cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
104 Unfortunately, access to the list of free guard is lock-based.
105 Lock-free manipulations with guard free-list are ABA-prone.
106 TODO: working with m_FreeGuardList in lock-free manner.
110 /// Allocates new guard from the heap. The function uses aligned allocator
111 guard_data * allocNew()
113 //TODO: the allocator should make block allocation
115 details::guard_data * pGuard = m_GuardAllocator.New();
117 // Link guard to the list
118 // m_GuardList is an accumulating list and it cannot support concurrent deletion,
119 // so, ABA problem is impossible for it
120 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
122 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
123 // pHead is changed by compare_exchange_weak
124 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Ctor: both lists start empty
132 guard_allocator() CDS_NOEXCEPT
133 : m_GuardList( nullptr )
134 , m_FreeGuardList( nullptr )
// Dtor walks m_GuardList via pGlobalNext and deletes every guard ever allocated
141 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
142 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
143 m_GuardAllocator.Delete( pData );
147 /// Allocates a guard from free list or from heap if free list is empty
150 // Try to pop a guard from free-list
151 details::guard_data * pGuard;
// Pop under the spin-lock: the lock serializes all free-list manipulation,
// which is what makes the relaxed atomics below sufficient
154 std::unique_lock<cds::sync::spin> al( m_freeListLock );
155 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
157 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
166 /// Frees guard \p pGuard
168 The function places the guard \p pGuard into free-list
170 void free( guard_data * pGuard ) CDS_NOEXCEPT
// Clear the guarded pointer before returning the guard to the free list
172 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
174 std::unique_lock<cds::sync::spin> al( m_freeListLock );
175 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
176 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
179 /// Allocates list of guard
181 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
183 cds::gc::dhp::ThreadGC supporting method
185 guard_data * allocList( size_t nCount )
187 assert( nCount != 0 );
195 // The guard list allocated is private for the thread,
196 // so, we can use relaxed memory order
198 guard_data * p = alloc();
199 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
// Terminate the thread-private chain
203 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
208 /// Frees list of guards
210 The list \p pList is linked by guard's \p pThreadNext field.
212 cds::gc::dhp::ThreadGC supporting method
214 void freeList( guard_data * pList ) CDS_NOEXCEPT
216 assert( pList != nullptr );
// Clear every guard and relink the whole chain through pNextFree...
218 guard_data * pLast = pList;
219 while ( pLast->pThreadNext ) {
220 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
222 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
// ...then splice the chain onto the global free list under the spin-lock
226 std::unique_lock<cds::sync::spin> al( m_freeListLock );
227 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
228 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
231 /// Returns the list's head of guards allocated
232 guard_data * begin() CDS_NOEXCEPT
234 return m_GuardList.load(atomics::memory_order_acquire);
238 /// Retired pointer buffer
240 The buffer of retired nodes ready for liberating.
241 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
// Lock-free stack of retired_ptr_node (CAS push at head, linked by m_pNext).
244 class retired_ptr_buffer
246 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
247 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
250 retired_ptr_buffer() CDS_NOEXCEPT
// Dtor: the buffer must already have been privatized/emptied by the owner
255 ~retired_ptr_buffer() CDS_NOEXCEPT
257 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
260 /// Pushes new node into the buffer. Returns current buffer size
261 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
263 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
265 node.m_pNext.store( pHead, atomics::memory_order_relaxed );
266 // pHead is changed by compare_exchange_weak
267 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
// The counter is only a scan() threshold heuristic, so relaxed ordering suffices
269 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
272 /// Pushes [pFirst, pLast] list linked by pNext field.
273 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
278 retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
280 pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
281 // pHead is changed by compare_exchange_weak
282 } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
// NOTE(review): returns old count + 1 rather than + nSize; callers seem to use the
// result only as a threshold check — confirm before relying on it as an exact size.
284 return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
287 /// Result of \ref dhp_gc_privatve "privatize" function.
289 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
291 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
293 /// Gets current list of retired pointer and clears the list
294 /**@anchor dhp_gc_privatve
296 privatize_result privatize() CDS_NOEXCEPT
298 privatize_result res;
// Atomically detach the whole stack; the caller becomes its exclusive owner
299 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
301 // Item counter is needed only as a threshold for \p scan() function
302 // So, we may clear the item counter without synchronization with m_pHead
303 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
307 /// Returns current size of buffer (approximate)
308 size_t size() const CDS_NOEXCEPT
310 return m_nItemCount.load(atomics::memory_order_relaxed);
314 /// Pool of retired pointers
316 The class acts as an allocator of retired node.
317 Retired pointers are linked in the lock-free list.
319 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
320 class retired_ptr_pool {
322 typedef retired_ptr_node item;
324 /// Count of items in block
// Sized so a block stays near 1024 * sizeof(item) bytes with one slot reserved —
// presumably for the block's pNext header; TODO confirm intent
325 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
329 atomics::atomic<block *> pNext; ///< next block
330 item items[m_nItemPerBlock]; ///< item array
333 atomics::atomic<block *> m_pBlockListHead; ///< head of allocated block list
335 // To solve ABA problem we use epoch-based approach
336 static const unsigned int c_nEpochCount = 4; ///< Max epoch count
337 atomics::atomic<unsigned int> m_nCurEpoch; ///< Current epoch
338 atomics::atomic<item *> m_pEpochFree[c_nEpochCount]; ///< List of free item per epoch
339 atomics::atomic<item *> m_pGlobalFreeHead; ///< Head of unallocated item list
341 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
// Allocates a new block, threads its items into a chain via m_pNextFree,
// then publishes the block and its items to the shared lists.
346 // allocate new block
347 block * pNew = m_BlockAllocator.New();
349 // link items within the block
350 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
351 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
352 pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
353 CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
356 // links new block to the block list
358 block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
360 pNew->pNext.store( pHead, atomics::memory_order_relaxed );
361 // pHead is changed by compare_exchange_weak
362 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
365 // links block's items to the free list
367 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
369 pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
370 // pHead is changed by compare_exchange_weak
371 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Index of the current epoch (modulo c_nEpochCount, a power of two)
375 unsigned int current_epoch() const CDS_NOEXCEPT
377 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
// Index of the epoch items are freed into: one epoch behind the current one
380 unsigned int next_epoch() const CDS_NOEXCEPT
382 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
// Ctor: all lists start empty
387 : m_pBlockListHead( nullptr )
389 , m_pGlobalFreeHead( nullptr )
391 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
392 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
// Dtor frees every block ever allocated, walking the block list via pNext
400 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
401 p = pBlock->pNext.load( atomics::memory_order_relaxed );
402 m_BlockAllocator.Delete( pBlock );
406 /// Increments current epoch
407 void inc_epoch() CDS_NOEXCEPT
409 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
412 /// Allocates the new retired pointer
413 retired_ptr_node& alloc()
// First try the current epoch's free list...
418 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
421 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
422 pItem->m_pNextFree.load(atomics::memory_order_acquire),
423 atomics::memory_order_acquire, atomics::memory_order_relaxed ))
429 // Epoch free list is empty
430 // Alloc from global free list
432 pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
438 // pItem is changed by compare_exchange_weak
439 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
440 pItem->m_pNextFree.load(atomics::memory_order_acquire),
441 atomics::memory_order_acquire, atomics::memory_order_relaxed ));
444 CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
448 /// Allocates and initializes new retired pointer
449 retired_ptr_node& alloc( const retired_ptr& p )
451 retired_ptr_node& node = alloc();
456 /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
458 The list is linked on the m_pNextFree field
460 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
462 assert( pHead != nullptr );
463 assert( pTail != nullptr );
// Freed items are pushed onto the previous epoch's list (see the ABA note above)
468 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
469 pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
470 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
474 /// Uninitialized guard
// Thin non-owning handle over a details::guard_data cell. Allocation/release of the
// underlying cell is performed by ThreadGC (friend); this class only reads/writes
// the guarded pointer.
477 friend class dhp::ThreadGC;
479 details::guard_data * m_pGuard ; ///< Pointer to guard data
482 /// Initialize empty guard.
483 CDS_CONSTEXPR guard() CDS_NOEXCEPT
484 : m_pGuard( nullptr )
487 /// Copy-ctor is disabled
488 guard( guard const& ) = delete;
490 /// Move-ctor is disabled
491 guard( guard&& ) = delete;
493 /// Object destructor, does nothing
494 ~guard() CDS_NOEXCEPT
497 /// Get current guarded pointer
498 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
500 assert( m_pGuard != nullptr );
501 return m_pGuard->pPost.load( order );
504 /// Guards pointer \p p
505 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
507 assert( m_pGuard != nullptr );
508 m_pGuard->pPost.store( p, order );
// Clears the guarded pointer; relaxed by default since clearing only un-protects
512 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
514 assert( m_pGuard != nullptr );
515 m_pGuard->pPost.store( nullptr, order );
518 /// Guards pointer \p p
519 template <typename T>
520 T * operator =(T * p) CDS_NOEXCEPT
522 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
526 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
532 public: // for ThreadGC.
534 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
535 the compiler produces error:
'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard'
is protected
536 despite the fact that ThreadGC is declared as friend for guard class.
537 Therefore, we have to add set_guard/get_guard public functions
// Attaches an allocated guard cell; must currently hold none
540 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
542 assert( m_pGuard == nullptr );
546 /// Get current guard data
547 details::guard_data * get_guard() CDS_NOEXCEPT
551 /// Get current guard data
552 details::guard_data * get_guard() const CDS_NOEXCEPT
// Detaches and returns the guard cell, leaving this handle uninitialized
557 details::guard_data * release_guard() CDS_NOEXCEPT
559 details::guard_data * p = m_pGuard;
// True when a guard cell is attached
564 bool is_initialized() const
566 return m_pGuard != nullptr;
570 } // namespace details
574 This class represents auto guard: ctor allocates a guard from guard pool,
575 dtor returns the guard back to the pool of free guard.
577 class Guard: public details::guard
579 typedef details::guard base_class;
580 friend class ThreadGC;
582 /// Allocates a guard from the ThreadGC object of the current thread
583 Guard(); // inline in dhp_impl.h
585 /// Returns guard allocated back to pool of free guards
586 ~Guard(); // inline in dhp_impl.h
588 /// Guards pointer \p p
589 template <typename T>
590 T * operator =(T * p) CDS_NOEXCEPT
// Delegates to details::guard::operator= which release-stores into the guard cell
592 return base_class::operator =<T>( p );
// Clears the guard via the base-class nullptr overload
595 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
597 return base_class::operator =(nullptr);
603 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
604 dtor returns the guards allocated back to the pool.
606 template <size_t Count>
609 details::guard m_arr[Count] ; ///< array of guard
610 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
613 /// Rebind array for other size \p OtherCount
614 template <size_t OtherCount>
616 typedef GuardArray<OtherCount> other ; ///< rebinding result
620 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
621 GuardArray(); // inline in dhp_impl.h
623 /// The object is not copy-constructible
624 GuardArray( GuardArray const& ) = delete;
626 /// The object is not move-constructible
627 GuardArray( GuardArray&& ) = delete;
629 /// Returns guards allocated back to pool
630 ~GuardArray(); // inline in dhp_impl.h
632 /// Returns the capacity of array
633 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
638 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
639 details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
641 assert( nIndex < capacity() );
642 return m_arr[nIndex];
645 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
646 const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
648 assert( nIndex < capacity() );
649 return m_arr[nIndex];
652 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
653 template <typename T>
654 void set( size_t nIndex, T * p ) CDS_NOEXCEPT
656 assert( nIndex < capacity() );
657 m_arr[nIndex].set( p );
660 /// Clears (sets to \p nullptr) the guard \p nIndex
661 void clear( size_t nIndex ) CDS_NOEXCEPT
663 assert( nIndex < capacity() );
664 m_arr[nIndex].clear();
667 /// Clears all guards in the array
668 void clearAll() CDS_NOEXCEPT
670 for ( size_t i = 0; i < capacity(); ++i )
675 /// Memory manager (Garbage collector)
// Process-wide DHP singleton (see m_pManager / Construct / Destruct / instance()).
676 class CDS_EXPORT_API GarbageCollector
679 friend class ThreadGC;
681 /// Internal GC statistics
684 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
685 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
689 , m_nFreeGuardCount(0)
694 /// Exception "No GarbageCollector object is created"
695 class not_initialized : public std::runtime_error
700 : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
705 /// Internal GC statistics
708 size_t m_nGuardCount ; ///< Total guard count
709 size_t m_nFreeGuardCount ; ///< Count of free guard
714 , m_nFreeGuardCount(0)
// Snapshot conversion from the atomic internal_stat counters
717 InternalState& operator =( internal_stat const& s )
719 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
720 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
728 static GarbageCollector * m_pManager ; ///< GC global instance
730 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call \p scan()
731 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
733 details::guard_allocator<> m_GuardPool ; ///< Guard pool
734 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
735 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
737 internal_stat m_stat ; ///< Internal statistics
738 bool m_bStatEnabled ; ///< Internal Statistics enabled
741 /// Initializes DHP memory manager singleton
743 This member function creates and initializes DHP global object.
744 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
745 this member function is called in the \p main() function. See cds::gc::dhp for example.
746 After calling of this function you may use CDS data structures based on cds::gc::DHP.
749 \li \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
750 the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
751 If \p nLiberateThreshold <= 1, \p scan() would be called after each \ref dhp_gc_retirePtr "retirePtr" call.
752 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
753 is initialized the GC allocates local guard pool for the thread from common guard pool.
754 By perforce the local thread's guard pool is grown automatically from common pool.
755 When the thread terminated its guard pool is backed to common GC's pool.
758 static void CDS_STDCALL Construct(
759 size_t nLiberateThreshold = 1024
760 , size_t nInitialThreadGuardCount = 8
763 /// Destroys DHP memory manager
765 The member function destroys DHP global object. After calling of this function you may \b NOT
766 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
767 at the end of your \p main(). See cds::gc::dhp for example.
769 static void CDS_STDCALL Destruct();
771 /// Returns pointer to GarbageCollector instance
773 If DHP GC is not initialized, \p not_initialized exception is thrown
775 static GarbageCollector& instance()
777 if ( m_pManager == nullptr )
778 throw not_initialized();
782 /// Checks if global GC object is constructed and may be used
783 static bool isUsed() CDS_NOEXCEPT
785 return m_pManager != nullptr;
790 /// Internal interface
792 /// Allocates a guard
793 details::guard_data * allocGuard()
795 return m_GuardPool.alloc();
798 /// Frees guard \p pGuard for reusing in future
799 void freeGuard(details::guard_data * pGuard )
801 m_GuardPool.free( pGuard );
804 /// Allocates guard list for a thread.
805 details::guard_data * allocGuardList( size_t nCount )
807 return m_GuardPool.allocList( nCount );
810 /// Frees thread's guard list pointed by \p pList
811 void freeGuardList( details::guard_data * pList )
813 m_GuardPool.freeList( pList );
816 /// Places retired pointer \p p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
817 /**@anchor dhp_gc_retirePtr
819 template <typename T>
820 void retirePtr( T * p, void (* pFunc)(T *) )
// NOTE(review): reinterpret_cast of a function pointer to a different signature and
// calling it through that type is formally undefined behavior in ISO C++, though it
// works on the platforms libcds targets — flagging, not changing.
822 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
825 /// Places retired pointer \p p into thread's array of retired pointer for deferred reclamation
826 void retirePtr( retired_ptr const& p )
// Trigger scan() (see dhp_gc_liberate) once the buffer reaches the threshold
828 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
833 /// Liberate function
834 /** @anchor dhp_gc_liberate
835 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
836 trapped by any guard.
842 /// Get internal statistics
843 InternalState& getInternalState(InternalState& stat) const
845 return stat = m_stat;
848 /// Checks if internal statistics enabled
849 bool isStatisticsEnabled() const
851 return m_bStatEnabled;
854 /// Enables/disables internal statistics
// Returns the previous setting
855 bool enableStatistics( bool bEnable )
857 bool bEnabled = m_bStatEnabled;
858 m_bStatEnabled = bEnable;
// Private ctor — instances are created only via Construct()
863 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
869 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
870 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
871 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
872 \ref cds_threading "cds::threading::Manager::detachThread()".
874 The ThreadGC object maintains two lists:
875 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
876 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
877 Free guard list is a subset of thread guard list.
881 GarbageCollector& m_gc ; ///< reference to GC singleton
882 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
883 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
886 /// Default constructor
888 : m_gc( GarbageCollector::instance() )
893 /// The object is not copy-constructible
894 ThreadGC( ThreadGC const& ) = delete;
896 /// Dtor calls fini()
902 /// Initialization. Repeat call is available
// Acquires the initial thread-local guard list from the global guard pool
907 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
911 /// Finalization. Repeat call is available
// Returns all thread-local guards back to the global pool
915 m_gc.freeGuardList( m_pList );
922 /// Initializes guard \p g
923 void allocGuard( dhp::details::guard& g )
925 assert( m_pList != nullptr );
// Fast path: reuse a guard from the thread-local free list (no synchronization)
928 g.m_pGuard = m_pFree;
929 m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
// Slow path: get a guard from the global pool and link it into the thread list
932 g.m_pGuard = m_gc.allocGuard();
933 g.m_pGuard->pThreadNext = m_pList;
934 m_pList = g.m_pGuard;
// Returns guard \p g to the thread-local free list (the guard stays thread-owned)
940 void freeGuard( dhp::details::guard& g )
942 assert( m_pList != nullptr );
944 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
945 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
946 m_pFree = g.m_pGuard;
947 g.m_pGuard = nullptr;
951 /// Initializes guard array \p arr
952 template <size_t Count>
953 void allocGuard( GuardArray<Count>& arr )
955 assert( m_pList != nullptr );
// Take as many guards as possible from the thread-local free list...
958 while ( m_pFree && nCount < Count ) {
959 arr[nCount].set_guard( m_pFree );
960 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// ...then top up the remainder from the global pool
964 while ( nCount < Count ) {
965 details::guard& g = arr[nCount++];
966 g.set_guard( m_gc.allocGuard() );
967 g.get_guard()->pThreadNext = m_pList;
968 m_pList = g.get_guard();
972 /// Frees guard array \p arr
973 template <size_t Count>
974 void freeGuard( GuardArray<Count>& arr )
976 assert( m_pList != nullptr );
// Chain all Count guards together through pNextFree, clearing each guarded pointer,
// and prepend the whole chain to the thread-local free list in one splice
978 details::guard_data * pGuard;
979 for ( size_t i = 0; i < Count - 1; ++i ) {
980 pGuard = arr[i].get_guard();
981 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
982 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
984 pGuard = arr[Count-1].get_guard();
985 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
986 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
987 m_pFree = arr[0].get_guard();
990 /// Places retired pointer \p p and its deleter \p pFunc into list of retired pointer for deferred reclamation
991 template <typename T>
992 void retirePtr( T * p, void (* pFunc)(T *) )
994 m_gc.retirePtr( p, pFunc );
997 /// Run retiring cycle
1004 }} // namespace cds::gc
1007 #if CDS_COMPILER == CDS_COMPILER_MSVC
1008 # pragma warning(pop)
1011 #endif // #ifndef CDSLIB_GC_DETAILS_DHP_H