3 #ifndef CDSLIB_GC_DETAILS_DHP_H
4 #define CDSLIB_GC_DETAILS_DHP_H
6 #include <mutex> // unique_lock
7 #include <cds/algo/atomic.h>
8 #include <cds/algo/int_algo.h>
9 #include <cds/gc/details/retired_ptr.h>
10 #include <cds/details/aligned_allocator.h>
11 #include <cds/details/allocator.h>
12 #include <cds/sync/spinlock.h>
14 #if CDS_COMPILER == CDS_COMPILER_MSVC
15 # pragma warning(push)
16 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
20 namespace cds { namespace gc {
22 /// Dynamic Hazard Pointer reclamation schema
24 The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
25 Use cds::gc::DHP class in your code.
27 The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of the DHP schema is
28 the GC class and its nested classes. Before using any DHP-related class you must initialize the DHP garbage collector
29 by constructing a cds::gc::DHP object at the beginning of your main().
30 See the cds::gc::DHP class for explanation.
32 \par Implementation issues
33 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
34 It seems that this solution should not introduce a significant performance bottleneck, because each thread has its own set
35 of guards allocated from the global list of free guards, and the global list is accessed only when
36 all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
37 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
41 // Forward declarations
43 template <size_t Count> class GuardArray;
45 class GarbageCollector;
47 /// Retired pointer type
48 typedef cds::gc::details::retired_ptr retired_ptr;
50 using cds::gc::details::free_retired_ptr_func;
52 /// Details of Dynamic Hazard Pointer algorithm
55 // Forward declaration
58 /// Retired pointer buffer node
// Wraps one retired pointer together with two intrusive links:
// m_pNext threads the node into the retired-pointer buffer (lock-free stack),
// m_pNextFree threads it into a free list of the retired-pointer pool.
59 struct retired_ptr_node {
60 retired_ptr m_ptr ; ///< retired pointer
61 atomics::atomic<retired_ptr_node *> m_pNext ; ///< next retired pointer in buffer
62 atomics::atomic<retired_ptr_node *> m_pNextFree ; ///< next item in free list of \p retired_ptr_node
// NOTE(review): the closing brace of this struct (original line 63) is not visible in this excerpt.
65 /// Internal guard representation
// NOTE(review): the opening declaration of this struct (original line 66,
// presumably "struct guard_data {") and several brace lines are not visible
// in this excerpt; the members below belong to details::guard_data.
67 typedef void * guarded_ptr; ///< type of value guarded
// pPost is the hazard pointer proper: a non-null value means the pointed-to
// object must not be reclaimed by scan().
69 atomics::atomic<guarded_ptr> pPost; ///< pointer guarded
70 atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
71 atomics::atomic<guard_data *> pNextFree; ///< pointer to the next item in global or thread-local free-list
// pThreadNext is plain (non-atomic): the thread-local guard list is only
// traversed by its owning thread (and at thread termination).
73 guard_data * pThreadNext; ///< next item of thread's local list of guards
// Default ctor: all links start null. NOTE(review): the first initializer
// (original line 76, presumably ": pPost( nullptr )") is not visible here.
75 guard_data() CDS_NOEXCEPT
77 , pGlobalNext( nullptr )
78 , pNextFree( nullptr )
79 , pThreadNext( nullptr )
// Re-initializes the guard for reuse: clears the guarded pointer only.
82 void init() CDS_NOEXCEPT
84 pPost.store( nullptr, atomics::memory_order_relaxed );
87 /// Checks if the guard is free, that is, it does not contain any pointer guarded
88 bool isFree() const CDS_NOEXCEPT
90 return pPost.load( atomics::memory_order_acquire ) == nullptr;
// Pool of guard_data objects shared by all threads.
// - m_GuardList: grow-only list of every guard ever allocated (lock-free push).
// - m_FreeGuardList: list of currently unused guards, protected by a spin lock.
// NOTE(review): the class declaration line of guard_allocator (original
// lines 96-97) and various brace lines are not visible in this excerpt.
95 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
98 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
100 atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
101 atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
102 cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
105 Unfortunately, access to the list of free guard is lock-based.
106 Lock-free manipulations with guard free-list are ABA-prone.
107 TODO: working with m_FreeGuardList in lock-free manner.
111 /// Allocates new guard from the heap. The function uses aligned allocator
112 guard_data * allocNew()
114 //TODO: the allocator should make block allocation
116 details::guard_data * pGuard = m_GuardAllocator.New();
118 // Link guard to the list
119 // m_GuardList is an accumulating list and it cannot support concurrent deletion,
120 // so, ABA problem is impossible for it
// Standard lock-free stack push: CAS the new guard in as the head; on CAS
// failure pHead is reloaded with the current head and the loop retries.
121 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
123 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
124 // pHead is changed by compare_exchange_weak
125 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Ctor: both lists start empty.
133 guard_allocator() CDS_NOEXCEPT
134 : m_GuardList( nullptr )
135 , m_FreeGuardList( nullptr )
// Dtor: walks the grow-only allocated list and frees every guard.
// Safe to use relaxed loads here - destruction is single-threaded.
142 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
143 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
144 m_GuardAllocator.Delete( pData );
148 /// Allocates a guard from free list or from heap if free list is empty
151 // Try to pop a guard from free-list
// The free list is popped under the spin lock (see TODO above about the
// ABA hazard of a lock-free version).
152 details::guard_data * pGuard;
155 std::unique_lock<cds::sync::spin> al( m_freeListLock );
156 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
158 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
167 /// Frees guard \p pGuard
169 The function places the guard \p pGuard into free-list
171 void free( guard_data * pGuard ) CDS_NOEXCEPT
// Clear the hazard pointer first so scan() no longer sees it as busy,
// then push the guard onto the free list under the spin lock.
173 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
175 std::unique_lock<cds::sync::spin> al( m_freeListLock );
176 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
177 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
180 /// Allocates list of guard
182 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
184 cds::gc::dhp::ThreadGC supporting method
186 guard_data * allocList( size_t nCount )
188 assert( nCount != 0 );
196 // The guard list allocated is private for the thread,
197 // so, we can use relaxed memory order
// Builds a chain of nCount guards; each element's pThreadNext and pNextFree
// both point to the next element, and the tail is null-terminated below.
199 guard_data * p = alloc();
200 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
204 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
209 /// Frees list of guards
211 The list \p pList is linked by guard's \p pThreadNext field.
213 cds::gc::dhp::ThreadGC supporting method
215 void freeList( guard_data * pList ) CDS_NOEXCEPT
217 assert( pList != nullptr );
// Walk the thread-local chain: clear each hazard pointer and rebuild the
// pNextFree links so the whole chain can be spliced onto the global free list.
219 guard_data * pLast = pList;
220 while ( pLast->pThreadNext ) {
221 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
223 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
// Splice the whole chain onto the global free list in one locked operation.
227 std::unique_lock<cds::sync::spin> al( m_freeListLock );
228 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
229 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
232 /// Returns the list's head of guards allocated
233 guard_data * begin() CDS_NOEXCEPT
235 return m_GuardList.load(atomics::memory_order_acquire);
239 /// Retired pointer buffer
241 The buffer of retired nodes ready for liberating.
242 When size of buffer exceeds a threshold the GC calls \p scan() procedure to free
// Implemented as a lock-free Treiber-style stack of retired_ptr_node linked
// by m_pNext, plus an approximate item counter used only as a scan() threshold.
// NOTE(review): non-contiguous original line numbers show that brace and
// access-specifier lines are not visible in this excerpt.
245 class retired_ptr_buffer
247 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
248 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
251 retired_ptr_buffer() CDS_NOEXCEPT
// Dtor: the buffer must have been privatized/emptied before destruction.
256 ~retired_ptr_buffer() CDS_NOEXCEPT
258 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
261 /// Pushes new node into the buffer. Returns current buffer size
262 size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
// Lock-free stack push; the release CAS publishes the node's m_pNext link.
264 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
266 node.m_pNext.store( pHead, atomics::memory_order_relaxed );
267 // pHead is changed by compare_exchange_weak
268 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Counter is relaxed: it is only a heuristic threshold, not a precise size.
270 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
273 /// Pushes [pFirst, pLast] list linked by pNext field.
274 size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
279 retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
281 pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
282 // pHead is changed by compare_exchange_weak
283 } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
// NOTE(review): this returns the pre-add count + 1, not + nSize, even though
// nSize items were pushed. The result is only compared against the scan()
// threshold, so it may be intentional slack - confirm against callers.
285 return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + 1;
288 /// Result of \ref dhp_gc_privatize "privatize" function.
290 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
292 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
294 /// Gets current list of retired pointer and clears the list
295 /**@anchor dhp_gc_privatize
297 privatize_result privatize() CDS_NOEXCEPT
299 privatize_result res;
// Atomically detach the whole stack; the caller becomes its sole owner.
300 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
302 // Item counter is needed only as a threshold for \p scan() function
303 // So, we may clear the item counter without synchronization with m_pHead
304 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
308 /// Returns current size of buffer (approximate)
309 size_t size() const CDS_NOEXCEPT
311 return m_nItemCount.load(atomics::memory_order_relaxed);
315 /// Pool of retired pointers
317 The class acts as an allocator of retired node.
318 Retired pointers are linked in the lock-free list.
// Block-allocating pool of retired_ptr_node. Free items are kept in per-epoch
// lock-free lists; advancing the epoch (inc_epoch) is what makes ABA on the
// free lists impossible in practice (items freed in epoch E are only reused
// after the epoch has moved on). See the DHP algorithm description above.
// NOTE(review): brace lines and some statements (e.g. the loop headers around
// original lines 347, 359, 368) are not visible in this excerpt.
320 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
321 class retired_ptr_pool {
323 typedef retired_ptr_node item;
325 /// Count of items in block
// Sized so one block is roughly 1 KiB; the "- 1" leaves room for the block header.
326 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
330 atomics::atomic<block *> pNext; ///< next block
331 item items[m_nItemPerBlock]; ///< item array
334 atomics::atomic<block *> m_pBlockListHead; ///< head of of allocated block list
336 // To solve ABA problem we use epoch-based approach
337 unsigned int const m_nEpochBitmask; ///< Epoch bitmask (log2( m_nEpochCount))
338 atomics::atomic<unsigned int> m_nCurEpoch; ///< Current epoch
339 atomics::atomic<item *>* m_pEpochFree; ///< List of free item per epoch
340 atomics::atomic<item *> m_pGlobalFreeHead; ///< Head of unallocated item list
342 typedef cds::details::Allocator< block, Alloc > block_allocator;
343 typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;
// Grows the pool by one block and publishes its items on the global free list.
348 // allocate new block
349 block * pNew = block_allocator().New();
351 // link items within the block
// Chain items[0..N-2] by m_pNextFree; the last item is linked into the
// global free list below. CDS_STRICT_DO clears m_pNext only in debug/strict builds.
352 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
353 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
354 pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
355 CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
358 // links new block to the block list
// The block list is grow-only (no concurrent deletion), so relaxed CAS suffices.
360 block * pHead = m_pBlockListHead.load(atomics::memory_order_relaxed);
362 pNew->pNext.store( pHead, atomics::memory_order_relaxed );
363 // pHead is changed by compare_exchange_weak
364 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
367 // links block's items to the free list
// Splice the whole item chain onto the global free list with a release CAS.
369 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_relaxed);
371 pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
372 // pHead is changed by compare_exchange_weak
373 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
// Index of the epoch free list used by alloc().
377 unsigned int current_epoch() const CDS_NOEXCEPT
379 return m_nCurEpoch.load(atomics::memory_order_acquire) & m_nEpochBitmask;
// Index of the previous epoch's free list, used by free_range().
382 unsigned int next_epoch() const CDS_NOEXCEPT
384 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & m_nEpochBitmask;
// Ctor: rounds the epoch count up to a power of two (beans::ceil2) so the
// epoch index can be computed with a cheap bitmask.
388 retired_ptr_pool( unsigned int nEpochCount = 8 )
389 : m_pBlockListHead( nullptr )
390 , m_nEpochBitmask( static_cast<unsigned int>(beans::ceil2(nEpochCount)) - 1 )
392 , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1))
393 , m_pGlobalFreeHead( nullptr )
396 for (unsigned int i = 0; i <= m_nEpochBitmask; ++i )
397 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
// Dtor: frees every allocated block and the epoch-free array (single-threaded).
406 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
407 p = pBlock->pNext.load( atomics::memory_order_relaxed );
411 epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
414 /// Increments current epoch
415 void inc_epoch() CDS_NOEXCEPT
417 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
420 /// Allocates the new retired pointer
// Tries the current epoch's free list first, then the global free list
// (growing the pool when that is also empty - the growth call is among the
// lines not visible here).
421 retired_ptr_node& alloc()
426 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
429 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
430 pItem->m_pNextFree.load(atomics::memory_order_acquire),
431 atomics::memory_order_acquire, atomics::memory_order_relaxed ))
437 // Epoch free list is empty
438 // Alloc from global free list
440 pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
446 // pItem is changed by compare_exchange_weak
447 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
448 pItem->m_pNextFree.load(atomics::memory_order_acquire),
449 atomics::memory_order_acquire, atomics::memory_order_relaxed ));
452 CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
456 /// Allocates and initializes new retired pointer
457 retired_ptr_node& alloc( const retired_ptr& p )
459 retired_ptr_node& node = alloc();
464 /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
466 The list is linked on the m_pNextFree field
468 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
470 assert( pHead != nullptr );
471 assert( pTail != nullptr );
// Freed items go to the *previous* epoch's list (next_epoch()), so they are
// not handed out again until the epoch has advanced - the ABA defense.
476 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
477 pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
478 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
482 /// Uninitialized guard
// Thin non-owning handle around a details::guard_data slot. The guard itself
// neither allocates nor frees its guard_data; ThreadGC attaches/detaches it
// (hence the friend declaration and the public set_guard/get_guard escape hatch).
// NOTE(review): the class declaration line and several brace/return lines are
// not visible in this excerpt.
485 friend class dhp::ThreadGC;
487 details::guard_data * m_pGuard ; ///< Pointer to guard data
490 /// Initialize empty guard.
491 CDS_CONSTEXPR guard() CDS_NOEXCEPT
492 : m_pGuard( nullptr )
495 /// Copy-ctor is disabled
496 guard( guard const& ) = delete;
498 /// Move-ctor is disabled
499 guard( guard&& ) = delete;
501 /// Object destructor, does nothing
502 ~guard() CDS_NOEXCEPT
505 /// Get current guarded pointer
506 void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
508 assert( m_pGuard != nullptr );
509 return m_pGuard->pPost.load( order );
512 /// Guards pointer \p p
513 void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
515 assert( m_pGuard != nullptr );
516 m_pGuard->pPost.store( p, order );
// Clears the guarded pointer (relaxed by default - the caller no longer
// protects anything, so no publication is required).
520 void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
522 assert( m_pGuard != nullptr );
523 m_pGuard->pPost.store( nullptr, order );
526 /// Guards pointer \p p
527 template <typename T>
528 T * operator =(T * p) CDS_NOEXCEPT
// const_cast allows guarding pointers-to-const; the stored value is only
// compared against retired pointers, never written through.
530 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
// Clears the guard via assignment of nullptr.
534 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
540 public: // for ThreadGC.
542 GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
543 the compiler produces error:
'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard
' is protected
544 despite the fact that ThreadGC is declared as friend for guard class.
545 Therefore, we have to add set_guard/get_guard public functions
// Attaches a guard_data slot; the guard must currently be unattached.
548 void set_guard( details::guard_data * pGuard ) CDS_NOEXCEPT
550 assert( m_pGuard == nullptr );
554 /// Get current guard data
555 details::guard_data * get_guard() CDS_NOEXCEPT
559 /// Get current guard data
560 details::guard_data * get_guard() const CDS_NOEXCEPT
// Detaches and returns the guard_data slot, leaving this guard uninitialized.
565 details::guard_data * release_guard() CDS_NOEXCEPT
567 details::guard_data * p = m_pGuard;
// True when a guard_data slot is attached.
572 bool is_initialized() const
574 return m_pGuard != nullptr;
582 This class represents auto guard: ctor allocates a guard from guard pool,
583 dtor returns the guard back to the pool of free guard.
// RAII wrapper over details::guard: acquisition/release are tied to object
// lifetime (both defined inline in dhp_impl.h and thus not visible here).
585 class Guard: public details::guard
587 typedef details::guard base_class;
588 friend class ThreadGC;
590 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
591 Guard(); // inline in dhp_impl.h
593 /// Returns guard allocated back to pool of free guards
594 ~Guard(); // inline in dhp_impl.h
596 /// Guards pointer \p p
// Forwards to the base-class assignment, which stores p into the hazard slot.
597 template <typename T>
598 T * operator =(T * p) CDS_NOEXCEPT
600 return base_class::operator =<T>( p );
// Clears the guard via assignment of nullptr (forwards to the base class).
603 std::nullptr_t operator=(std::nullptr_t) CDS_NOEXCEPT
605 return base_class::operator =(nullptr);
611 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
612 dtor returns the guards allocated back to the pool.
// Fixed-size RAII array of guards; ctor/dtor are inline in dhp_impl.h.
// NOTE(review): the class declaration line of GuardArray (original line 615/616)
// is not visible in this excerpt.
614 template <size_t Count>
617 details::guard m_arr[Count] ; ///< array of guard
618 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
621 /// Rebind array for other size \p OtherCount
622 template <size_t OtherCount>
624 typedef GuardArray<OtherCount> other ; ///< rebinding result
628 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
629 GuardArray(); // inline in dhp_impl.h
631 /// The object is not copy-constructible
632 GuardArray( GuardArray const& ) = delete;
634 /// The object is not move-constructible
635 GuardArray( GuardArray&& ) = delete;
637 /// Returns guards allocated back to pool
638 ~GuardArray(); // inline in dh_impl.h
640 /// Returns the capacity of array
641 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
646 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
647 details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
649 assert( nIndex < capacity() );
650 return m_arr[nIndex];
653 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
654 const details::guard& operator []( size_t nIndex ) const CDS_NOEXCEPT
656 assert( nIndex < capacity() );
657 return m_arr[nIndex];
660 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
661 template <typename T>
662 void set( size_t nIndex, T * p ) CDS_NOEXCEPT
664 assert( nIndex < capacity() );
665 m_arr[nIndex].set( p );
668 /// Clears (sets to \p nullptr) the guard \p nIndex
669 void clear( size_t nIndex ) CDS_NOEXCEPT
671 assert( nIndex < capacity() );
672 m_arr[nIndex].clear();
675 /// Clears all guards in the array
676 void clearAll() CDS_NOEXCEPT
678 for ( size_t i = 0; i < capacity(); ++i )
683 /// Memory manager (Garbage collector)
// Process-wide DHP singleton: owns the guard pool, the retired-pointer pool,
// and the retired-pointer buffer; created/destroyed via Construct()/Destruct().
// NOTE(review): non-contiguous original line numbers show that brace lines,
// access specifiers, and some member/function lines are not visible here.
684 class CDS_EXPORT_API GarbageCollector
687 friend class ThreadGC;
689 /// Internal GC statistics
// Atomic variant of the statistics, updated concurrently by worker threads.
692 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
693 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
697 , m_nFreeGuardCount(0)
702 /// Exception "No GarbageCollector object is created"
// Thrown by instance() when Construct() has not been called yet.
703 class not_initialized : public std::runtime_error
708 : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
713 /// Internal GC statistics
// Plain (non-atomic) snapshot type handed out to callers.
716 size_t m_nGuardCount ; ///< Total guard count
717 size_t m_nFreeGuardCount ; ///< Count of free guard
722 , m_nFreeGuardCount(0)
// Copies a relaxed snapshot of the atomic statistics; values may be slightly
// stale relative to each other, which is acceptable for statistics.
725 InternalState& operator =( internal_stat const& s )
727 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
728 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
736 static GarbageCollector * m_pManager ; ///< GC global instance
738 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call \p scan()
739 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
741 details::guard_allocator<> m_GuardPool ; ///< Guard pool
742 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
743 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
745 internal_stat m_stat ; ///< Internal statistics
746 bool m_bStatEnabled ; ///< Internal Statistics enabled
749 /// Initializes DHP memory manager singleton
751 This member function creates and initializes DHP global object.
752 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
753 this member function is called in the \p main() function. See cds::gc::dhp for example.
754 After calling of this function you may use CDS data structures based on cds::gc::DHP.
757 - \p nLiberateThreshold - \p scan() threshold. When count of retired pointers reaches this value,
758 the \ref dhp_gc_liberate "scan()" member function would be called for freeing retired pointers.
759 If \p nLiberateThreshold <= 1, \p scan() would called after each \ref dhp_gc_retirePtr "retirePtr" call.
760 - \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
761 is initialized the GC allocates local guard pool for the thread from common guard pool.
762 By perforce the local thread's guard pool is grown automatically from common pool.
763 When the thread terminated its guard pool is backed to common GC's pool.
764 - \p nEpochCount: internally, DHP memory manager uses epoch-based schema to solve
765 ABA problem for internal data. \p nEpochCount specifies the epoch count,
766 i.e. the count of simultaneously working threads that remove the elements
767 of DHP-based concurrent data structure. Default value is 8.
770 static void CDS_STDCALL Construct(
771 size_t nLiberateThreshold = 1024
772 , size_t nInitialThreadGuardCount = 8
773 , size_t nEpochCount = 8
776 /// Destroys DHP memory manager
778 The member function destroys DHP global object. After calling of this function you may \b NOT
779 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
780 at the end of your \p main(). See cds::gc::dhp for example.
782 static void CDS_STDCALL Destruct();
784 /// Returns pointer to GarbageCollector instance
786 If DHP GC is not initialized, \p not_initialized exception is thrown
788 static GarbageCollector& instance()
790 if ( m_pManager == nullptr )
791 throw not_initialized();
795 /// Checks if global GC object is constructed and may be used
796 static bool isUsed() CDS_NOEXCEPT
798 return m_pManager != nullptr;
803 /// Internal interface
805 /// Allocates a guard
806 details::guard_data * allocGuard()
808 return m_GuardPool.alloc();
811 /// Frees guard \p g for reusing in future
812 void freeGuard(details::guard_data * pGuard )
814 m_GuardPool.free( pGuard );
817 /// Allocates guard list for a thread.
818 details::guard_data * allocGuardList( size_t nCount )
820 return m_GuardPool.allocList( nCount );
823 /// Frees thread's guard list pointed by \p pList
824 void freeGuardList( details::guard_data * pList )
826 m_GuardPool.freeList( pList );
829 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
830 /**@anchor dhp_gc_retirePtr
// Type-erases the typed deleter into free_retired_ptr_func and forwards to
// the untyped overload below.
832 template <typename T>
833 void retirePtr( T * p, void (* pFunc)(T *) )
835 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
838 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
839 void retirePtr( retired_ptr const& p )
// Wraps the retired pointer in a pool node and pushes it into the buffer;
// when the buffer size reaches the threshold, scan() is triggered (the call
// itself is on a line not visible in this excerpt).
841 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
846 /// Liberate function
847 /** @anchor dhp_gc_liberate
848 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
849 trapped by any guard.
855 /// Get internal statistics
856 InternalState& getInternalState(InternalState& stat) const
858 return stat = m_stat;
861 /// Checks if internal statistics enabled
862 bool isStatisticsEnabled() const
864 return m_bStatEnabled;
867 /// Enables/disables internal statistics
// Returns the previous enabled state.
868 bool enableStatistics( bool bEnable )
870 bool bEnabled = m_bStatEnabled;
871 m_bStatEnabled = bEnable;
// Private ctor: the singleton is created only through Construct().
876 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
882 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
883 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
884 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
885 \ref cds_threading "cds::threading::Manager::detachThread()".
887 The ThreadGC object maintains two list:
888 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
889 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
890 Free guard list is a subset of thread guard list.
// Per-thread facade over the global GarbageCollector. Single-threaded by
// design (each thread owns its own instance), hence the relaxed atomics and
// plain pointer manipulation below.
// NOTE(review): the class declaration line of ThreadGC and several brace lines
// are not visible in this excerpt.
894 GarbageCollector& m_gc ; ///< reference to GC singleton
895 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
896 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
899 /// Default constructor
// Binds to the global singleton; throws not_initialized if Construct()
// has not been called (see GarbageCollector::instance()).
901 : m_gc( GarbageCollector::instance() )
906 /// The object is not copy-constructible
907 ThreadGC( ThreadGC const& ) = delete;
909 /// Dtor calls fini()
915 /// Initialization. Repeat call is available
// Pre-allocates the thread's initial guard block from the global pool.
920 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
924 /// Finalization. Repeat call is available
// Returns all thread-local guards (the whole m_pList chain) to the global pool.
928 m_gc.freeGuardList( m_pList );
935 /// Initializes guard \p g
936 void allocGuard( dhp::details::guard& g )
938 assert( m_pList != nullptr );
// Fast path: pop a guard from the thread-local free list...
941 g.m_pGuard = m_pFree;
942 m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
// ...slow path: get one from the global pool and link it into the
// thread-local guard list.
945 g.m_pGuard = m_gc.allocGuard();
946 g.m_pGuard->pThreadNext = m_pList;
947 m_pList = g.m_pGuard;
// Returns the guard of \p g to the thread-local free list.
953 void freeGuard( dhp::details::guard& g )
955 assert( m_pList != nullptr );
// Clear the hazard slot, then push the guard_data onto m_pFree and detach it
// from the guard object.
957 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
958 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
959 m_pFree = g.m_pGuard;
960 g.m_pGuard = nullptr;
964 /// Initializes guard array \p arr
965 template <size_t Count>
966 void allocGuard( GuardArray<Count>& arr )
968 assert( m_pList != nullptr );
// First satisfy as many slots as possible from the thread-local free list...
971 while ( m_pFree && nCount < Count ) {
972 arr[nCount].set_guard( m_pFree );
973 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
// ...then fill the remainder from the global pool, linking each new guard
// into the thread-local guard list.
977 while ( nCount < Count ) {
978 details::guard& g = arr[nCount++];
979 g.set_guard( m_gc.allocGuard() );
980 g.get_guard()->pThreadNext = m_pList;
981 m_pList = g.get_guard();
985 /// Frees guard array \p arr
986 template <size_t Count>
987 void freeGuard( GuardArray<Count>& arr )
989 assert( m_pList != nullptr );
// Clears every guard and re-chains arr[0..Count-1] by pNextFree so the whole
// array is prepended to m_pFree in one pass.
// NOTE(review): assumes Count >= 1; Count - 1 would wrap for Count == 0.
991 details::guard_data * pGuard;
992 for ( size_t i = 0; i < Count - 1; ++i ) {
993 pGuard = arr[i].get_guard();
994 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
995 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
997 pGuard = arr[Count-1].get_guard();
998 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
999 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
1000 m_pFree = arr[0].get_guard();
1003 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
// Thin forwarder to the global GC's retirePtr.
1004 template <typename T>
1005 void retirePtr( T * p, void (* pFunc)(T *) )
1007 m_gc.retirePtr( p, pFunc );
1010 /// Run retiring cycle
1017 }} // namespace cds::gc
1020 #if CDS_COMPILER == CDS_COMPILER_MSVC
1021 # pragma warning(pop)
1024 #endif // #ifndef CDSLIB_GC_DETAILS_DHP_H