3 #ifndef __CDS_GC_DHP_DHP_H
4 #define __CDS_GC_DHP_DHP_H
6 #include <mutex> // unique_lock
7 #include <cds/cxx11_atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/lock/spinlock.h>
13 #if CDS_COMPILER == CDS_COMPILER_MSVC
14 # pragma warning(push)
15 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
18 namespace cds { namespace gc {
20 /// Dynamic Hazard Pointer reclamation schema
22 The cds::gc::dhp namespace and its members are internal representation of the GC and should not be used directly.
23 Use cds::gc::DHP class in your code.
25 Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level part of DHP schema is
26 GC class and its nested classes. Before use any DHP-related class you must initialize DHP garbage collector
27 by constructing a cds::gc::DHP object at the beginning of your main().
28 See cds::gc::DHP class for explanation.
30 \par Implementation issues
31 The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
32 It seems that this solution should not introduce significant performance bottleneck, because each thread has its own set
33 of guards allocated from the global list of free guards, and the global list is accessed only when
34 all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
35 Guards allocated for a thread are pushed back to the global list only when the thread terminates.
39 // Forward declarations
41 template <size_t Count> class GuardArray;
43 class GarbageCollector;
45 /// Retired pointer type
46 typedef cds::gc::details::retired_ptr retired_ptr;
48 using cds::gc::details::free_retired_ptr_func;
50 /// Details of Dynamic Hazard Pointer algorithm
53 // Forward declaration
56 /// Retired pointer buffer node
57 struct retired_ptr_node {
58 retired_ptr m_ptr ; ///< retired pointer
59 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
60 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
63 /// Internal guard representation
65 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
66 typedef void * guarded_ptr ; ///< type of value guarded
68 atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
71 typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
72 handoff_spin spinHandOff ; ///< access to \p pHandOff field
73 handoff_ptr pHandOff ; ///< trapped pointer
76 atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
77 atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
79 guard_data * pThreadNext ; ///< next item of thread's local list of guards
87 , pGlobalNext( nullptr )
88 , pNextFree( nullptr )
89 , pThreadNext( nullptr )
94 pPost.store( nullptr, atomics::memory_order_relaxed );
98 /// Checks if the guard is free, that is, it does not contain any pointer guarded
101 return pPost.load( atomics::memory_order_acquire ) == nullptr;
106 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
107 class guard_allocator
109 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
111 atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
112 atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
113 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
116 Unfortunately, access to the list of free guard is lock-based.
117 Lock-free manipulations with guard free-list are ABA-prone.
118 TODO: working with m_FreeGuardList in lock-free manner.
122 /// Allocates new guard from the heap. The function uses aligned allocator
123 guard_data * allocNew()
125 //TODO: the allocator should make block allocation
127 details::guard_data * pGuard = m_GuardAllocator.New();
129 // Link guard to the list
130 // m_GuardList is accumulated list and it cannot support concurrent deletion,
131 // so, ABA problem is impossible for it
132 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
134 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
135 // pHead is changed by compare_exchange_weak
136 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
145 : m_GuardList( nullptr )
146 , m_FreeGuardList( nullptr )
153 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
154 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
155 m_GuardAllocator.Delete( pData );
159 /// Allocates a guard from free list or from heap if free list is empty
162 // Try to pop a guard from free-list
163 details::guard_data * pGuard;
166 std::unique_lock<SpinLock> al( m_freeListLock );
167 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
169 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
178 /// Frees guard \p pGuard
180 The function places the guard \p pGuard into free-list
182 void free( guard_data * pGuard )
184 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
186 std::unique_lock<SpinLock> al( m_freeListLock );
187 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
188 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
191 /// Allocates list of guard
193 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
195 cds::gc::dhp::ThreadGC supporting method
197 guard_data * allocList( size_t nCount )
199 assert( nCount != 0 );
207 // The guard list allocated is private for the thread,
208 // so, we can use relaxed memory order
210 guard_data * p = alloc();
211 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
215 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
220 /// Frees list of guards
222 The list \p pList is linked by guard's \p pThreadNext field.
224 cds::gc::dhp::ThreadGC supporting method
226 void freeList( guard_data * pList )
228 assert( pList != nullptr );
230 guard_data * pLast = pList;
231 while ( pLast->pThreadNext ) {
232 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
234 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
238 std::unique_lock<SpinLock> al( m_freeListLock );
239 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
240 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
243 /// Returns the list's head of guards allocated
246 return m_GuardList.load(atomics::memory_order_acquire);
250 /// Retired pointer buffer
252 The buffer of retired nodes ready for liberating.
253 When size of buffer exceeds a threshold the GC calls \p liberate procedure to free
256 class retired_ptr_buffer
258 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
259 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
268 ~retired_ptr_buffer()
270 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
274 /// Pushes new node into the buffer. Returns current buffer size
275 size_t push( retired_ptr_node& node )
277 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
279 node.m_pNext = pHead;
280 // pHead is changed by compare_exchange_weak
281 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
283 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
286 /// Result of \ref dhp_gc_privatve "privatize" function.
288 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
290 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
292 /// Gets current list of retired pointer and clears the list
293 /**@anchor dhp_gc_privatve
295 privatize_result privatize()
297 privatize_result res;
298 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
300 // Item counter is needed only as a threshold for liberate function
301 // So, we may clear the item counter without synchronization with m_pHead
302 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
306 /// Returns current size of buffer (approximate)
309 return m_nItemCount.load(atomics::memory_order_relaxed);
313 /// Pool of retired pointers
315 The class acts as an allocator of retired node.
316 Retired pointers are linked in the lock-free list.
318 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
319 class retired_ptr_pool {
321 typedef retired_ptr_node item;
323 /// Count of items in block
324 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
328 block * pNext ; ///< next block
329 item items[m_nItemPerBlock] ; ///< item array
332 atomics::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
334 // To solve ABA problem we use epoch-based approach
335 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
336 atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
337 atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
338 atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
340 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
346 // allocate new block
347 block * pNew = m_BlockAllocator.New();
349 // link items within the block
350 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
351 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
352 pItem->m_pNextFree = pItem + 1;
353 CDS_STRICT_DO( pItem->m_pNext = nullptr );
356 // link new block to block list
358 block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
361 // pHead is changed by compare_exchange_weak
362 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
365 // link block's items to free list
367 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
369 pLastItem->m_pNextFree = pHead;
370 // pHead is changed by compare_exchange_weak
371 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
375 unsigned int current_epoch() const
377 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
379 unsigned int next_epoch() const
381 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
388 : m_pBlockListHead( nullptr )
390 , m_pGlobalFreeHead( nullptr )
392 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
393 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
401 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
403 m_BlockAllocator.Delete( pBlock );
407 /// Increments current epoch
410 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
415 /// Allocates new retired pointer
416 retired_ptr_node& alloc()
421 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
424 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
429 item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
431 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
436 // Epoch free list is empty
437 // Alloc from global free list
439 pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
445 // pItem is changed by compare_exchange_weak
446 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
449 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
453 /// Allocates and initializes new retired pointer
454 retired_ptr_node& alloc( const retired_ptr& p )
456 retired_ptr_node& node = alloc();
461 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
463 The list is linked on the m_pNextFree field
465 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
467 assert( pHead != nullptr );
468 assert( pTail != nullptr );
473 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
474 pTail->m_pNextFree = pCurHead;
475 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
479 /// Uninitialized guard
482 friend class ThreadGC;
484 details::guard_data * m_pGuard ; ///< Pointer to guard data
486 /// Initialize empty guard.
488 : m_pGuard( nullptr )
491 /// The object is not copy-constructible
492 guard( guard const& ) = delete;
494 /// Object destructor, does nothing
498 /// Guards pointer \p p
501 assert( m_pGuard != nullptr );
502 m_pGuard->pPost.store( p, atomics::memory_order_release );
503 //CDS_COMPILER_RW_BARRIER;
509 assert( m_pGuard != nullptr );
510 m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
511 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
514 /// Guards pointer \p p
515 template <typename T>
516 T * operator =( T * p )
518 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
523 std::nullptr_t operator=(std::nullptr_t)
530 public: // for ThreadGC.
532 GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
533 the compiler produces error:
\91cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard
\92 is protected
534 despite the fact that ThreadGC is declared as friend for guard class.
535 We should not like to declare m_pGuard member as public one.
536 Therefore, we have to add set_guard/get_guard public functions
539 void set_guard( details::guard_data * pGuard )
541 assert( m_pGuard == nullptr );
545 /// Get current guard data
546 details::guard_data * get_guard()
550 /// Get current guard data
551 details::guard_data * get_guard() const
557 } // namespace details
561 This class represents auto guard: ctor allocates a guard from guard pool,
562 dtor returns the guard back to the pool of free guard.
564 class Guard: public details::guard
567 typedef details::guard base_class;
568 friend class ThreadGC;
571 ThreadGC& m_gc ; ///< ThreadGC object of current thread
573 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
576 /// Returns guard allocated back to pool of free guards
577 ~Guard(); // inline after GarbageCollector
579 /// Returns DHP GC object
585 /// Guards pointer \p p
586 template <typename T>
587 T * operator =( T * p )
589 return base_class::operator =<T>( p );
593 std::nullptr_t operator=(std::nullptr_t)
595 return base_class::operator =(nullptr);
602 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
603 dtor returns the guards allocated back to the pool.
605 template <size_t Count>
608 details::guard m_arr[Count] ; ///< array of guard
609 ThreadGC& m_gc ; ///< ThreadGC object of current thread
610 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
613 /// Rebind array for other size \p OtherCount
614 template <size_t OtherCount>
616 typedef GuardArray<OtherCount> other ; ///< rebinding result
620 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
621 GuardArray( ThreadGC& gc ) ; // inline below
623 /// The object is not default-constructible
624 GuardArray() = delete;
626 /// The object is not copy-constructible
627 GuardArray( GuardArray const& ) = delete;
629 /// Returns guards allocated back to pool
630 ~GuardArray() ; // inline below
632 /// Returns the capacity of array
633 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
638 /// Returns DHP ThreadGC object
639 ThreadGC& getGC() CDS_NOEXCEPT
644 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
645 details::guard& operator []( size_t nIndex )
647 assert( nIndex < capacity() );
648 return m_arr[nIndex];
651 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
652 const details::guard& operator []( size_t nIndex ) const
654 assert( nIndex < capacity() );
655 return m_arr[nIndex];
658 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
659 template <typename T>
660 void set( size_t nIndex, T * p )
662 assert( nIndex < capacity() );
663 m_arr[nIndex].set( p );
666 /// Clears (sets to \p nullptr) the guard \p nIndex
667 void clear( size_t nIndex )
669 assert( nIndex < capacity() );
670 m_arr[nIndex].clear();
673 /// Clears all guards in the array
676 for ( size_t i = 0; i < capacity(); ++i )
681 /// Memory manager (Garbage collector)
682 class CDS_EXPORT_API GarbageCollector
686 friend class ThreadGC;
688 /// Internal GC statistics
691 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
692 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
696 , m_nFreeGuardCount(0)
702 /// Exception "No GarbageCollector object is created"
703 CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
705 /// Internal GC statistics
708 size_t m_nGuardCount ; ///< Total guard count
709 size_t m_nFreeGuardCount ; ///< Count of free guard
714 , m_nFreeGuardCount(0)
717 InternalState& operator =( internal_stat const& s )
719 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
720 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
728 static GarbageCollector * m_pManager ; ///< GC global instance
730 details::guard_allocator<> m_GuardPool ; ///< Guard pool
731 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
732 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
733 //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate fnction call
735 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
736 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
738 internal_stat m_stat ; ///< Internal statistics
739 bool m_bStatEnabled ; ///< Internal Statistics enabled
742 /// Initializes DHP memory manager singleton
744 This member function creates and initializes DHP global object.
745 The function should be called before using CDS data structure based on cds::gc::DHP GC. Usually,
746 this member function is called in the \p main() function. See cds::gc::dhp for example.
747 After calling of this function you may use CDS data structures based on cds::gc::DHP.
750 \li \p nLiberateThreshold - the liberate threshold. When count of retired pointers reaches this value,
751 the \ref dhp_gc_liberate "liberate" member function would be called for freeing retired pointers.
752 If \p nLiberateThreshold <= 1, \p liberate would called after each \ref dhp_gc_retirePtr "retirePtr" call.
753 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
754 is initialized the GC allocates local guard pool for the thread from common guard pool.
755 By perforce the local thread's guard pool is grown automatically from common pool.
756 When the thread terminated its guard pool is backed to common GC's pool.
759 static void CDS_STDCALL Construct(
760 size_t nLiberateThreshold = 1024
761 , size_t nInitialThreadGuardCount = 8
764 /// Destroys DHP memory manager
766 The member function destroys DHP global object. After calling of this function you may \b NOT
767 use CDS data structures based on cds::gc::DHP. Usually, the \p Destruct function is called
768 at the end of your \p main(). See cds::gc::dhp for example.
770 static void CDS_STDCALL Destruct();
772 /// Returns pointer to GarbageCollector instance
774 If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
776 static GarbageCollector& instance()
778 if ( m_pManager == nullptr )
779 throw DHPManagerEmpty();
783 /// Checks if global GC object is constructed and may be used
784 static bool isUsed() CDS_NOEXCEPT
786 return m_pManager != nullptr;
791 /// Internal interface
793 /// Allocates a guard
794 details::guard_data * allocGuard()
796 return m_GuardPool.alloc();
799 /// Frees guard \p g for reusing in future
800 void freeGuard(details::guard_data * pGuard )
802 m_GuardPool.free( pGuard );
805 /// Allocates guard list for a thread.
806 details::guard_data * allocGuardList( size_t nCount )
808 return m_GuardPool.allocList( nCount );
811 /// Frees thread's guard list pointed by \p pList
812 void freeGuardList( details::guard_data * pList )
814 m_GuardPool.freeList( pList );
817 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
818 /**@anchor dhp_gc_retirePtr
820 template <typename T>
821 void retirePtr( T * p, void (* pFunc)(T *) )
823 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
826 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
827 void retirePtr( retired_ptr const& p )
829 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
834 /// Liberate function
835 /** @anchor dhp_gc_liberate
836 The main function of Dynamic Hazard Pointer algorithm. It tries to free retired pointers if they are not
837 trapped by any guard.
846 void liberate( details::liberate_set& set );
851 /// Get internal statistics
852 InternalState& getInternalState(InternalState& stat) const
854 return stat = m_stat;
857 /// Checks if internal statistics enabled
858 bool isStatisticsEnabled() const
860 return m_bStatEnabled;
863 /// Enables/disables internal statistics
864 bool enableStatistics( bool bEnable )
866 bool bEnabled = m_bStatEnabled;
867 m_bStatEnabled = bEnable;
873 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
880 To use Dynamic Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
881 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
882 on the start of each thread that uses DHP GC. Before terminating the thread linked to DHP GC it is necessary to call
883 \ref cds_threading "cds::threading::Manager::detachThread()".
885 The ThreadGC object maintains two list:
886 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
887 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
888 Free guard list is a subset of thread guard list.
892 GarbageCollector& m_gc ; ///< reference to GC singleton
893 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
894 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
897 /// Default constructor
899 : m_gc( GarbageCollector::instance() )
904 /// The object is not copy-constructible
905 ThreadGC( ThreadGC const& ) = delete;
907 /// Dtor calls fini()
913 /// Initialization. Repeat call is available
918 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
922 /// Finalization. Repeat call is available
926 m_gc.freeGuardList( m_pList );
933 /// Initializes guard \p g
934 void allocGuard( Guard& g )
936 assert( m_pList != nullptr );
938 g.m_pGuard = m_pFree;
939 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
942 g.m_pGuard = m_gc.allocGuard();
943 g.m_pGuard->pThreadNext = m_pList;
944 m_pList = g.m_pGuard;
949 void freeGuard( Guard& g )
951 assert( m_pList != nullptr );
952 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
953 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
954 m_pFree = g.m_pGuard;
957 /// Initializes guard array \p arr
958 template <size_t Count>
959 void allocGuard( GuardArray<Count>& arr )
961 assert( m_pList != nullptr );
964 while ( m_pFree && nCount < Count ) {
965 arr[nCount].set_guard( m_pFree );
966 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
970 while ( nCount < Count ) {
971 details::guard& g = arr[nCount++];
972 g.set_guard( m_gc.allocGuard() );
973 g.get_guard()->pThreadNext = m_pList;
974 m_pList = g.get_guard();
978 /// Frees guard array \p arr
979 template <size_t Count>
980 void freeGuard( GuardArray<Count>& arr )
982 assert( m_pList != nullptr );
984 details::guard_data * pGuard;
985 for ( size_t i = 0; i < Count - 1; ++i ) {
986 pGuard = arr[i].get_guard();
987 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
988 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
990 pGuard = arr[Count-1].get_guard();
991 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
992 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
993 m_pFree = arr[0].get_guard();
996 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
997 template <typename T>
998 void retirePtr( T * p, void (* pFunc)(T *) )
1000 m_gc.retirePtr( p, pFunc );
1012 //////////////////////////////////////////////////////////
1015 inline Guard::Guard(ThreadGC& gc)
1018 getGC().allocGuard( *this );
1020 inline Guard::~Guard()
1022 getGC().freeGuard( *this );
1025 template <size_t Count>
1026 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
1029 getGC().allocGuard( *this );
1031 template <size_t Count>
1032 inline GuardArray<Count>::~GuardArray()
1034 getGC().freeGuard( *this );
1038 }} // namespace cds::gc
1040 #if CDS_COMPILER == CDS_COMPILER_MSVC
1041 # pragma warning(pop)
1045 #endif // #ifndef __CDS_GC_DHP_DHP_H