3 #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
4 #define __CDS_GC_PTB_PASS_THE_BUCK_H
6 #include <mutex> // unique_lock
7 #include <cds/cxx11_atomic.h>
8 #include <cds/gc/details/retired_ptr.h>
9 #include <cds/details/aligned_allocator.h>
10 #include <cds/details/allocator.h>
11 #include <cds/details/noncopyable.h>
12 #include <cds/lock/spinlock.h>
14 #if CDS_COMPILER == CDS_COMPILER_MSVC
15 # pragma warning(push)
16 # pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
19 namespace cds { namespace gc {
21 /// Pass The Buck reclamation schema
24 - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
25 dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
26 - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
27 Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
28 - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
29 for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
32 The cds::gc::ptb namespace and its members are internal representation of the Pass-the-Buck GC and should not be used directly.
33 Use cds::gc::PTB class in your code.
Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema is
the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB garbage collector
by constructing a cds::gc::PTB object at the beginning of your main().
See the cds::gc::PTB class for explanation.
40 \par Implementation issues
The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
This solution should not introduce a significant performance bottleneck, because each thread has its own set
of guards allocated from the global list of free guards, and access to the global list occurs only when
all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
Guards allocated for a thread are pushed back to the global list only when the thread terminates.
49 // Forward declarations
51 template <size_t Count> class GuardArray;
53 class GarbageCollector;
55 /// Retired pointer type
56 typedef cds::gc::details::retired_ptr retired_ptr;
58 using cds::gc::details::free_retired_ptr_func;
60 /// Details of Pass the Buck algorithm
63 // Forward declaration
66 /// Retired pointer buffer node
67 struct retired_ptr_node {
68 retired_ptr m_ptr ; ///< retired pointer
69 retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
70 retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
73 /// Internal guard representation
75 typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
76 typedef void * guarded_ptr ; ///< type of value guarded
78 atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
81 typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
82 handoff_spin spinHandOff ; ///< access to \p pHandOff field
83 handoff_ptr pHandOff ; ///< trapped pointer
86 atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
87 atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
89 guard_data * pThreadNext ; ///< next item of thread's local list of guards
97 , pGlobalNext( nullptr )
98 , pNextFree( nullptr )
99 , pThreadNext( nullptr )
104 pPost.store( nullptr, atomics::memory_order_relaxed );
108 /// Checks if the guard is free, that is, it does not contain any pointer guarded
111 return pPost.load( atomics::memory_order_acquire ) == nullptr;
116 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
117 class guard_allocator
119 cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
121 atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
122 atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
123 SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
126 Unfortunately, access to the list of free guard is lock-based.
127 Lock-free manipulations with guard free-list are ABA-prone.
128 TODO: working with m_FreeGuardList in lock-free manner.
132 /// Allocates new guard from the heap. The function uses aligned allocator
133 guard_data * allocNew()
135 //TODO: the allocator should make block allocation
137 details::guard_data * pGuard = m_GuardAllocator.New();
139 // Link guard to the list
140 // m_GuardList is accumulated list and it cannot support concurrent deletion,
141 // so, ABA problem is impossible for it
142 details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
144 pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
145 // pHead is changed by compare_exchange_weak
146 } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
155 : m_GuardList( nullptr )
156 , m_FreeGuardList( nullptr )
163 for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
164 pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
165 m_GuardAllocator.Delete( pData );
169 /// Allocates a guard from free list or from heap if free list is empty
172 // Try to pop a guard from free-list
173 details::guard_data * pGuard;
176 std::unique_lock<SpinLock> al( m_freeListLock );
177 pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
179 m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
188 /// Frees guard \p pGuard
190 The function places the guard \p pGuard into free-list
192 void free( guard_data * pGuard )
194 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
196 std::unique_lock<SpinLock> al( m_freeListLock );
197 pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
198 m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
201 /// Allocates list of guard
203 The list returned is linked by guard's \p pThreadNext and \p pNextFree fields.
205 cds::gc::ptb::ThreadGC supporting method
207 guard_data * allocList( size_t nCount )
209 assert( nCount != 0 );
217 // The guard list allocated is private for the thread,
218 // so, we can use relaxed memory order
220 guard_data * p = alloc();
221 pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
225 pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
230 /// Frees list of guards
232 The list \p pList is linked by guard's \p pThreadNext field.
234 cds::gc::ptb::ThreadGC supporting method
236 void freeList( guard_data * pList )
238 assert( pList != nullptr );
240 guard_data * pLast = pList;
241 while ( pLast->pThreadNext ) {
242 pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
244 pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
248 std::unique_lock<SpinLock> al( m_freeListLock );
249 pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
250 m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
253 /// Returns the list's head of guards allocated
256 return m_GuardList.load(atomics::memory_order_acquire);
260 /// Retired pointer buffer
262 The buffer of retired nodes ready for liberating.
263 When size of buffer exceeds a threshold the GC calls \p liberate procedure to free
266 class retired_ptr_buffer
268 atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
269 atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
278 ~retired_ptr_buffer()
280 assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
284 /// Pushes new node into the buffer. Returns current buffer size
285 size_t push( retired_ptr_node& node )
287 retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
289 node.m_pNext = pHead;
290 // pHead is changed by compare_exchange_weak
291 } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
293 return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
296 /// Result of \ref ptb_gc_privatve "privatize" function.
298 The \p privatize function returns retired node list as \p first and the size of that list as \p second.
300 typedef std::pair<retired_ptr_node *, size_t> privatize_result;
302 /// Gets current list of retired pointer and clears the list
303 /**@anchor ptb_gc_privatve
305 privatize_result privatize()
307 privatize_result res;
308 res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
310 // Item counter is needed only as a threshold for liberate function
311 // So, we may clear the item counter without synchronization with m_pHead
312 res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
316 /// Returns current size of buffer (approximate)
319 return m_nItemCount.load(atomics::memory_order_relaxed);
323 /// Pool of retired pointers
325 The class acts as an allocator of retired node.
326 Retired pointers are linked in the lock-free list.
328 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
329 class retired_ptr_pool {
331 typedef retired_ptr_node item;
333 /// Count of items in block
334 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
338 block * pNext ; ///< next block
339 item items[m_nItemPerBlock] ; ///< item array
342 atomics::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
344 // To solve ABA problem we use epoch-based approach
345 static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
346 atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
347 atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
348 atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
350 cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
356 // allocate new block
357 block * pNew = m_BlockAllocator.New();
359 // link items within the block
360 item * pLastItem = pNew->items + m_nItemPerBlock - 1;
361 for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
362 pItem->m_pNextFree = pItem + 1;
363 CDS_STRICT_DO( pItem->m_pNext = nullptr );
366 // link new block to block list
368 block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
371 // pHead is changed by compare_exchange_weak
372 } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
375 // link block's items to free list
377 item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
379 pLastItem->m_pNextFree = pHead;
380 // pHead is changed by compare_exchange_weak
381 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
385 unsigned int current_epoch() const
387 return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
389 unsigned int next_epoch() const
391 return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
398 : m_pBlockListHead( nullptr )
400 , m_pGlobalFreeHead( nullptr )
402 for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
403 m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
411 for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
413 m_BlockAllocator.Delete( pBlock );
417 /// Increments current epoch
420 m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
425 /// Allocates new retired pointer
426 retired_ptr_node& alloc()
431 pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
434 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
439 item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
441 if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
446 // Epoch free list is empty
447 // Alloc from global free list
449 pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
455 // pItem is changed by compare_exchange_weak
456 } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
459 CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
463 /// Allocates and initializes new retired pointer
464 retired_ptr_node& alloc( const retired_ptr& p )
466 retired_ptr_node& node = alloc();
471 /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
473 The list is linked on the m_pNextFree field
475 void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
477 assert( pHead != nullptr );
478 assert( pTail != nullptr );
483 pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
484 pTail->m_pNextFree = pCurHead;
485 } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
489 /// Uninitialized guard
490 class guard: public cds::details::noncopyable
492 friend class ThreadGC;
494 details::guard_data * m_pGuard ; ///< Pointer to guard data
496 /// Initialize empty guard.
498 : m_pGuard( nullptr )
501 /// Object destructor, does nothing
505 /// Guards pointer \p p
508 assert( m_pGuard != nullptr );
509 m_pGuard->pPost.store( p, atomics::memory_order_release );
510 //CDS_COMPILER_RW_BARRIER;
516 assert( m_pGuard != nullptr );
517 m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
518 CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
521 /// Guards pointer \p p
522 template <typename T>
523 T * operator =( T * p )
525 set( reinterpret_cast<void *>( const_cast<T *>(p) ));
530 std::nullptr_t operator=(std::nullptr_t)
537 public: // for ThreadGC.
539 GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
540 the compiler produces error:
\91cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard
\92 is protected
541 despite the fact that ThreadGC is declared as friend for guard class.
542 We should not like to declare m_pGuard member as public one.
543 Therefore, we have to add set_guard/get_guard public functions
546 void set_guard( details::guard_data * pGuard )
548 assert( m_pGuard == nullptr );
552 /// Get current guard data
553 details::guard_data * get_guard()
557 /// Get current guard data
558 details::guard_data * get_guard() const
564 } // namespace details
568 This class represents auto guard: ctor allocates a guard from guard pool,
569 dtor returns the guard back to the pool of free guard.
571 class Guard: public details::guard
574 typedef details::guard base_class;
575 friend class ThreadGC;
578 ThreadGC& m_gc ; ///< ThreadGC object of current thread
580 /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
583 /// Returns guard allocated back to pool of free guards
584 ~Guard(); // inline after GarbageCollector
586 /// Returns PTB GC object
592 /// Guards pointer \p p
593 template <typename T>
594 T * operator =( T * p )
596 return base_class::operator =<T>( p );
600 std::nullptr_t operator=(std::nullptr_t)
602 return base_class::operator =(nullptr);
609 This class represents array of auto guards: ctor allocates \p Count guards from guard pool,
610 dtor returns the guards allocated back to the pool.
612 template <size_t Count>
613 class GuardArray: public cds::details::noncopyable
615 details::guard m_arr[Count] ; ///< array of guard
616 ThreadGC& m_gc ; ///< ThreadGC object of current thread
617 const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
620 /// Rebind array for other size \p OtherCount
621 template <size_t OtherCount>
623 typedef GuardArray<OtherCount> other ; ///< rebinding result
627 /// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
628 GuardArray( ThreadGC& gc ) ; // inline below
630 /// Returns guards allocated back to pool
631 ~GuardArray() ; // inline below
633 /// Returns the capacity of array
634 CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
639 /// Returns PTB ThreadGC object
640 ThreadGC& getGC() CDS_NOEXCEPT
645 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
646 details::guard& operator []( size_t nIndex )
648 assert( nIndex < capacity() );
649 return m_arr[nIndex];
652 /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
653 const details::guard& operator []( size_t nIndex ) const
655 assert( nIndex < capacity() );
656 return m_arr[nIndex];
659 /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
660 template <typename T>
661 void set( size_t nIndex, T * p )
663 assert( nIndex < capacity() );
664 m_arr[nIndex].set( p );
667 /// Clears (sets to \p nullptr) the guard \p nIndex
668 void clear( size_t nIndex )
670 assert( nIndex < capacity() );
671 m_arr[nIndex].clear();
674 /// Clears all guards in the array
677 for ( size_t i = 0; i < capacity(); ++i )
682 /// Memory manager (Garbage collector)
683 class CDS_EXPORT_API GarbageCollector
687 friend class ThreadGC;
689 /// Internal GC statistics
692 atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
693 atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
697 , m_nFreeGuardCount(0)
703 /// Exception "No GarbageCollector object is created"
704 CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );
706 /// Internal GC statistics
709 size_t m_nGuardCount ; ///< Total guard count
710 size_t m_nFreeGuardCount ; ///< Count of free guard
715 , m_nFreeGuardCount(0)
718 InternalState& operator =( internal_stat const& s )
720 m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
721 m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
729 static GarbageCollector * m_pManager ; ///< GC global instance
731 details::guard_allocator<> m_GuardPool ; ///< Guard pool
732 details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
733 details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
734 //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate fnction call
736 atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
737 const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
739 internal_stat m_stat ; ///< Internal statistics
740 bool m_bStatEnabled ; ///< Internal Statistics enabled
743 /// Initializes PTB memory manager singleton
745 This member function creates and initializes PTB global object.
746 The function should be called before using CDS data structure based on cds::gc::PTB GC. Usually,
747 this member function is called in the \p main() function. See cds::gc::ptb for example.
748 After calling of this function you may use CDS data structures based on cds::gc::PTB.
751 \li \p nLiberateThreshold - the liberate threshold. When count of retired pointers reaches this value,
752 the \ref ptb_gc_liberate "liberate" member function would be called for freeing retired pointers.
753 If \p nLiberateThreshold <= 1, \p liberate would called after each \ref ptb_gc_retirePtr "retirePtr" call.
754 \li \p nInitialThreadGuardCount - initial count of guard allocated for ThreadGC. When a thread
755 is initialized the GC allocates local guard pool for the thread from common guard pool.
756 By perforce the local thread's guard pool is grown automatically from common pool.
757 When the thread terminated its guard pool is backed to common GC's pool.
760 static void CDS_STDCALL Construct(
761 size_t nLiberateThreshold = 1024
762 , size_t nInitialThreadGuardCount = 8
765 /// Destroys PTB memory manager
767 The member function destroys PTB global object. After calling of this function you may \b NOT
768 use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
769 at the end of your \p main(). See cds::gc::ptb for example.
771 static void CDS_STDCALL Destruct();
773 /// Returns pointer to GarbageCollector instance
775 If PTB GC is not initialized, \p PTBManagerEmpty exception is thrown
777 static GarbageCollector& instance()
779 if ( m_pManager == nullptr )
780 throw PTBManagerEmpty();
784 /// Checks if global GC object is constructed and may be used
785 static bool isUsed() CDS_NOEXCEPT
787 return m_pManager != nullptr;
792 /// Internal interface
794 /// Allocates a guard
795 details::guard_data * allocGuard()
797 return m_GuardPool.alloc();
800 /// Frees guard \p g for reusing in future
801 void freeGuard(details::guard_data * pGuard )
803 m_GuardPool.free( pGuard );
806 /// Allocates guard list for a thread.
807 details::guard_data * allocGuardList( size_t nCount )
809 return m_GuardPool.allocList( nCount );
812 /// Frees thread's guard list pointed by \p pList
813 void freeGuardList( details::guard_data * pList )
815 m_GuardPool.freeList( pList );
818 /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
819 /**@anchor ptb_gc_retirePtr
821 template <typename T>
822 void retirePtr( T * p, void (* pFunc)(T *) )
824 retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
827 /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
828 void retirePtr( retired_ptr const& p )
830 if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
835 /// Liberate function
836 /** @anchor ptb_gc_liberate
837 The main function of Pass The Buck algorithm. It tries to free retired pointers if they are not
838 trapped by any guard.
847 void liberate( details::liberate_set& set );
852 /// Get internal statistics
853 InternalState& getInternalState(InternalState& stat) const
855 return stat = m_stat;
858 /// Checks if internal statistics enabled
859 bool isStatisticsEnabled() const
861 return m_bStatEnabled;
864 /// Enables/disables internal statistics
865 bool enableStatistics( bool bEnable )
867 bool bEnabled = m_bStatEnabled;
868 m_bStatEnabled = bEnable;
874 GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
881 To use Pass The Buck reclamation schema each thread object must be linked with the object of ThreadGC class
882 that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
883 on the start of each thread that uses PTB GC. Before terminating the thread linked to PTB GC it is necessary to call
884 \ref cds_threading "cds::threading::Manager::detachThread()".
886 The ThreadGC object maintains two list:
887 \li Thread guard list: the list of thread-local guards (linked by \p pThreadNext field)
888 \li Free guard list: the list of thread-local free guards (linked by \p pNextFree field)
889 Free guard list is a subset of thread guard list.
891 class ThreadGC: public cds::details::noncopyable
893 GarbageCollector& m_gc ; ///< reference to GC singleton
894 details::guard_data * m_pList ; ///< Local list of guards owned by the thread
895 details::guard_data * m_pFree ; ///< The list of free guard from m_pList
899 : m_gc( GarbageCollector::instance() )
904 /// Dtor calls fini()
910 /// Initialization. Repeat call is available
915 m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
919 /// Finalization. Repeat call is available
923 m_gc.freeGuardList( m_pList );
930 /// Initializes guard \p g
931 void allocGuard( Guard& g )
933 assert( m_pList != nullptr );
935 g.m_pGuard = m_pFree;
936 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
939 g.m_pGuard = m_gc.allocGuard();
940 g.m_pGuard->pThreadNext = m_pList;
941 m_pList = g.m_pGuard;
946 void freeGuard( Guard& g )
948 assert( m_pList != nullptr );
949 g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
950 g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
951 m_pFree = g.m_pGuard;
954 /// Initializes guard array \p arr
955 template <size_t Count>
956 void allocGuard( GuardArray<Count>& arr )
958 assert( m_pList != nullptr );
961 while ( m_pFree && nCount < Count ) {
962 arr[nCount].set_guard( m_pFree );
963 m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
967 while ( nCount < Count ) {
968 details::guard& g = arr[nCount++];
969 g.set_guard( m_gc.allocGuard() );
970 g.get_guard()->pThreadNext = m_pList;
971 m_pList = g.get_guard();
975 /// Frees guard array \p arr
976 template <size_t Count>
977 void freeGuard( GuardArray<Count>& arr )
979 assert( m_pList != nullptr );
981 details::guard_data * pGuard;
982 for ( size_t i = 0; i < Count - 1; ++i ) {
983 pGuard = arr[i].get_guard();
984 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
985 pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
987 pGuard = arr[Count-1].get_guard();
988 pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
989 pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
990 m_pFree = arr[0].get_guard();
993 /// Places retired pointer \p and its deleter \p pFunc into list of retired pointer for deferred reclamation
994 template <typename T>
995 void retirePtr( T * p, void (* pFunc)(T *) )
997 m_gc.retirePtr( p, pFunc );
1009 //////////////////////////////////////////////////////////
1012 inline Guard::Guard(ThreadGC& gc)
1015 getGC().allocGuard( *this );
1017 inline Guard::~Guard()
1019 getGC().freeGuard( *this );
1022 template <size_t Count>
1023 inline GuardArray<Count>::GuardArray( ThreadGC& gc )
1026 getGC().allocGuard( *this );
1028 template <size_t Count>
1029 inline GuardArray<Count>::~GuardArray()
1031 getGC().freeGuard( *this );
1035 }} // namespace cds::gc
1037 #if CDS_COMPILER == CDS_COMPILER_MSVC
1038 # pragma warning(pop)
1042 #endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H