/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CDSLIB_GC_DETAILS_DHP_H
#define CDSLIB_GC_DETAILS_DHP_H

#include <mutex>        // std::unique_lock
#include <cds/algo/atomic.h>
#include <cds/algo/int_algo.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
#include <cds/sync/spinlock.h>

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(push)
#   pragma warning(disable:4251)    // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
#endif
namespace cds { namespace gc {

    /// Dynamic Hazard Pointer reclamation schema
    /**
        The cds::gc::dhp namespace and its members are an internal representation of the GC
        and should not be used directly. Use the cds::gc::DHP class in your code.

        The Dynamic Hazard Pointer (DHP) garbage collector is a singleton. The main user-level
        part of the DHP schema is the GC class and its nested classes. Before using any
        DHP-related class you must initialize the DHP garbage collector by constructing
        a cds::gc::DHP object at the beginning of your main().
        See the cds::gc::DHP class for an explanation.
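
        A minimal bootstrap sketch (not part of this header; it assumes the usual
        libcds initialization sequence from <cds/init.h> and <cds/gc/dhp.h>):
        \code
        #include <cds/init.h>
        #include <cds/gc/dhp.h>

        int main()
        {
            cds::Initialize();          // initialize libcds infrastructure
            {
                cds::gc::DHP dhpGC;     // construct the DHP singleton

                // attach the current (main) thread to libcds
                cds::threading::Manager::attachThread();

                // ... use DHP-based containers here ...
            }
            cds::Terminate();           // terminate libcds infrastructure
        }
        \endcode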

        \par Implementation issues
            The global list of free guards (\p cds::gc::dhp::details::guard_allocator) is protected
            by a spin-lock (i.e. serialized). This should not introduce a significant performance
            bottleneck, because each thread has its own set of guards allocated from the global
            list of free guards, and the global list is accessed only when all of the thread's
            guards are busy. In this case the thread allocates the next block of guards from the
            global list. Guards allocated for a thread are pushed back to the global list only
            when the thread terminates.
    */
    namespace dhp {

        // Forward declarations
        template <size_t Count> class GuardArray;
        class ThreadGC;
        class GarbageCollector;

        /// Retired pointer type
        typedef cds::gc::details::retired_ptr retired_ptr;

        using cds::gc::details::free_retired_ptr_func;

        /// Details of the Dynamic Hazard Pointer algorithm
        namespace details {

            // Forward declaration
            class liberate_set;

            /// Retired pointer buffer node
            struct retired_ptr_node {
                retired_ptr                          m_ptr;       ///< retired pointer
                atomics::atomic<retired_ptr_node *> m_pNext;     ///< next retired pointer in buffer
                atomics::atomic<retired_ptr_node *> m_pNextFree; ///< next item in free list of \p retired_ptr_node
            };

            /// Internal guard representation
            struct guard_data {
                typedef void * guarded_ptr;  ///< type of value guarded

                atomics::atomic<guarded_ptr>  pPost;       ///< pointer guarded
                atomics::atomic<guard_data *> pGlobalNext; ///< next item of the global list of allocated guards
                atomics::atomic<guard_data *> pNextFree;   ///< next item in the global or thread-local free list

                guard_data * pThreadNext; ///< next item of the thread's local list of guards

                guard_data() CDS_NOEXCEPT
                    : pPost( nullptr )
                    , pGlobalNext( nullptr )
                    , pNextFree( nullptr )
                    , pThreadNext( nullptr )
                {}

                void init() CDS_NOEXCEPT
                {
                    pPost.store( nullptr, atomics::memory_order_relaxed );
                }

                /// Checks if the guard is free, i.e. it does not guard any pointer
                bool isFree() const CDS_NOEXCEPT
                {
                    return pPost.load( atomics::memory_order_acquire ) == nullptr;
                }

                guarded_ptr get( atomics::memory_order order = atomics::memory_order_acquire )
                {
                    return pPost.load( order );
                }

                void set( guarded_ptr p, atomics::memory_order order = atomics::memory_order_release )
                {
                    pPost.store( p, order );
                }
            };
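
            // A sketch of the typical protect protocol built on guard_data (illustration
            // only; the user-level logic lives in cds::gc::DHP guards). The pointer must
            // be re-checked after publication, otherwise it may already be retired:
            //
            //      atomics::atomic<void*>& atomicSrc = ...;    // hypothetical source
            //      void* p;
            //      do {
            //          p = atomicSrc.load( atomics::memory_order_acquire );
            //          guard.set( p );                         // publish the hazard
            //      } while ( p != atomicSrc.load( atomics::memory_order_acquire ));
            //      // p is now safe to dereference until the guard is cleared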

            /// Guard allocator
            template <class Alloc = CDS_DEFAULT_ALLOCATOR>
            class guard_allocator
            {
                cds::details::Allocator<details::guard_data> m_GuardAllocator; ///< guard allocator

                atomics::atomic<guard_data *> m_GuardList;     ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
                atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
                cds::sync::spin               m_freeListLock;  ///< Protects access to m_FreeGuardList

                /*
                    Unfortunately, access to the list of free guards is lock-based.
                    Lock-free manipulation of the guard free list is ABA-prone.
                    TODO: manage m_FreeGuardList in a lock-free manner.
                */
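
                /*
                    Why a naive lock-free pop would be ABA-prone here (illustration):
                    let the free list be A -> B. Thread 1 reads head == A and
                    next == B, then stalls. Thread 2 pops A and B, then pushes A
                    back, so the list is now just A. Thread 1 resumes and its
                    CAS( head: A -> B ) succeeds, installing the dangling pointer B.
                */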

            private:
                /// Allocates a new guard from the heap. The function uses the aligned allocator
                guard_data * allocNew()
                {
                    //TODO: the allocator should use block allocation

                    details::guard_data * pGuard = m_GuardAllocator.New();

                    // Link the guard into the list.
                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
                    // so the ABA problem is impossible for it.
                    details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
                    do {
                        pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return pGuard;
                }

            public:
                guard_allocator() CDS_NOEXCEPT
                    : m_GuardList( nullptr )
                    , m_FreeGuardList( nullptr )
                {}

                ~guard_allocator()
                {
                    guard_data * pNext;
                    for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
                        pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
                        m_GuardAllocator.Delete( pData );
                    }
                }

                /// Allocates a guard from the free list, or from the heap if the free list is empty
                guard_data * alloc()
                {
                    // Try to pop a guard from the free list
                    details::guard_data * pGuard;
                    {
                        std::unique_lock<cds::sync::spin> al( m_freeListLock );
                        pGuard = m_FreeGuardList.load( atomics::memory_order_relaxed );
                        if ( pGuard )
                            m_FreeGuardList.store( pGuard->pNextFree.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    }
                    if ( !pGuard )
                        pGuard = allocNew(); // the free list is empty

                    pGuard->init();
                    return pGuard;
                }

                /// Frees guard \p pGuard
                /**
                    The function places the guard \p pGuard into the free list
                */
                void free( guard_data* pGuard ) CDS_NOEXCEPT
                {
                    pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pGuard->pNextFree.store( m_FreeGuardList.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
                }

                /// Allocates a list of guards
                /**
                    The returned list is linked by the guard's \p pThreadNext and \p pNextFree fields.

                    cds::gc::dhp::ThreadGC supporting method
                */
                guard_data * allocList( size_t nCount )
                {
                    assert( nCount != 0 );

                    guard_data * pHead;
                    guard_data * pLast;

                    pHead = pLast = alloc();

                    // The allocated guard list is private to the thread,
                    // so we can use relaxed memory order
                    while ( --nCount ) {
                        guard_data * p = alloc();
                        pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );

                    return pHead;
                }

                /// Frees a list of guards
                /**
                    The list \p pList is linked by the guard's \p pThreadNext field.

                    cds::gc::dhp::ThreadGC supporting method
                */
                void freeList( guard_data * pList ) CDS_NOEXCEPT
                {
                    assert( pList != nullptr );

                    guard_data * pLast = pList;
                    while ( pLast->pThreadNext ) {
                        pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
                        guard_data * p;
                        pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
                        pLast = p;
                    }

                    std::unique_lock<cds::sync::spin> al( m_freeListLock );
                    pLast->pNextFree.store( m_FreeGuardList.load( atomics::memory_order_relaxed ), atomics::memory_order_relaxed );
                    m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
                }

                /// Returns the head of the allocated guard list
                guard_data * begin() CDS_NOEXCEPT
                {
                    return m_GuardList.load( atomics::memory_order_acquire );
                }
            };

            /// Retired pointer buffer
            /**
                The buffer of retired nodes ready for liberating.
                When the buffer size exceeds a threshold, the GC calls the \p scan() procedure
                to free retired nodes.
            */
            class retired_ptr_buffer
            {
                atomics::atomic<retired_ptr_node *> m_pHead;      ///< head of buffer
                atomics::atomic<size_t>             m_nItemCount; ///< buffer's item count

            public:
                retired_ptr_buffer() CDS_NOEXCEPT
                    : m_pHead( nullptr )
                    , m_nItemCount( 0 )
                {}

                ~retired_ptr_buffer() CDS_NOEXCEPT
                {
                    assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
                }

                /// Pushes a new node into the buffer. Returns the current buffer size
                size_t push( retired_ptr_node& node ) CDS_NOEXCEPT
                {
                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        node.m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
                }

                /// Pushes the [pFirst, pLast] list linked by the pNext field. Returns the current buffer size
                size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
                {
                    assert( pFirst != nullptr );
                    assert( pLast != nullptr );

                    retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
                    do {
                        pLast->m_pNext.store( pHead, atomics::memory_order_relaxed );
                        // pHead is changed by compare_exchange_weak
                    } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ));

                    // the new buffer size counts all nSize pushed items
                    return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + nSize;
                }

                /// Result of the \ref dhp_gc_privatize "privatize" function
                /**
                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
                */
                typedef std::pair<retired_ptr_node *, size_t> privatize_result;

                /// Gets the current list of retired pointers and clears the list
                /**@anchor dhp_gc_privatize
                */
                privatize_result privatize() CDS_NOEXCEPT
                {
                    privatize_result res;

                    // The item counter is needed only as a threshold for the \p scan() function,
                    // so we may clear it without synchronization with m_pHead
                    res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
                    res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
                    return res;
                }

                /// Returns the current buffer size (approximate)
                size_t size() const CDS_NOEXCEPT
                {
                    return m_nItemCount.load( atomics::memory_order_relaxed );
                }
            };
355 The class acts as an allocator of retired node.
356 Retired pointers are linked in the lock-free list.
358 template <class Alloc = CDS_DEFAULT_ALLOCATOR>
359 class retired_ptr_pool {
361 typedef retired_ptr_node item;
363 /// Count of items in block
364 static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
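                // For example, on a typical 64-bit build sizeof(item) == 32 (retired_ptr is a
                // pointer plus a free-function pointer, followed by two atomic links), which
                // gives 1024/32 - 1 = 31 items per block.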

                /// Pool block
                struct block {
                    atomics::atomic<block *> pNext;                  ///< next block
                    item                     items[m_nItemPerBlock]; ///< item array
                };

                atomics::atomic<block *> m_pBlockListHead;       ///< head of the allocated block list

                // To solve the ABA problem we use an epoch-based approach
                unsigned int const            m_nEpochBitmask;   ///< Epoch bitmask (log2( m_nEpochCount ))
                atomics::atomic<unsigned int> m_nCurEpoch;       ///< Current epoch
                atomics::atomic<item *>*      m_pEpochFree;      ///< List of free items per epoch
                atomics::atomic<item *>       m_pGlobalFreeHead; ///< Head of the unallocated item list

                typedef cds::details::Allocator< block, Alloc > block_allocator;
                typedef cds::details::Allocator< atomics::atomic<item *>, Alloc > epoch_array_alloc;

            private:
                void allocNewBlock()
                {
                    // allocate new block
                    block * pNew = block_allocator().New();

                    // link items within the block
                    item * pLastItem = pNew->items + m_nItemPerBlock - 1;
                    for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
                        pItem->m_pNextFree.store( pItem + 1, atomics::memory_order_release );
                        CDS_STRICT_DO( pItem->m_pNext.store( nullptr, atomics::memory_order_relaxed ));
                    }

                    // link the new block into the block list
                    {
                        block * pHead = m_pBlockListHead.load( atomics::memory_order_relaxed );
                        do {
                            pNew->pNext.store( pHead, atomics::memory_order_relaxed );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_relaxed, atomics::memory_order_relaxed ));
                    }

                    // link the block's items into the global free list
                    {
                        item * pHead = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                        do {
                            pLastItem->m_pNextFree.store( pHead, atomics::memory_order_release );
                            // pHead is changed by compare_exchange_weak
                        } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
                    }
                }

                unsigned int current_epoch() const CDS_NOEXCEPT
                {
                    return m_nCurEpoch.load( atomics::memory_order_acquire ) & m_nEpochBitmask;
                }

                unsigned int next_epoch() const CDS_NOEXCEPT
                {
                    return ( m_nCurEpoch.load( atomics::memory_order_acquire ) - 1 ) & m_nEpochBitmask;
                }
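
                // Epoch rotation example (with nEpochCount == 4, i.e. bitmask == 3):
                // alloc() pops from the current-epoch list (m_nCurEpoch & 3), while
                // free_range() pushes onto the previous-epoch list ((m_nCurEpoch - 1) & 3).
                // A freed item therefore cannot be reused until inc_epoch() has wrapped
                // the counter around, which is the epoch-based answer to the ABA problem.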

            public:
                retired_ptr_pool( unsigned int nEpochCount = 8 )
                    : m_pBlockListHead( nullptr )
                    , m_nEpochBitmask( static_cast<unsigned int>( beans::ceil2( nEpochCount )) - 1 )
                    , m_nCurEpoch( 0 )
                    , m_pEpochFree( epoch_array_alloc().NewArray( m_nEpochBitmask + 1 ))
                    , m_pGlobalFreeHead( nullptr )
                {
                    for ( unsigned int i = 0; i <= m_nEpochBitmask; ++i )
                        m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );

                    allocNewBlock();
                }

                ~retired_ptr_pool()
                {
                    block * p;
                    for ( block * pBlock = m_pBlockListHead.load( atomics::memory_order_relaxed ); pBlock; pBlock = p ) {
                        p = pBlock->pNext.load( atomics::memory_order_relaxed );
                        block_allocator().Delete( pBlock );
                    }

                    epoch_array_alloc().Delete( m_pEpochFree, m_nEpochBitmask + 1 );
                }

                /// Increments the current epoch
                void inc_epoch() CDS_NOEXCEPT
                {
                    m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
                }

                /// Allocates a new retired pointer node
                retired_ptr_node& alloc()
                {
                    unsigned int nEpoch;
                    item * pItem;
                    for (;;) {
                        pItem = m_pEpochFree[ nEpoch = current_epoch() ].load( atomics::memory_order_acquire );
                        if ( !pItem )
                            goto retry;
                        if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem,
                                pItem->m_pNextFree.load( atomics::memory_order_acquire ),
                                atomics::memory_order_acquire, atomics::memory_order_relaxed ))
                        {
                            goto success;
                        }
                    }

                    // The epoch free list is empty;
                    // allocate from the global free list
                retry:
                    pItem = m_pGlobalFreeHead.load( atomics::memory_order_relaxed );
                    do {
                        if ( !pItem ) {
                            allocNewBlock();
                            goto retry;
                        }
                        // pItem is changed by compare_exchange_weak
                    } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem,
                                pItem->m_pNextFree.load( atomics::memory_order_acquire ),
                                atomics::memory_order_acquire, atomics::memory_order_relaxed ));

                success:
                    CDS_STRICT_DO( pItem->m_pNextFree.store( nullptr, atomics::memory_order_relaxed ));
                    return *pItem;
                }

                /// Allocates and initializes a new retired pointer node
                retired_ptr_node& alloc( const retired_ptr& p )
                {
                    retired_ptr_node& node = alloc();
                    node.m_ptr = p;
                    return node;
                }

                /// Places the list [pHead, pTail] of retired pointers into the pool (frees the retired pointers)
                /**
                    The list is linked by the m_pNextFree field
                */
                void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail ) CDS_NOEXCEPT
                {
                    assert( pHead != nullptr );
                    assert( pTail != nullptr );

                    unsigned int nEpoch;
                    item * pCurHead;
                    do {
                        pCurHead = m_pEpochFree[ nEpoch = next_epoch() ].load( atomics::memory_order_acquire );
                        pTail->m_pNextFree.store( pCurHead, atomics::memory_order_release );
                    } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
                }
            };

        } // namespace details

        /// Memory manager (Garbage collector)
        class CDS_EXPORT_API GarbageCollector
        {
        private:
            friend class ThreadGC;

            /// Internal GC statistics
            struct internal_stat
            {
                atomics::atomic<size_t> m_nGuardCount;     ///< Total guard count
                atomics::atomic<size_t> m_nFreeGuardCount; ///< Count of free guards

                internal_stat()
                    : m_nGuardCount( 0 )
                    , m_nFreeGuardCount( 0 )
                {}
            };

        public:
            /// Exception "No GarbageCollector object is created"
            class not_initialized : public std::runtime_error
            {
            public:
                not_initialized()
                    : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
                {}
            };

            /// Internal GC statistics
            struct InternalState
            {
                size_t m_nGuardCount;     ///< Total guard count
                size_t m_nFreeGuardCount; ///< Count of free guards

                InternalState()
                    : m_nGuardCount( 0 )
                    , m_nFreeGuardCount( 0 )
                {}

                InternalState& operator =( internal_stat const& s )
                {
                    m_nGuardCount = s.m_nGuardCount.load( atomics::memory_order_relaxed );
                    m_nFreeGuardCount = s.m_nFreeGuardCount.load( atomics::memory_order_relaxed );
                    return *this;
                }
            };

        private:
            static GarbageCollector * m_pManager; ///< GC global instance

            atomics::atomic<size_t> m_nLiberateThreshold;       ///< Max size of the retired pointer buffer that triggers \p scan()
            const size_t            m_nInitialThreadGuardCount; ///< Initial count of guards allocated for a ThreadGC

            details::guard_allocator<>  m_GuardPool;        ///< Guard pool
            details::retired_ptr_pool<> m_RetiredAllocator; ///< Pool of free retired pointers
            details::retired_ptr_buffer m_RetiredBuffer;    ///< Retired pointer buffer for liberating

            internal_stat m_stat;         ///< Internal statistics
            bool          m_bStatEnabled; ///< Internal statistics enabled

        public:
            /// Initializes the DHP memory manager singleton
            /**
                This member function creates and initializes the DHP global object.
                The function should be called before using CDS data structures based on the cds::gc::DHP GC.
                Usually it is called in the \p main() function; see cds::gc::dhp for an example.
                After calling this function you may use CDS data structures based on cds::gc::DHP.

                \par Parameters:
                - \p nLiberateThreshold - the \p scan() threshold. When the count of retired pointers reaches
                  this value, the \ref dhp_gc_liberate "scan()" member function is called to free retired pointers.
                  If \p nLiberateThreshold <= 1, \p scan() is called after each \ref dhp_gc_retirePtr "retirePtr" call.
                - \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
                  is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
                  If needed, the local thread guard pool grows automatically from the common pool.
                  When the thread terminates, its guard pool is returned to the common GC pool.
                - \p nEpochCount - internally, the DHP memory manager uses an epoch-based schema to solve
                  the ABA problem for its internal data. \p nEpochCount specifies the epoch count,
                  i.e. the count of simultaneously working threads that remove elements
                  of DHP-based concurrent data structures. The default value is 16.
            */
            static void CDS_STDCALL Construct(
                size_t nLiberateThreshold = 1024
                , size_t nInitialThreadGuardCount = 8
                , size_t nEpochCount = 16
            );
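
            // Usage sketch: the cds::gc::DHP constructor normally performs this call;
            // a direct invocation with the default parameter values would be:
            //
            //      cds::gc::dhp::GarbageCollector::Construct( 1024, 8, 16 );
            //      // ... work with DHP-based containers ...
            //      cds::gc::dhp::GarbageCollector::Destruct();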

            /// Destroys the DHP memory manager
            /**
                The member function destroys the DHP global object. After calling this function you may \b NOT
                use CDS data structures based on cds::gc::DHP. Usually the \p Destruct function is called
                at the end of your \p main(); see cds::gc::dhp for an example.
            */
            static void CDS_STDCALL Destruct();

            /// Returns a reference to the GarbageCollector instance
            /**
                If the DHP GC is not initialized, a \p not_initialized exception is thrown
            */
            static GarbageCollector& instance()
            {
                if ( m_pManager == nullptr )
                    throw not_initialized();
                return *m_pManager;
            }

            /// Checks if the global GC object is constructed and may be used
            static bool isUsed() CDS_NOEXCEPT
            {
                return m_pManager != nullptr;
            }

        public:
            /// Internal interface

            /// Allocates a guard
            details::guard_data * allocGuard()
            {
                return m_GuardPool.alloc();
            }

            /// Frees guard \p pGuard for reuse in the future
            void freeGuard( details::guard_data * pGuard )
            {
                m_GuardPool.free( pGuard );
            }

            /// Allocates a guard list for a thread
            details::guard_data * allocGuardList( size_t nCount )
            {
                return m_GuardPool.allocList( nCount );
            }

            /// Frees the thread's guard list pointed to by \p pList
            void freeGuardList( details::guard_data * pList )
            {
                m_GuardPool.freeList( pList );
            }

            /// Places the retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
            /**@anchor dhp_gc_retirePtr
            */
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *))
            {
                retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc )));
            }

            /// Places the retired pointer \p p into the thread's array of retired pointers for deferred reclamation
            void retirePtr( retired_ptr const& p )
            {
                if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc( p )) >= m_nLiberateThreshold.load( atomics::memory_order_relaxed ))
                    scan();
            }
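
            // Retirement sketch: a node removed from a container is not deleted at once;
            // it is queued together with its reclamation function and freed later by
            // scan() when no guard protects it (Foo and fooDisposer are hypothetical):
            //
            //      static void fooDisposer( Foo* p ) { delete p; }
            //
            //      GarbageCollector::instance().retirePtr( pFoo, fooDisposer );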

            /// Liberate function
            /** @anchor dhp_gc_liberate
                The main function of the Dynamic Hazard Pointer algorithm. It tries to free retired pointers
                that are not trapped by any guard.
            */
            void scan();
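
            // In outline, a sketch of what the out-of-line scan() implementation does:
            //  1. privatize() the retired-pointer buffer: grab the whole list and reset
            //     the item counter;
            //  2. walk the global guard list (m_GuardPool.begin()) and collect every
            //     currently guarded pointer;
            //  3. for each privatized retired pointer: if it is not guarded, invoke its
            //     free function; otherwise push it back to the buffer (push_list()) for
            //     the next scan() round;
            //  4. inc_epoch() on the retired-pointer pool so freed nodes can be reused.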

            /// Gets internal statistics
            InternalState& getInternalState( InternalState& stat ) const
            {
                return stat = m_stat;
            }

            /// Checks if internal statistics is enabled
            bool isStatisticsEnabled() const
            {
                return m_bStatEnabled;
            }

            /// Enables/disables internal statistics. Returns the previous state
            bool enableStatistics( bool bEnable )
            {
                bool bEnabled = m_bStatEnabled;
                m_bStatEnabled = bEnable;
                return bEnabled;
            }

        private:
            GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount, size_t nEpochCount );
            ~GarbageCollector();
        };

        /// Thread GC
        /**
            To use the Dynamic Hazard Pointer reclamation schema, each thread object must be linked with an object
            of the ThreadGC class that interacts with the GarbageCollector global object. The linkage is performed
            by calling \ref cds_threading "cds::threading::Manager::attachThread()" at the start of each thread
            that uses DHP GC. Before terminating a thread linked to DHP GC, it is necessary to call
            \ref cds_threading "cds::threading::Manager::detachThread()"; see the sketch below.

            The ThreadGC object maintains two lists:
            \li the thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
            \li the free guard list: the list of thread-local free guards (linked by the \p pNextFree field)

            The free guard list is a subset of the thread guard list.
        */
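        // Typical per-thread usage (sketch; workerThread is hypothetical):
        //
        //      void workerThread()
        //      {
        //          cds::threading::Manager::attachThread();    // links a ThreadGC to this thread
        //          // ... work with DHP-based containers ...
        //          cds::threading::Manager::detachThread();    // returns the guards to the GC
        //      }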
        class ThreadGC
        {
            GarbageCollector& m_gc;        ///< reference to the GC singleton
            details::guard_data * m_pList; ///< local list of guards owned by the thread
            details::guard_data * m_pFree; ///< list of free guards from m_pList

        public:
            /// Default constructor
            ThreadGC()
                : m_gc( GarbageCollector::instance() )
                , m_pList( nullptr )
                , m_pFree( nullptr )
            {}

            /// The object is not copy-constructible
            ThreadGC( ThreadGC const& ) = delete;

            /// Dtor calls fini()
            ~ThreadGC()
            {
                fini();
            }

            /// Initialization. Repeated calls are allowed
            void init()
            {
                if ( m_pList == nullptr )
                    m_pList = m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
            }

            /// Finalization. Repeated calls are allowed
            void fini()
            {
                if ( m_pList ) {
                    m_gc.freeGuardList( m_pList );
                    m_pList = m_pFree = nullptr;
                }
            }

        public:
            /// Allocates a new guard
            dhp::details::guard_data* allocGuard()
            {
                assert( m_pList != nullptr );

                dhp::details::guard_data* ret;
                if ( cds_likely( m_pFree )) {
                    ret = m_pFree;
                    m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                }
                else {
                    ret = m_gc.allocGuard();
                    ret->pThreadNext = m_pList;
                    m_pList = ret;
                }
                return ret;
            }

            /// Frees guard \p g
            void freeGuard( dhp::details::guard_data* g )
            {
                assert( m_pList != nullptr );
                if ( cds_likely( g )) {
                    g->pPost.store( nullptr, atomics::memory_order_relaxed );
                    g->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = g;
                }
            }

            template <size_t Count>
            using guard_array = dhp::details::guard_data* [Count];

            /// Initializes guard array \p arr
            template <size_t Count>
            void allocGuard( guard_array<Count>& arr )
            {
                assert( m_pList != nullptr );
                size_t nCount = 0;

                while ( m_pFree && nCount < Count ) {
                    arr[nCount] = m_pFree;
                    m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
                    ++nCount;
                }

                while ( nCount < Count ) {
                    dhp::details::guard_data*& g = arr[nCount];
                    g = m_gc.allocGuard();
                    g->pThreadNext = m_pList;
                    m_pList = g;
                    ++nCount;
                }
            }

            /// Frees guard array \p arr
            template <size_t Count>
            void freeGuard( guard_array<Count>& arr )
            {
                assert( m_pList != nullptr );

                details::guard_data* first = nullptr;
                details::guard_data* last = nullptr;
                for ( size_t i = 0; i < Count; ++i ) {
                    details::guard_data* guard = arr[i];
                    if ( cds_likely( guard )) {
                        guard->pPost.store( nullptr, atomics::memory_order_relaxed );
                        if ( first )
                            last->pNextFree.store( guard, atomics::memory_order_relaxed );
                        else
                            first = guard;
                        last = guard;
                    }
                }
                if ( first ) {
                    // splice the collected guards onto the head of the thread-local free list
                    last->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
                    m_pFree = first;
                }
            }

            /// Places the retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
            template <typename T>
            void retirePtr( T * p, void (* pFunc)(T *))
            {
                m_gc.retirePtr( p, pFunc );
            }

            /// Runs the retiring cycle
            void scan()
            {
                m_gc.scan();
            }
        };

    } // namespace dhp
}} // namespace cds::gc

#if CDS_COMPILER == CDS_COMPILER_MSVC
#   pragma warning(pop)
#endif

#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H