#ifndef __CDS_GC_PTB_H
#define __CDS_GC_PTB_H
-#include <cds/gc/dhp_decl.h>
-#include <cds/gc/dhp_impl.h>
+#include <cds/gc/dhp/dhp_decl.h>
+#include <cds/gc/dhp/dhp_impl.h>
#include <cds/details/lib.h>
#endif // #ifndef __CDS_GC_PTB_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
+#define __CDS_GC_PTB_PASS_THE_BUCK_H
+
+#include <mutex> // unique_lock
+#include <cds/cxx11_atomic.h>
+#include <cds/gc/details/retired_ptr.h>
+#include <cds/details/aligned_allocator.h>
+#include <cds/details/allocator.h>
+#include <cds/lock/spinlock.h>
+
+#if CDS_COMPILER == CDS_COMPILER_MSVC
+# pragma warning(push)
+# pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
+#endif
+
+namespace cds { namespace gc {
+
+ /// Pass The Buck reclamation schema
+ /**
+ \par Sources:
+ - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
+ dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
+ - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
+ Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
+ - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
+ for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
+
+
+        The cds::gc::ptb namespace and its members are the internal representation of the Pass-the-Buck GC and should not be used directly.
+        Use the cds::gc::PTB class in your code.
+
+        The Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema is
+        the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB garbage collector
+        by constructing a cds::gc::PTB object at the beginning of your main().
+        See the cds::gc::PTB class for explanation.
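+
+        A minimal initialization sketch (assuming the standard cds::Initialize()/cds::Terminate()
+        setup from <cds/init.h>):
+        \code
+        #include <cds/init.h>
+        #include <cds/gc/ptb.h>
+
+        int main() {
+            cds::Initialize();          // initialize cds infrastructure
+            {
+                cds::gc::PTB gcPTB;     // construct the PTB garbage collector singleton
+                // attach threads and use PTB-based containers here
+            }
+            cds::Terminate();           // terminate cds infrastructure
+            return 0;
+        }
+        \endcode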
+
+ \par Implementation issues
+        The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
+        This should not introduce a significant performance bottleneck, because each thread has its own set
+        of guards allocated from the global free list, and the global list is accessed only when
+        all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
+        The guards allocated to a thread are pushed back to the global list only when the thread terminates.
+ */
+ namespace ptb {
+
+ // Forward declarations
+ class Guard;
+ template <size_t Count> class GuardArray;
+ class ThreadGC;
+ class GarbageCollector;
+
+ /// Retired pointer type
+ typedef cds::gc::details::retired_ptr retired_ptr;
+
+ using cds::gc::details::free_retired_ptr_func;
+
+ /// Details of Pass the Buck algorithm
+ namespace details {
+
+ // Forward declaration
+ class liberate_set;
+
+ /// Retired pointer buffer node
+ struct retired_ptr_node {
+ retired_ptr m_ptr ; ///< retired pointer
+ retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
+ retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
+ };
+
+ /// Internal guard representation
+ struct guard_data {
+ typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
+ typedef void * guarded_ptr ; ///< type of value guarded
+
+ atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
+
+#if 0
+ typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
+ handoff_spin spinHandOff ; ///< access to \p pHandOff field
+ handoff_ptr pHandOff ; ///< trapped pointer
+#endif
+
+ atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
+ atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
+
+ guard_data * pThreadNext ; ///< next item of thread's local list of guards
+
+ //@cond
+ guard_data()
+ : pPost( nullptr )
+#if 0
+ , pHandOff( nullptr )
+#endif
+ , pGlobalNext( nullptr )
+ , pNextFree( nullptr )
+ , pThreadNext( nullptr )
+ {}
+
+ void init()
+ {
+ pPost.store( nullptr, atomics::memory_order_relaxed );
+ }
+ //@endcond
+
+                /// Checks if the guard is free, that is, it does not guard any pointer
+ bool isFree() const
+ {
+ return pPost.load( atomics::memory_order_acquire ) == nullptr;
+ }
+ };
+
+ /// Guard allocator
+ template <class Alloc = CDS_DEFAULT_ALLOCATOR>
+ class guard_allocator
+ {
+ cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
+
+ atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
+
+ /*
+                    Unfortunately, access to the list of free guards is lock-based.
+                    Lock-free manipulations with the guard free-list are ABA-prone.
+                    TODO: manage m_FreeGuardList in a lock-free manner.
+ */
+
+ private:
+ /// Allocates new guard from the heap. The function uses aligned allocator
+ guard_data * allocNew()
+ {
+                    //TODO: the allocator should allocate guards in blocks
+
+ details::guard_data * pGuard = m_GuardAllocator.New();
+
+ // Link guard to the list
+                    // m_GuardList is an accumulating list: it does not support concurrent deletion,
+                    // so the ABA problem cannot arise for it
+ details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
+ do {
+ pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
+ // pHead is changed by compare_exchange_weak
+ } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
+
+ pGuard->init();
+ return pGuard;
+ }
+
+ public:
+ // Default ctor
+ guard_allocator()
+ : m_GuardList( nullptr )
+ , m_FreeGuardList( nullptr )
+ {}
+
+ // Destructor
+ ~guard_allocator()
+ {
+ guard_data * pNext;
+ for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
+ pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
+ m_GuardAllocator.Delete( pData );
+ }
+ }
+
+                /// Allocates a guard from the free list, or from the heap if the free list is empty
+ guard_data * alloc()
+ {
+ // Try to pop a guard from free-list
+ details::guard_data * pGuard;
+
+ {
+ std::unique_lock<SpinLock> al( m_freeListLock );
+ pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
+ if ( pGuard )
+ m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ }
+ if ( !pGuard )
+ return allocNew();
+
+ pGuard->init();
+ return pGuard;
+ }
+
+ /// Frees guard \p pGuard
+ /**
+                    The function places the guard \p pGuard into the free list
+ */
+ void free( guard_data * pGuard )
+ {
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+
+ std::unique_lock<SpinLock> al( m_freeListLock );
+ pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
+ }
+
+                /// Allocates a list of guards
+                /**
+                    The returned list is linked by the guards' \p pThreadNext and \p pNextFree fields.
+
+                    Supporting method for cds::gc::ptb::ThreadGC
+                */
+ guard_data * allocList( size_t nCount )
+ {
+ assert( nCount != 0 );
+
+ guard_data * pHead;
+ guard_data * pLast;
+
+ pHead =
+ pLast = alloc();
+
+                    // The allocated guard list is private to the thread,
+                    // so we can use relaxed memory order
+ while ( --nCount ) {
+ guard_data * p = alloc();
+ pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
+ pLast = p;
+ }
+
+ pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
+
+ return pHead;
+ }
+
+                /// Frees a list of guards
+                /**
+                    The list \p pList is linked by the guards' \p pThreadNext field.
+
+                    Supporting method for cds::gc::ptb::ThreadGC
+                */
+ void freeList( guard_data * pList )
+ {
+ assert( pList != nullptr );
+
+ guard_data * pLast = pList;
+ while ( pLast->pThreadNext ) {
+ pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
+ guard_data * p;
+ pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
+ pLast = p;
+ }
+
+ std::unique_lock<SpinLock> al( m_freeListLock );
+ pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
+ }
+
+                /// Returns the head of the list of allocated guards
+ guard_data * begin()
+ {
+ return m_GuardList.load(atomics::memory_order_acquire);
+ }
+ };
+
+ /// Retired pointer buffer
+ /**
+                A buffer of retired nodes ready for liberating.
+                When the buffer size exceeds a threshold, the GC calls the \p liberate procedure to free
+                the retired nodes.
+ */
+ class retired_ptr_buffer
+ {
+ atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
+ atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
+
+ public:
+ //@cond
+ retired_ptr_buffer()
+ : m_pHead( nullptr )
+ , m_nItemCount(0)
+ {}
+
+ ~retired_ptr_buffer()
+ {
+ assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
+ }
+ //@endcond
+
+                /// Pushes a new node into the buffer; returns the current buffer size
+ size_t push( retired_ptr_node& node )
+ {
+ retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
+ do {
+ node.m_pNext = pHead;
+ // pHead is changed by compare_exchange_weak
+ } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
+
+ return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
+ }
+
+                /// Result of the \ref ptb_gc_privatize "privatize" function.
+ /**
+ The \p privatize function returns retired node list as \p first and the size of that list as \p second.
+ */
+ typedef std::pair<retired_ptr_node *, size_t> privatize_result;
+
+                /// Gets the current list of retired pointers and clears the list
+                /**@anchor ptb_gc_privatize
+                */
+ privatize_result privatize()
+ {
+ privatize_result res;
+ res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
+
+                    // The item counter is needed only as a threshold for the liberate function,
+                    // so we may clear it without synchronization with m_pHead
+ res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
+ return res;
+ }
+
+ /// Returns current size of buffer (approximate)
+ size_t size() const
+ {
+ return m_nItemCount.load(atomics::memory_order_relaxed);
+ }
+ };
+
+ /// Pool of retired pointers
+ /**
+                The class acts as an allocator of retired nodes.
+                The retired pointers are linked into a lock-free list.
+ */
+ template <class Alloc = CDS_DEFAULT_ALLOCATOR>
+ class retired_ptr_pool {
+ /// Pool item
+ typedef retired_ptr_node item;
+
+ /// Count of items in block
+ static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
+
+ /// Pool block
+ struct block {
+ block * pNext ; ///< next block
+ item items[m_nItemPerBlock] ; ///< item array
+ };
+
+                atomics::atomic<block *> m_pBlockListHead ;   ///< head of the allocated block list
+
+ // To solve ABA problem we use epoch-based approach
+ static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
+ atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
+ atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
+ atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
+
+ cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
+
+ private:
+ //@cond
+ void allocNewBlock()
+ {
+ // allocate new block
+ block * pNew = m_BlockAllocator.New();
+
+ // link items within the block
+ item * pLastItem = pNew->items + m_nItemPerBlock - 1;
+ for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
+ pItem->m_pNextFree = pItem + 1;
+ CDS_STRICT_DO( pItem->m_pNext = nullptr );
+ }
+
+ // link new block to block list
+ {
+ block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
+ do {
+ pNew->pNext = pHead;
+ // pHead is changed by compare_exchange_weak
+ } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ }
+
+ // link block's items to free list
+ {
+ item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
+ do {
+ pLastItem->m_pNextFree = pHead;
+ // pHead is changed by compare_exchange_weak
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ }
+ }
+
+ unsigned int current_epoch() const
+ {
+ return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
+ }
+ unsigned int next_epoch() const
+ {
+ return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
+ }
+ //@endcond
+
+ public:
+ //@cond
+ retired_ptr_pool()
+ : m_pBlockListHead( nullptr )
+ , m_nCurEpoch(0)
+ , m_pGlobalFreeHead( nullptr )
+ {
+ for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
+ m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
+
+ allocNewBlock();
+ }
+
+ ~retired_ptr_pool()
+ {
+ block * p;
+ for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
+ p = pBlock->pNext;
+ m_BlockAllocator.Delete( pBlock );
+ }
+ }
+
+ /// Increments current epoch
+ void inc_epoch()
+ {
+ m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
+ }
+
+ //@endcond
+
+ /// Allocates new retired pointer
+ retired_ptr_node& alloc()
+ {
+ unsigned int nEpoch;
+ item * pItem;
+ for (;;) {
+ pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
+ if ( !pItem )
+ goto retry;
+ if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
+ goto success;
+ }
+
+ // Epoch free list is empty
+ // Alloc from global free list
+ retry:
+ pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
+ do {
+ if ( !pItem ) {
+ allocNewBlock();
+ goto retry;
+ }
+ // pItem is changed by compare_exchange_weak
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
+
+ success:
+ CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
+ return *pItem;
+ }
+
+ /// Allocates and initializes new retired pointer
+ retired_ptr_node& alloc( const retired_ptr& p )
+ {
+ retired_ptr_node& node = alloc();
+ node.m_ptr = p;
+ return node;
+ }
+
+                /// Places the list (\p pHead, \p pTail) of retired pointers into the pool (frees the retired pointers)
+                /**
+                    The list is linked via the \p m_pNextFree field
+                */
+ void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
+ {
+ assert( pHead != nullptr );
+ assert( pTail != nullptr );
+
+ unsigned int nEpoch;
+ item * pCurHead;
+ do {
+ pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
+ pTail->m_pNextFree = pCurHead;
+ } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
+ }
+ };
+
+ /// Uninitialized guard
+ class guard
+ {
+ friend class ThreadGC;
+ protected:
+ details::guard_data * m_pGuard ; ///< Pointer to guard data
+ public:
+ /// Initialize empty guard.
+ guard()
+ : m_pGuard( nullptr )
+ {}
+
+ /// The object is not copy-constructible
+ guard( guard const& ) = delete;
+
+ /// Object destructor, does nothing
+ ~guard()
+ {}
+
+ /// Guards pointer \p p
+ void set( void * p )
+ {
+ assert( m_pGuard != nullptr );
+ m_pGuard->pPost.store( p, atomics::memory_order_release );
+ //CDS_COMPILER_RW_BARRIER;
+ }
+
+ /// Clears the guard
+ void clear()
+ {
+ assert( m_pGuard != nullptr );
+ m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
+ }
+
+ /// Guards pointer \p p
+ template <typename T>
+ T * operator =( T * p )
+ {
+ set( reinterpret_cast<void *>( const_cast<T *>(p) ));
+ return p;
+ }
+
+ //@cond
+ std::nullptr_t operator=(std::nullptr_t)
+ {
+ clear();
+ return nullptr;
+ }
+ //@endcond
+
+ public: // for ThreadGC.
+ /*
+                GCC cannot compile the template versions of ThreadGC::allocGuard/freeGuard:
+                the compiler produces the error "'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard' is protected"
+                despite the fact that ThreadGC is declared as a friend of the guard class.
+                We do not want to make the m_pGuard member public.
+                Therefore, we add the public set_guard/get_guard functions
+ */
+ /// Set guard data
+ void set_guard( details::guard_data * pGuard )
+ {
+ assert( m_pGuard == nullptr );
+ m_pGuard = pGuard;
+ }
+
+ /// Get current guard data
+ details::guard_data * get_guard()
+ {
+ return m_pGuard;
+ }
+ /// Get current guard data
+ details::guard_data * get_guard() const
+ {
+ return m_pGuard;
+ }
+ };
+
+ } // namespace details
+
+ /// Guard
+ /**
+            This class represents an auto guard: the ctor allocates a guard from the guard pool,
+            the dtor returns the guard back to the pool of free guards.
+ */
+ class Guard: public details::guard
+ {
+ //@cond
+ typedef details::guard base_class;
+ friend class ThreadGC;
+ //@endcond
+
+ ThreadGC& m_gc ; ///< ThreadGC object of current thread
+ public:
+ /// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
+ Guard(ThreadGC& gc);
+
+            /// Returns the allocated guard back to the pool of free guards
+ ~Guard(); // inline after GarbageCollector
+
+ /// Returns PTB GC object
+ ThreadGC& getGC()
+ {
+ return m_gc;
+ }
+
+ /// Guards pointer \p p
+ template <typename T>
+ T * operator =( T * p )
+ {
+ return base_class::operator =<T>( p );
+ }
+
+ //@cond
+ std::nullptr_t operator=(std::nullptr_t)
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
+ };
+
+ /// Array of guards
+ /**
+            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
+            the dtor returns the allocated guards back to the pool.
+ */
+ template <size_t Count>
+ class GuardArray
+ {
+            details::guard      m_arr[Count]    ;    ///< array of guards
+ ThreadGC& m_gc ; ///< ThreadGC object of current thread
+ const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
+
+ public:
+ /// Rebind array for other size \p OtherCount
+ template <size_t OtherCount>
+ struct rebind {
+ typedef GuardArray<OtherCount> other ; ///< rebinding result
+ };
+
+ public:
+            /// Allocates an array of guards from \p gc, which must be the ThreadGC object of the current thread
+ GuardArray( ThreadGC& gc ) ; // inline below
+
+ /// The object is not default-constructible
+ GuardArray() = delete;
+
+ /// The object is not copy-constructible
+ GuardArray( GuardArray const& ) = delete;
+
+            /// Returns the allocated guards back to the pool
+ ~GuardArray() ; // inline below
+
+ /// Returns the capacity of array
+ CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
+ {
+ return c_nCapacity;
+ }
+
+ /// Returns PTB ThreadGC object
+ ThreadGC& getGC() CDS_NOEXCEPT
+ {
+ return m_gc;
+ }
+
+ /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
+ details::guard& operator []( size_t nIndex )
+ {
+ assert( nIndex < capacity() );
+ return m_arr[nIndex];
+ }
+
+ /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
+ const details::guard& operator []( size_t nIndex ) const
+ {
+ assert( nIndex < capacity() );
+ return m_arr[nIndex];
+ }
+
+ /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
+ template <typename T>
+ void set( size_t nIndex, T * p )
+ {
+ assert( nIndex < capacity() );
+ m_arr[nIndex].set( p );
+ }
+
+ /// Clears (sets to \p nullptr) the guard \p nIndex
+ void clear( size_t nIndex )
+ {
+ assert( nIndex < capacity() );
+ m_arr[nIndex].clear();
+ }
+
+ /// Clears all guards in the array
+ void clearAll()
+ {
+ for ( size_t i = 0; i < capacity(); ++i )
+ clear(i);
+ }
+ };
+
+ /// Memory manager (Garbage collector)
+ class CDS_EXPORT_API GarbageCollector
+ {
+ private:
+ //@cond
+ friend class ThreadGC;
+
+ /// Internal GC statistics
+ struct internal_stat
+ {
+ atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
+                atomics::atomic<size_t>  m_nFreeGuardCount  ;   ///< Count of free guards
+
+ internal_stat()
+ : m_nGuardCount(0)
+ , m_nFreeGuardCount(0)
+ {}
+ };
+ //@endcond
+
+ public:
+            /// Exception "No GarbageCollector object has been created"
+ CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );
+
+ /// Internal GC statistics
+ struct InternalState
+ {
+ size_t m_nGuardCount ; ///< Total guard count
+                size_t m_nFreeGuardCount    ;   ///< Count of free guards
+
+ //@cond
+ InternalState()
+ : m_nGuardCount(0)
+ , m_nFreeGuardCount(0)
+ {}
+
+ InternalState& operator =( internal_stat const& s )
+ {
+ m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
+ m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
+
+ return *this;
+ }
+ //@endcond
+ };
+
+ private:
+ static GarbageCollector * m_pManager ; ///< GC global instance
+
+ details::guard_allocator<> m_GuardPool ; ///< Guard pool
+ details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
+ details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
+            //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate function calls
+
+ atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
+ const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
+
+ internal_stat m_stat ; ///< Internal statistics
+ bool m_bStatEnabled ; ///< Internal Statistics enabled
+
+ public:
+ /// Initializes PTB memory manager singleton
+ /**
+                This member function creates and initializes the global PTB object.
+                It should be called before using any CDS data structure based on the cds::gc::PTB GC. Usually,
+                it is called in the \p main() function. See cds::gc::ptb for an example.
+                After calling this function you may use CDS data structures based on cds::gc::PTB.
+
+                \par Parameters
+            \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
+                the \ref ptb_gc_liberate "liberate" member function is called to free the retired pointers.
+                If \p nLiberateThreshold <= 1, \p liberate is called after each \ref ptb_gc_retirePtr "retirePtr" call.
+            \li \p nInitialThreadGuardCount - initial count of guards allocated for a ThreadGC. When a thread
+                is initialized, the GC allocates a local guard pool for the thread from the common guard pool.
+                If needed, the thread-local guard pool grows automatically from the common pool.
+                When the thread terminates, its guard pool is returned to the common GC pool.
+
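+                A hedged sketch of direct construction (usually you construct a cds::gc::PTB object instead,
+                which calls this function with the same defaults):
+                \code
+                cds::gc::ptb::GarbageCollector::Construct( 1024, 8 );
+                // ... use PTB-based containers ...
+                cds::gc::ptb::GarbageCollector::Destruct();
+                \endcode
+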
+ */
+ static void CDS_STDCALL Construct(
+ size_t nLiberateThreshold = 1024
+ , size_t nInitialThreadGuardCount = 8
+ );
+
+ /// Destroys PTB memory manager
+ /**
+                The member function destroys the global PTB object. After calling this function you may \b NOT
+                use CDS data structures based on cds::gc::PTB. Usually, \p Destruct is called
+                at the end of your \p main(). See cds::gc::ptb for an example.
+ */
+ static void CDS_STDCALL Destruct();
+
+ /// Returns pointer to GarbageCollector instance
+ /**
+                If the PTB GC is not initialized, a \p PTBManagerEmpty exception is thrown
+ */
+ static GarbageCollector& instance()
+ {
+ if ( m_pManager == nullptr )
+ throw PTBManagerEmpty();
+ return *m_pManager;
+ }
+
+ /// Checks if global GC object is constructed and may be used
+ static bool isUsed() CDS_NOEXCEPT
+ {
+ return m_pManager != nullptr;
+ }
+
+ public:
+ //@{
+ /// Internal interface
+
+ /// Allocates a guard
+ details::guard_data * allocGuard()
+ {
+ return m_GuardPool.alloc();
+ }
+
+            /// Frees guard \p pGuard for future reuse
+ void freeGuard(details::guard_data * pGuard )
+ {
+ m_GuardPool.free( pGuard );
+ }
+
+            /// Allocates a guard list for a thread.
+ details::guard_data * allocGuardList( size_t nCount )
+ {
+ return m_GuardPool.allocList( nCount );
+ }
+
+ /// Frees thread's guard list pointed by \p pList
+ void freeGuardList( details::guard_data * pList )
+ {
+ m_GuardPool.freeList( pList );
+ }
+
+            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
+ /**@anchor ptb_gc_retirePtr
+ */
+ template <typename T>
+ void retirePtr( T * p, void (* pFunc)(T *) )
+ {
+ retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
+ }
+
+            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
+ void retirePtr( retired_ptr const& p )
+ {
+ if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
+ liberate();
+ }
+
+ protected:
+ /// Liberate function
+ /** @anchor ptb_gc_liberate
+                The main function of the Pass-the-Buck algorithm. It tries to free the retired pointers that are not
+                trapped by any guard.
+ */
+ void liberate();
+
+ //@}
+
+ private:
+ //@cond
+#if 0
+ void liberate( details::liberate_set& set );
+#endif
+ //@endcond
+
+ public:
+ /// Get internal statistics
+ InternalState& getInternalState(InternalState& stat) const
+ {
+ return stat = m_stat;
+ }
+
+ /// Checks if internal statistics enabled
+ bool isStatisticsEnabled() const
+ {
+ return m_bStatEnabled;
+ }
+
+ /// Enables/disables internal statistics
+ bool enableStatistics( bool bEnable )
+ {
+ bool bEnabled = m_bStatEnabled;
+ m_bStatEnabled = bEnable;
+ return bEnabled;
+ }
+
+ private:
+ //@cond none
+ GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
+ ~GarbageCollector();
+ //@endcond
+ };
+
+ /// Thread GC
+ /**
+            To use the Pass-the-Buck reclamation schema, each thread must be linked with an object of the ThreadGC class
+            that interacts with the global GarbageCollector object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
+            at the start of each thread that uses the PTB GC. Before terminating a thread linked to the PTB GC, it is necessary to call
+            \ref cds_threading "cds::threading::Manager::detachThread()", as sketched below.
+
+            The ThreadGC object maintains two lists:
+            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
+            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
+            The free guard list is a subset of the thread guard list.
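+
+            A sketch of a typical thread lifecycle (the attach/detach calls are the cds::threading::Manager
+            interface; the worker body is hypothetical):
+            \code
+            void thread_func()
+            {
+                cds::threading::Manager::attachThread();   // links the thread with a ThreadGC object
+                // ... work with PTB-based containers ...
+                cds::threading::Manager::detachThread();   // returns the thread-local guards to the GC
+            }
+            \endcode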
+ */
+ class ThreadGC
+ {
+ GarbageCollector& m_gc ; ///< reference to GC singleton
+ details::guard_data * m_pList ; ///< Local list of guards owned by the thread
+ details::guard_data * m_pFree ; ///< The list of free guard from m_pList
+
+ public:
+ /// Default constructor
+ ThreadGC()
+ : m_gc( GarbageCollector::instance() )
+ , m_pList( nullptr )
+ , m_pFree( nullptr )
+ {}
+
+ /// The object is not copy-constructible
+ ThreadGC( ThreadGC const& ) = delete;
+
+ /// Dtor calls fini()
+ ~ThreadGC()
+ {
+ fini();
+ }
+
+            /// Initialization. Repeated calls are allowed
+ void init()
+ {
+ if ( !m_pList ) {
+ m_pList =
+ m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
+ }
+ }
+
+            /// Finalization. Repeated calls are allowed
+ void fini()
+ {
+ if ( m_pList ) {
+ m_gc.freeGuardList( m_pList );
+ m_pList =
+ m_pFree = nullptr;
+ }
+ }
+
+ public:
+ /// Initializes guard \p g
+ void allocGuard( Guard& g )
+ {
+ assert( m_pList != nullptr );
+ if ( m_pFree ) {
+ g.m_pGuard = m_pFree;
+ m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
+ }
+ else {
+ g.m_pGuard = m_gc.allocGuard();
+ g.m_pGuard->pThreadNext = m_pList;
+ m_pList = g.m_pGuard;
+ }
+ }
+
+ /// Frees guard \p g
+ void freeGuard( Guard& g )
+ {
+ assert( m_pList != nullptr );
+ g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+ m_pFree = g.m_pGuard;
+ }
+
+ /// Initializes guard array \p arr
+ template <size_t Count>
+ void allocGuard( GuardArray<Count>& arr )
+ {
+ assert( m_pList != nullptr );
+ size_t nCount = 0;
+
+ while ( m_pFree && nCount < Count ) {
+ arr[nCount].set_guard( m_pFree );
+ m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
+ ++nCount;
+ }
+
+ while ( nCount < Count ) {
+ details::guard& g = arr[nCount++];
+ g.set_guard( m_gc.allocGuard() );
+ g.get_guard()->pThreadNext = m_pList;
+ m_pList = g.get_guard();
+ }
+ }
+
+ /// Frees guard array \p arr
+ template <size_t Count>
+ void freeGuard( GuardArray<Count>& arr )
+ {
+ assert( m_pList != nullptr );
+
+ details::guard_data * pGuard;
+ for ( size_t i = 0; i < Count - 1; ++i ) {
+ pGuard = arr[i].get_guard();
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
+ }
+ pGuard = arr[Count-1].get_guard();
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+ m_pFree = arr[0].get_guard();
+ }
+
+            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
+ template <typename T>
+ void retirePtr( T * p, void (* pFunc)(T *) )
+ {
+ m_gc.retirePtr( p, pFunc );
+ }
+
+ //@cond
+ void scan()
+ {
+ m_gc.liberate();
+ }
+ //@endcond
+
+ };
+
+ //////////////////////////////////////////////////////////
+ // Inlines
+
+ inline Guard::Guard(ThreadGC& gc)
+ : m_gc( gc )
+ {
+ getGC().allocGuard( *this );
+ }
+ inline Guard::~Guard()
+ {
+ getGC().freeGuard( *this );
+ }
+
+ template <size_t Count>
+ inline GuardArray<Count>::GuardArray( ThreadGC& gc )
+ : m_gc( gc )
+ {
+ getGC().allocGuard( *this );
+ }
+ template <size_t Count>
+ inline GuardArray<Count>::~GuardArray()
+ {
+ getGC().freeGuard( *this );
+ }
+
+ } // namespace ptb
+}} // namespace cds::gc
+
+#if CDS_COMPILER == CDS_COMPILER_MSVC
+# pragma warning(pop)
+#endif
+
+
+#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_PTB_DECL_H
+#define __CDS_GC_PTB_DECL_H
+
+#include <cds/gc/dhp/dhp.h>
+#include <cds/details/marked_ptr.h>
+#include <cds/details/static_functor.h>
+
+namespace cds { namespace gc {
+
+ /// Pass-the-Buck garbage collector
+ /** @ingroup cds_garbage_collector
+ @headerfile cds/gc/dhp.h
+ This class is a wrapper for Pass-the-Buck garbage collector internal implementation.
+
+ Sources:
+ - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
+ dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
+ - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
+ Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
+        - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
+            for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
+
+        See the \ref cds_how_to_use "How to use" section for details of applying the garbage collector.
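+
+        A hedged usage sketch (hypothetical \p Foo and \p FooDisposer types; the current thread
+        must be attached to the GC):
+        \code
+        struct Foo { int nValue; };
+        struct FooDisposer {
+            void operator()( Foo * p ) const { delete p; }
+        };
+
+        atomics::atomic<Foo *> atomicFoo( new Foo );
+        {
+            cds::gc::PTB::Guard guard;
+            Foo * p = guard.protect( atomicFoo );   // p is safe to dereference while guard lives
+            // ... use *p ...
+        }
+        // Retire the pointer: it is reclaimed when no guard protects it
+        cds::gc::PTB::retire<FooDisposer>( atomicFoo.load( atomics::memory_order_relaxed ));
+        \endcode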
+ */
+ class PTB
+ {
+ public:
+ /// Native guarded pointer type
+ typedef void * guarded_pointer;
+
+ /// Atomic reference
+ /**
+ @headerfile cds/gc/dhp.h
+ */
+ template <typename T> using atomic_ref = atomics::atomic<T *>;
+
+ /// Atomic type
+ /**
+ @headerfile cds/gc/dhp.h
+ */
+ template <typename T> using atomic_type = atomics::atomic<T>;
+
+ /// Atomic marked pointer
+ /**
+ @headerfile cds/gc/dhp.h
+ */
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+
+ /// Thread GC implementation for internal usage
+ typedef ptb::ThreadGC thread_gc_impl;
+
+ /// Wrapper for ptb::ThreadGC class
+ /**
+ @headerfile cds/gc/dhp.h
+            This class automatically attaches/detaches the Pass-the-Buck GC
+            for the current thread.
+ */
+ class thread_gc: public thread_gc_impl
+ {
+ //@cond
+ bool m_bPersistent;
+ //@endcond
+ public:
+ /// Constructor
+ /**
+ The constructor attaches the current thread to the Pass-the-Buck GC
+ if it is not yet attached.
+ The \p bPersistent parameter specifies attachment persistence:
+ - \p true - the class destructor will not detach the thread from Pass-the-Buck GC.
+ - \p false (default) - the class destructor will detach the thread from Pass-the-Buck GC.
+ */
+ thread_gc(
+ bool bPersistent = false
+ ) ; // inline in ptb_impl.h
+
+ /// Destructor
+ /**
+ If the object has been created in persistent mode, the destructor does nothing.
+ Otherwise it detaches the current thread from Pass-the-Buck GC.
+ */
+ ~thread_gc() ; // inline in ptb_impl.h
+ };
+
+
+ /// Pass-the-Buck guard
+ /**
+ @headerfile cds/gc/dhp.h
+ This class is a wrapper for ptb::Guard.
+ */
+ class Guard: public ptb::Guard
+ {
+ //@cond
+ typedef ptb::Guard base_class;
+ //@endcond
+
+ public:
+ //@cond
+ Guard() ; // inline in ptb_impl.h
+ //@endcond
+
+ /// Protects a pointer of type <tt> atomic<T*> </tt>
+ /**
+                Returns the value of \p toGuard.
+
+                The function repeatedly loads \p toGuard and stores it
+                to the guard slot until the guard's value equals \p toGuard
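+
+                A short usage sketch (hypothetical \p node_type):
+                \code
+                atomics::atomic<node_type *> pHead;
+                cds::gc::PTB::Guard g;
+                node_type * p = g.protect( pHead ); // p stays protected until g is reassigned or destroyed
+                \endcode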
+ */
+ template <typename T>
+ T protect( atomics::atomic<T> const& toGuard )
+ {
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pRet;
+ do {
+ pRet = assign( pCur );
+ pCur = toGuard.load(atomics::memory_order_acquire);
+ } while ( pRet != pCur );
+ return pCur;
+ }
+
+ /// Protects a converted pointer of type <tt> atomic<T*> </tt>
+ /**
+                Returns the value of \p toGuard.
+
+                The function repeatedly loads \p toGuard and stores the result of functor \p f
+                to the guard slot until the guard's value equals \p toGuard.
+
+ The function is useful for intrusive containers when \p toGuard is a node pointer
+ that should be converted to a pointer to the value type before guarding.
+ The parameter \p f of type Func is a functor that makes this conversion:
+ \code
+ struct functor {
+ value_type * operator()( T * p );
+ };
+ \endcode
+                Actually, the result of <tt> f( toGuard.load() ) </tt> is assigned to the guard.
+ */
+ template <typename T, class Func>
+ T protect( atomics::atomic<T> const& toGuard, Func f )
+ {
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pRet;
+ do {
+ pRet = pCur;
+ assign( f( pCur ) );
+ pCur = toGuard.load(atomics::memory_order_acquire);
+ } while ( pRet != pCur );
+ return pCur;
+ }
+
+ /// Store \p p to the guard
+ /**
+                The function is equivalent to a simple assignment; no loop is performed.
+                It can be used for a pointer that cannot be changed concurrently.
+ */
+ template <typename T>
+ T * assign( T * p )
+ {
+ return base_class::operator =(p);
+ }
+
+ //@cond
+ std::nullptr_t assign( std::nullptr_t )
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
+
+ /// Store marked pointer \p p to the guard
+ /**
+                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+                It can be used for a marked pointer that cannot be changed concurrently.
+ */
+ template <typename T, int BITMASK>
+ T * assign( cds::details::marked_ptr<T, BITMASK> p )
+ {
+ return base_class::operator =( p.ptr() );
+ }
+
+ /// Copy from \p src guard to \p this guard
+ void copy( Guard const& src )
+ {
+ assign( src.get_native() );
+ }
+
+ /// Clear value of the guard
+ void clear()
+ {
+ base_class::clear();
+ }
+
+ /// Get the value currently protected (relaxed read)
+ template <typename T>
+ T * get() const
+ {
+ return reinterpret_cast<T *>( get_native() );
+ }
+
+ /// Get native guarded pointer stored
+ guarded_pointer get_native() const
+ {
+ return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed);
+ }
+
+ };
+
+ /// Array of Pass-the-Buck guards
+ /**
+ @headerfile cds/gc/dhp.h
+            This class is a wrapper for the ptb::GuardArray template.
+            The template parameter \p Count defines the size of the PTB array.
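+
+            A usage sketch (hypothetical \p node_type and atomic sources):
+            \code
+            atomics::atomic<node_type *> atomicPrev, atomicCur;
+            cds::gc::PTB::GuardArray<2> guards;
+            node_type * pPrev = guards.protect( 0, atomicPrev );
+            node_type * pCur  = guards.protect( 1, atomicCur );
+            // both pointers remain protected while guards is alive
+            \endcode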
+ */
+ template <size_t Count>
+ class GuardArray: public ptb::GuardArray<Count>
+ {
+ //@cond
+ typedef ptb::GuardArray<Count> base_class;
+ //@endcond
+ public:
+            /// Rebind array for another size \p OtherCount
+ template <size_t OtherCount>
+ struct rebind {
+ typedef GuardArray<OtherCount> other ; ///< rebinding result
+ };
+
+ public:
+ //@cond
+ GuardArray() ; // inline in ptb_impl.h
+ //@endcond
+
+ /// Protects a pointer of type \p atomic<T*>
+ /**
+                Returns the value of \p toGuard.
+
+                The function repeatedly loads \p toGuard and stores it
+                to the slot \p nIndex until the guard's value equals \p toGuard
+ */
+ template <typename T>
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
+ {
+ T pRet;
+ do {
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+
+ return pRet;
+ }
+
+            /// Protects a converted pointer of type \p atomic<T*>
+            /**
+                Returns the value of \p toGuard.
+
+                The function repeatedly loads \p toGuard and stores the result of functor \p f
+                to the slot \p nIndex until the guard's value equals \p toGuard.
+
+                The function is useful for intrusive containers when \p toGuard is a node pointer
+                that should be converted to a pointer to the value type before guarding.
+                The parameter \p f of type Func is a functor that makes this conversion:
+                \code
+                struct functor {
+                    value_type * operator()( T * p );
+                };
+                \endcode
+                Actually, the result of <tt> f( toGuard.load() ) </tt> is assigned to the guard.
+ */
+ template <typename T, class Func>
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
+ {
+ T pRet;
+ do {
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
+
+ return pRet;
+ }
+
+            /// Store \p p to the slot \p nIndex
+            /**
+                The function is equivalent to a simple assignment; no loop is performed.
+            */
+ template <typename T>
+ T * assign( size_t nIndex, T * p )
+ {
+ base_class::set(nIndex, p);
+ return p;
+ }
+
+ /// Store marked pointer \p p to the guard
+ /**
+                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+                It can be used for a marked pointer that cannot be changed concurrently.
+ */
+ template <typename T, int Bitmask>
+ T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
+ {
+ return assign( nIndex, p.ptr() );
+ }
+
+ /// Copy guarded value from \p src guard to slot at index \p nIndex
+ void copy( size_t nIndex, Guard const& src )
+ {
+ assign( nIndex, src.get_native() );
+ }
+
+ /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
+ void copy( size_t nDestIndex, size_t nSrcIndex )
+ {
+ assign( nDestIndex, get_native( nSrcIndex ));
+ }
+
+ /// Clear value of the slot \p nIndex
+ void clear( size_t nIndex)
+ {
+ base_class::clear( nIndex );
+ }
+
+ /// Get current value of slot \p nIndex
+ template <typename T>
+ T * get( size_t nIndex) const
+ {
+ return reinterpret_cast<T *>( get_native( nIndex ) );
+ }
+
+ /// Get native guarded pointer stored
+ guarded_pointer get_native( size_t nIndex ) const
+ {
+ return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed);
+ }
+
+ /// Capacity of the guard array
+ static CDS_CONSTEXPR size_t capacity()
+ {
+ return Count;
+ }
+ };
+
+ public:
+ /// Initializes ptb::GarbageCollector singleton
+            The constructor calls ptb::GarbageCollector::Construct with the given parameters.
+            See ptb::GarbageCollector::Construct for the meaning of the parameters.
+ */
+ PTB(
+ size_t nLiberateThreshold = 1024
+ , size_t nInitialThreadGuardCount = 8
+ )
+ {
+ ptb::GarbageCollector::Construct(
+ nLiberateThreshold,
+ nInitialThreadGuardCount
+ );
+ }
+
+ /// Terminates ptb::GarbageCollector singleton
+ /**
+ The destructor calls \code ptb::GarbageCollector::Destruct() \endcode
+ */
+ ~PTB()
+ {
+ ptb::GarbageCollector::Destruct();
+ }
+
+        /// Checks that the count of available hazard pointers is not less than \p nCountNeeded
+ /**
+ The function always returns \p true since the guard count is unlimited for
+ PTB garbage collector.
+ */
+ static bool check_available_guards( size_t nCountNeeded, bool /*bRaiseException*/ = true )
+ {
+ CDS_UNUSED( nCountNeeded );
+ return true;
+ }
+
+ /// Retire pointer \p p with function \p pFunc
+            The function places pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no guard points to it.
+            The pointer is deleted by calling \p pFunc.
+ */
+ template <typename T>
+ static void retire( T * p, void (* pFunc)(T *) )
+ {
+ ptb::GarbageCollector::instance().retirePtr( p, pFunc );
+ }
+
+ /// Retire pointer \p p with functor of type \p Disposer
+            The function places pointer \p p into the array of pointers ready for removing
+            (the so-called retired pointer array). The pointer can be safely removed when no guard points to it.
+
+ See gc::HP::retire for \p Disposer requirements.
+ */
+ template <class Disposer, typename T>
+ static void retire( T * p )
+ {
+ retire( p, cds::details::static_functor<Disposer, T>::call );
+ }
+
+ /// Checks if Pass-the-Buck GC is constructed and may be used
+ static bool isUsed()
+ {
+ return ptb::GarbageCollector::isUsed();
+ }
+
+ /// Forced GC cycle call for current thread
+ /**
+ Usually, this function should not be called directly.
+ */
+ static void scan() ; // inline in ptb_impl.h
+
+ /// Synonym for \ref scan()
+ static void force_dispose()
+ {
+ scan();
+ }
+ };
+
+}} // namespace cds::gc
+
+#endif // #ifndef __CDS_GC_PTB_DECL_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_PTB_IMPL_H
+#define __CDS_GC_PTB_IMPL_H
+
+#include <cds/threading/model.h>
+
+//@cond
+namespace cds { namespace gc {
+
+ inline PTB::thread_gc::thread_gc(
+ bool bPersistent
+ )
+ : m_bPersistent( bPersistent )
+ {
+ if ( !cds::threading::Manager::isThreadAttached() )
+ cds::threading::Manager::attachThread();
+ }
+
+ inline PTB::thread_gc::~thread_gc()
+ {
+ if ( !m_bPersistent )
+ cds::threading::Manager::detachThread();
+ }
+
+ inline PTB::Guard::Guard()
+ : Guard::base_class( cds::threading::getGC<PTB>() )
+ {}
+
+ template <size_t COUNT>
+ inline PTB::GuardArray<COUNT>::GuardArray()
+ : GuardArray::base_class( cds::threading::getGC<PTB>() )
+ {}
+
+ inline void PTB::scan()
+ {
+ cds::threading::getGC<PTB>().scan();
+ }
+
+}} // namespace cds::gc
+//@endcond
+
+#endif // #ifndef __CDS_GC_PTB_IMPL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_PTB_DECL_H
-#define __CDS_GC_PTB_DECL_H
-
-#include <cds/gc/ptb/ptb.h>
-#include <cds/details/marked_ptr.h>
-#include <cds/details/static_functor.h>
-
-namespace cds { namespace gc {
-
- /// Pass-the-Buck garbage collector
- /** @ingroup cds_garbage_collector
- @headerfile cds/gc/dhp.h
- This class is a wrapper for Pass-the-Buck garbage collector internal implementation.
-
- Sources:
- - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
- dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
- - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
- Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
- - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
- for Dynamic_Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
-
- See \ref cds_how_to_use "How to use" section for details of garbage collector applying.
- */
- class PTB
- {
- public:
- /// Native guarded pointer type
- typedef void * guarded_pointer;
-
- /// Atomic reference
- /**
- @headerfile cds/gc/dhp.h
- */
- template <typename T> using atomic_ref = atomics::atomic<T *>;
-
- /// Atomic type
- /**
- @headerfile cds/gc/dhp.h
- */
- template <typename T> using atomic_type = atomics::atomic<T>;
-
- /// Atomic marked pointer
- /**
- @headerfile cds/gc/dhp.h
- */
- template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
-
- /// Thread GC implementation for internal usage
- typedef ptb::ThreadGC thread_gc_impl;
-
- /// Wrapper for ptb::ThreadGC class
- /**
- @headerfile cds/gc/dhp.h
- This class performs automatically attaching/detaching Pass-the-Buck GC
- for the current thread.
- */
- class thread_gc: public thread_gc_impl
- {
- //@cond
- bool m_bPersistent;
- //@endcond
- public:
- /// Constructor
- /**
- The constructor attaches the current thread to the Pass-the-Buck GC
- if it is not yet attached.
- The \p bPersistent parameter specifies attachment persistence:
- - \p true - the class destructor will not detach the thread from Pass-the-Buck GC.
- - \p false (default) - the class destructor will detach the thread from Pass-the-Buck GC.
- */
- thread_gc(
- bool bPersistent = false
- ) ; // inline in ptb_impl.h
-
- /// Destructor
- /**
- If the object has been created in persistent mode, the destructor does nothing.
- Otherwise it detaches the current thread from Pass-the-Buck GC.
- */
- ~thread_gc() ; // inline in ptb_impl.h
- };
-
-
- /// Pass-the-Buck guard
- /**
- @headerfile cds/gc/dhp.h
- This class is a wrapper for ptb::Guard.
- */
- class Guard: public ptb::Guard
- {
- //@cond
- typedef ptb::Guard base_class;
- //@endcond
-
- public:
- //@cond
- Guard() ; // inline in ptb_impl.h
- //@endcond
-
- /// Protects a pointer of type <tt> atomic<T*> </tt>
- /**
- Return the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the HP slot repeatedly until the guard's value equals \p toGuard
- */
- template <typename T>
- T protect( atomics::atomic<T> const& toGuard )
- {
- T pCur = toGuard.load(atomics::memory_order_relaxed);
- T pRet;
- do {
- pRet = assign( pCur );
- pCur = toGuard.load(atomics::memory_order_acquire);
- } while ( pRet != pCur );
- return pCur;
- }
-
- /// Protects a converted pointer of type <tt> atomic<T*> </tt>
- /**
- Return the value of \p toGuard
-
- The function tries to load \p toGuard and to store result of \p f functor
- to the HP slot repeatedly until the guard's value equals \p toGuard.
-
- The function is useful for intrusive containers when \p toGuard is a node pointer
- that should be converted to a pointer to the value type before guarding.
- The parameter \p f of type Func is a functor that makes this conversion:
- \code
- struct functor {
- value_type * operator()( T * p );
- };
- \endcode
- Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
- */
- template <typename T, class Func>
- T protect( atomics::atomic<T> const& toGuard, Func f )
- {
- T pCur = toGuard.load(atomics::memory_order_relaxed);
- T pRet;
- do {
- pRet = pCur;
- assign( f( pCur ) );
- pCur = toGuard.load(atomics::memory_order_acquire);
- } while ( pRet != pCur );
- return pCur;
- }
-
- /// Store \p p to the guard
- /**
- The function equals to a simple assignment, no loop is performed.
- Can be used for a pointer that cannot be changed concurrently.
- */
- template <typename T>
- T * assign( T * p )
- {
- return base_class::operator =(p);
- }
-
- //@cond
- std::nullptr_t assign( std::nullptr_t )
- {
- return base_class::operator =(nullptr);
- }
- //@endcond
-
- /// Store marked pointer \p p to the guard
- /**
- The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
- Can be used for a marked pointer that cannot be changed concurrently.
- */
- template <typename T, int BITMASK>
- T * assign( cds::details::marked_ptr<T, BITMASK> p )
- {
- return base_class::operator =( p.ptr() );
- }
-
- /// Copy from \p src guard to \p this guard
- void copy( Guard const& src )
- {
- assign( src.get_native() );
- }
-
- /// Clear value of the guard
- void clear()
- {
- base_class::clear();
- }
-
- /// Get the value currently protected (relaxed read)
- template <typename T>
- T * get() const
- {
- return reinterpret_cast<T *>( get_native() );
- }
-
- /// Get native guarded pointer stored
- guarded_pointer get_native() const
- {
- return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed);
- }
-
- };
-
- /// Array of Pass-the-Buck guards
- /**
- @headerfile cds/gc/dhp.h
- This class is a wrapper for ptb::GuardArray template.
- Template parameter \p Count defines the size of PTB array.
- */
- template <size_t Count>
- class GuardArray: public ptb::GuardArray<Count>
- {
- //@cond
- typedef ptb::GuardArray<Count> base_class;
- //@endcond
- public:
- /// Rebind array for other size \p COUNT2
- template <size_t OtherCount>
- struct rebind {
- typedef GuardArray<OtherCount> other ; ///< rebinding result
- };
-
- public:
- //@cond
- GuardArray() ; // inline in ptb_impl.h
- //@endcond
-
- /// Protects a pointer of type \p atomic<T*>
- /**
- Return the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
- */
- template <typename T>
- T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
- {
- T pRet;
- do {
- pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
-
- return pRet;
- }
-
- /// Protects a pointer of type \p atomic<T*>
- /**
- Return the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
-
- The function is useful for intrusive containers when \p toGuard is a node pointer
- that should be converted to a pointer to the value type before guarding.
- The parameter \p f of type Func is a functor that makes this conversion:
- \code
- struct functor {
- value_type * operator()( T * p );
- };
- \endcode
- Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
- */
- template <typename T, class Func>
- T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
- {
- T pRet;
- do {
- assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
- } while ( pRet != toGuard.load(atomics::memory_order_acquire));
-
- return pRet;
- }
-
- /// Store \p to the slot \p nIndex
- /**
- The function equals to a simple assignment, no loop is performed.
- */
- template <typename T>
- T * assign( size_t nIndex, T * p )
- {
- base_class::set(nIndex, p);
- return p;
- }
-
- /// Store marked pointer \p p to the guard
- /**
- The function equals to a simple assignment of <tt>p.ptr()</tt>, no loop is performed.
- Can be used for a marked pointer that cannot be changed concurrently.
- */
- template <typename T, int Bitmask>
- T * assign( size_t nIndex, cds::details::marked_ptr<T, Bitmask> p )
- {
- return assign( nIndex, p.ptr() );
- }
-
- /// Copy guarded value from \p src guard to slot at index \p nIndex
- void copy( size_t nIndex, Guard const& src )
- {
- assign( nIndex, src.get_native() );
- }
-
- /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
- void copy( size_t nDestIndex, size_t nSrcIndex )
- {
- assign( nDestIndex, get_native( nSrcIndex ));
- }
-
- /// Clear value of the slot \p nIndex
- void clear( size_t nIndex)
- {
- base_class::clear( nIndex );
- }
-
- /// Get current value of slot \p nIndex
- template <typename T>
- T * get( size_t nIndex) const
- {
- return reinterpret_cast<T *>( get_native( nIndex ) );
- }
-
- /// Get native guarded pointer stored
- guarded_pointer get_native( size_t nIndex ) const
- {
- return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed);
- }
-
- /// Capacity of the guard array
- static CDS_CONSTEXPR size_t capacity()
- {
- return Count;
- }
- };
-
- public:
- /// Initializes ptb::GarbageCollector singleton
- /**
- The constructor calls GarbageCollector::Construct with passed parameters.
- See ptb::GarbageCollector::Construct for explanation of parameters meaning.
- */
- PTB(
- size_t nLiberateThreshold = 1024
- , size_t nInitialThreadGuardCount = 8
- )
- {
- ptb::GarbageCollector::Construct(
- nLiberateThreshold,
- nInitialThreadGuardCount
- );
- }
-
- /// Terminates ptb::GarbageCollector singleton
- /**
- The destructor calls \code ptb::GarbageCollector::Destruct() \endcode
- */
- ~PTB()
- {
- ptb::GarbageCollector::Destruct();
- }
-
- /// Checks if count of hazard pointer is no less than \p nCountNeeded
- /**
- The function always returns \p true since the guard count is unlimited for
- PTB garbage collector.
- */
- static bool check_available_guards( size_t nCountNeeded, bool /*bRaiseException*/ = true )
- {
- CDS_UNUSED( nCountNeeded );
- return true;
- }
-
- /// Retire pointer \p p with function \p pFunc
- /**
- The function places pointer \p p to array of pointers ready for removing.
- (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it.
- Deleting the pointer is the function \p pFunc call.
- */
- template <typename T>
- static void retire( T * p, void (* pFunc)(T *) )
- {
- ptb::GarbageCollector::instance().retirePtr( p, pFunc );
- }
-
- /// Retire pointer \p p with functor of type \p Disposer
- /**
- The function places pointer \p p to array of pointers ready for removing.
- (so called retired pointer array). The pointer can be safely removed when no guarded pointer points to it.
-
- See gc::HP::retire for \p Disposer requirements.
- */
- template <class Disposer, typename T>
- static void retire( T * p )
- {
- retire( p, cds::details::static_functor<Disposer, T>::call );
- }
-
- /// Checks if Pass-the-Buck GC is constructed and may be used
- static bool isUsed()
- {
- return ptb::GarbageCollector::isUsed();
- }
-
- /// Forced GC cycle call for current thread
- /**
- Usually, this function should not be called directly.
- */
- static void scan() ; // inline in ptb_impl.h
-
- /// Synonym for \ref scan()
- static void force_dispose()
- {
- scan();
- }
- };
-
-}} // namespace cds::gc
-
-#endif // #ifndef __CDS_GC_PTB_DECL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_PTB_IMPL_H
-#define __CDS_GC_PTB_IMPL_H
-
-#include <cds/threading/model.h>
-
-//@cond
-namespace cds { namespace gc {
-
- inline PTB::thread_gc::thread_gc(
- bool bPersistent
- )
- : m_bPersistent( bPersistent )
- {
- if ( !cds::threading::Manager::isThreadAttached() )
- cds::threading::Manager::attachThread();
- }
-
- inline PTB::thread_gc::~thread_gc()
- {
- if ( !m_bPersistent )
- cds::threading::Manager::detachThread();
- }
-
- inline PTB::Guard::Guard()
- : Guard::base_class( cds::threading::getGC<PTB>() )
- {}
-
- template <size_t COUNT>
- inline PTB::GuardArray<COUNT>::GuardArray()
- : GuardArray::base_class( cds::threading::getGC<PTB>() )
- {}
-
- inline void PTB::scan()
- {
- cds::threading::getGC<PTB>().scan();
- }
-
-}} // namespace cds::gc
-//@endcond
-
-#endif // #ifndef __CDS_GC_PTB_IMPL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_FORWARD_H
-#define __CDS_GC_FORWARD_H
-
-#include <cds/details/defs.h>
-
-//@cond
-namespace cds { namespace gc {
- class HP;
- class PTB;
-
- class nogc;
-}} // namespace cds::gc
-
-//@endcond
-
-#endif // #ifndef __CDS_GC_FORWARD_H
#ifndef __CDS_GC_HP_H
#define __CDS_GC_HP_H
-#include <cds/gc/hp_decl.h>
-#include <cds/gc/hp_impl.h>
+#include <cds/gc/hp/hp_decl.h>
+#include <cds/gc/hp/hp_impl.h>
#include <cds/details/lib.h>
#endif // #ifndef __CDS_GC_HP_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_HZP_HZP_H
+#define __CDS_GC_HZP_HZP_H
+
+#include <vector>
+#include <cds/cxx11_atomic.h>
+#include <cds/os/thread.h>
+#include <cds/gc/hp/details/hp_fwd.h>
+#include <cds/gc/hp/details/hp_alloc.h>
+#include <cds/gc/hp/details/hp_retired.h>
+
+#if CDS_COMPILER == CDS_COMPILER_MSVC
+# pragma warning(push)
+ // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
+ // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
+# pragma warning(disable: 4251)
+#endif
+
+/*
+ Editions:
+ 2007.12.24 khizmax Add statistics and CDS_GATHER_HAZARDPTR_STAT macro
+ 2008.03.06 khizmax Refactoring: implementation of HazardPtrMgr is moved to hazardptr.cpp
+ 2008.03.08 khizmax Remove HazardPtrMgr singleton. Now you must initialize/destroy HazardPtrMgr calling
+ HazardPtrMgr::Construct / HazardPtrMgr::Destruct before use (usually in main() function).
+ 2008.12.06 khizmax Refactoring. Changes class name, namespace hierarchy, all helper defs have been moved to details namespace
+ 2010.01.27 khizmax Introducing memory order constraint
+*/
+
+namespace cds {
+ /**
+ @page cds_garbage_collectors_comparison GC comparison
+ @ingroup cds_garbage_collector
+
+ <table>
+ <tr>
+ <th>Feature</th>
+ <th>%cds::gc::HP</th>
+ <th>%cds::gc::PTB</th>
+ </tr>
+ <tr>
+ <td>Implementation quality</td>
+ <td>stable</td>
+ <td>stable</td>
+ </tr>
+ <tr>
+ <td>Performance rank (1 - slowest, 5 - fastest)</td>
+ <td>5</td>
+ <td>4</td>
+ </tr>
+ <tr>
+ <td>Max number of guarded (hazard) pointers per thread</td>
+ <td>limited (specified in GC object ctor)</td>
+ <td>unlimited (dynamically allocated when needed)</td>
+ </tr>
+ <tr>
+ <td>Max number of retired pointers<sup>1</sup></td>
+ <td>bounded</td>
+ <td>bounded</td>
+ </tr>
+ <tr>
+ <td>Array of retired pointers</td>
+ <td>preallocated for each thread, limited in size</td>
+ <td>global for the entire process, unlimited (dynamically allocated when needed)</td>
+ </tr>
+ <tr>
+ <td>Support for a direct pointer to an item of a lock-free container (useful for iterators)</td>
+ <td>not supported</td>
+ <td>not supported</td>
+ </tr>
+ </table>
+
+ <sup>1</sup>An unbounded count of retired pointers implies the possibility of memory exhaustion.
+ */
+
+ /// Different safe memory reclamation schemas (garbage collectors)
+ /** @ingroup cds_garbage_collector
+
+ This namespace specifies different safe memory reclamation (SMR) algorithms.
+ See \ref cds_garbage_collector "Garbage collectors"
+ */
+ namespace gc {
+
+ /// Michael's Hazard Pointers reclamation schema
+ /**
+ \par Sources:
+ - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+ - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+ - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+
+ The cds::gc::hzp namespace and its members are the internal representation of the Hazard Pointer GC and should not be used directly.
+ Use the cds::gc::HP class in your code.
+
+ The Hazard Pointer garbage collector is a singleton. The main user-level part of the Hazard Pointer schema is
+ the GC class and its nested classes. Before using any HP-related class you must initialize the HP garbage collector
+ by constructing a cds::gc::HP object at the beginning of your main().
+ See the cds::gc::HP class for explanation.
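+
+ A minimal initialization sketch (illustrative; cds::Initialize() and cds::Terminate() come from <cds/init.h>):
+ \code
+ #include <cds/init.h>
+ #include <cds/gc/hp.h>
+
+ int main() {
+     cds::Initialize() ;    // initialize libcds infrastructure
+     {
+         cds::gc::HP hpGC ; // construct the Hazard Pointer GC singleton
+         // attach threads and use HP-based containers here
+     }
+     cds::Terminate() ;     // libcds shutdown
+     return 0;
+ }
+ \endcode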
+ */
+ namespace hzp {
+
+ namespace details {
+ /// Hazard pointer record of the thread
+ /**
+ The structure is of the "single writer - multiple readers" type: only the owner thread may write to it;
+ other threads have read-only access.
+ */
+ struct HPRec {
+ HPAllocator<hazard_pointer> m_hzp ; ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependency
+ retired_vector m_arrRetired ; ///< Retired pointer array
+
+ /// Ctor
+ HPRec( const cds::gc::hzp::GarbageCollector& HzpMgr ) ; // inline
+ ~HPRec()
+ {}
+
+ /// Clears all hazard pointers
+ void clear()
+ {
+ m_hzp.clear();
+ }
+ };
+ } // namespace details
+
+ /// GarbageCollector::Scan phase strategy
+ /**
+ See GarbageCollector::Scan for explanation
+ */
+ enum scan_type {
+ classic, ///< classic scan as described in Michael's works (see GarbageCollector::classic_scan)
+ inplace ///< inplace scan without allocation (see GarbageCollector::inplace_scan)
+ };
+
+ /// Hazard Pointer singleton
+ /**
+ Safe memory reclamation schema by Michael "Hazard Pointers"
+
+ \par Sources:
+ \li [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+ \li [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+ \li [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+ */
+ class CDS_EXPORT_API GarbageCollector
+ {
+ public:
+ typedef cds::atomicity::event_counter event_counter ; ///< event counter type
+
+ /// Internal GC statistics
+ struct InternalState {
+ size_t nHPCount ; ///< HP count per thread (const)
+ size_t nMaxThreadCount ; ///< Max thread count (const)
+ size_t nMaxRetiredPtrCount ; ///< Max retired pointer count per thread (const)
+ size_t nHPRecSize ; ///< Size of HP record, bytes (const)
+
+ size_t nHPRecAllocated ; ///< Count of HP record allocations
+ size_t nHPRecUsed ; ///< Count of HP records used
+ size_t nTotalRetiredPtrCount ; ///< Current total count of retired pointers
+ size_t nRetiredPtrInFreeHPRecs ; ///< Count of retired pointers in free (unused) HP records
+
+ event_counter::value_type evcAllocHPRec ; ///< Count of HPRec allocations
+ event_counter::value_type evcRetireHPRec ; ///< Count of HPRec retire events
+ event_counter::value_type evcAllocNewHPRec; ///< Count of new HPRec allocations from heap
+ event_counter::value_type evcDeleteHPRec ; ///< Count of HPRec deletions
+
+ event_counter::value_type evcScanCall ; ///< Count of Scan calling
+ event_counter::value_type evcHelpScanCall ; ///< Count of HelpScan calling
+ event_counter::value_type evcScanFromHelpScan;///< Count of Scan calls from HelpScan
+
+ event_counter::value_type evcDeletedNode ; ///< Count of deleted retired objects
+ event_counter::value_type evcDeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because a hazard_pointer guards them
+ };
+
+ /// No GarbageCollector object is created
+ CDS_DECLARE_EXCEPTION( HZPManagerEmpty, "Global Hazard Pointer GarbageCollector is NULL" );
+
+ /// Not enough required Hazard Pointer count
+ CDS_DECLARE_EXCEPTION( HZPTooMany, "Not enough required Hazard Pointer count" );
+
+ private:
+ /// Internal GC statistics
+ struct Statistics {
+ event_counter m_AllocHPRec ; ///< Count of HPRec allocations
+ event_counter m_RetireHPRec ; ///< Count of HPRec retire events
+ event_counter m_AllocNewHPRec ; ///< Count of new HPRec allocations from heap
+ event_counter m_DeleteHPRec ; ///< Count of HPRec deletions
+
+ event_counter m_ScanCallCount ; ///< Count of Scan calling
+ event_counter m_HelpScanCallCount ; ///< Count of HelpScan calling
+ event_counter m_CallScanFromHelpScan ; ///< Count of Scan calls from HelpScan
+
+ event_counter m_DeletedNode ; ///< Count of deleted retired objects
+ event_counter m_DeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because a hazard_pointer guards them
+ };
+
+ /// Internal list of cds::gc::hzp::details::HPRec
+ struct hplist_node: public details::HPRec
+ {
+ hplist_node * m_pNextNode ; ///< next hazard ptr record in list
+ atomics::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ atomics::atomic<bool> m_bFree ; ///< true if the record is free (not owned)
+
+ //@cond
+ hplist_node( const GarbageCollector& HzpMgr )
+ : HPRec( HzpMgr ),
+ m_pNextNode( nullptr ),
+ m_idOwner( OS::c_NullThreadId ),
+ m_bFree( true )
+ {}
+
+ ~hplist_node()
+ {
+ assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
+ assert( m_bFree.load(atomics::memory_order_relaxed) );
+ }
+ //@endcond
+ };
+
+ atomics::atomic<hplist_node *> m_pListHead ; ///< Head of GC list
+
+ static GarbageCollector * m_pHZPManager ; ///< GC instance pointer
+
+ Statistics m_Stat ; ///< Internal statistics
+ bool m_bStatEnabled ; ///< true - statistics enabled
+
+ const size_t m_nHazardPointerCount ; ///< max count of a thread's hazard pointers
+ const size_t m_nMaxThreadCount ; ///< max count of threads
+ const size_t m_nMaxRetiredPtrCount ; ///< max count of retired pointers per thread
+ scan_type m_nScanType ; ///< scan type (see \ref scan_type enum)
+
+
+ private:
+ /// Ctor
+ GarbageCollector(
+ size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
+ size_t nMaxThreadCount = 0, ///< Max count of threads
+ size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects
+ scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
+ );
+
+ /// Dtor
+ ~GarbageCollector();
+
+ /// Allocate new HP record
+ hplist_node * NewHPRec();
+
+ /// Permanently deletes HPrecord \p pNode
+ /**
+ Caveat: for performance reasons this function is defined as inline and cannot be called directly
+ */
+ void DeleteHPRec( hplist_node * pNode );
+
+ /// Permanently deletes retired pointer \p p
+ /**
+ Caveat: for performance reasons this function is defined as inline and cannot be called directly
+ */
+ void DeletePtr( details::retired_ptr& p );
+
+ //@cond
+ void detachAllThread();
+ //@endcond
+
+ public:
+ /// Creates GarbageCollector singleton
+ /**
+ GC is a singleton. If the GC instance does not exist, the function creates it.
+ Otherwise it does nothing.
+
+ Michael's HP reclamation schema depends on three parameters:
+
+ \p nHazardPtrCount - HP pointer count per thread. Usually it is a small number (2-4) depending on
+ the data structure algorithms. By default, if \p nHazardPtrCount = 0,
+ the function uses the maximum HP count for the CDS library.
+
+ \p nMaxThreadCount - max count of threads using the HP GC in your application. Default is 100.
+
+ \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
+ \p nHazardPtrCount * \p nMaxThreadCount.
+ Default is 2 * \p nHazardPtrCount * \p nMaxThreadCount.
+ */
+ static void CDS_STDCALL Construct(
+ size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
+ size_t nMaxThreadCount = 0, ///< Max count of simultaneously working threads in your application
+ size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
+ scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
+ );
+
+ /// Destroys global instance of GarbageCollector
+ /**
+ The parameter \p bDetachAll should be used carefully: if its value is \p true,
+ then the destroying GC automatically detaches all attached threads. This feature
+ can be useful when you have no control over the thread termination, for example,
+ when \p libcds is injected into existing external thread.
+ */
+ static void CDS_STDCALL Destruct(
+ bool bDetachAll = false ///< Detach all threads
+ );
+
+ /// Returns pointer to GarbageCollector instance
+ static GarbageCollector& instance()
+ {
+ if ( !m_pHZPManager )
+ throw HZPManagerEmpty();
+ return *m_pHZPManager;
+ }
+
+ /// Checks if global GC object is constructed and may be used
+ static bool isUsed()
+ {
+ return m_pHZPManager != nullptr;
+ }
+
+ /// Returns the max Hazard Pointer count defined at construction time
+ size_t getHazardPointerCount() const { return m_nHazardPointerCount; }
+
+ /// Returns the max thread count defined at construction time
+ size_t getMaxThreadCount() const { return m_nMaxThreadCount; }
+
+ /// Returns the max size of the retired objects array, defined at construction time
+ size_t getMaxRetiredPtrCount() const { return m_nMaxRetiredPtrCount; }
+
+ // Internal statistics
+
+ /// Get internal statistics
+ InternalState& getInternalState(InternalState& stat) const;
+
+ /// Checks if internal statistics enabled
+ bool isStatisticsEnabled() const { return m_bStatEnabled; }
+
+ /// Enables/disables internal statistics
+ bool enableStatistics( bool bEnable )
+ {
+ bool bEnabled = m_bStatEnabled;
+ m_bStatEnabled = bEnable;
+ return bEnabled;
+ }
+
+ /// Checks that the required hazard pointer count \p nRequiredCount is less than or equal to the max hazard pointer count
+ /**
+ If \p nRequiredCount > getHazardPointerCount() then the HZPTooMany exception is thrown
+ */
+ static void checkHPCount( unsigned int nRequiredCount )
+ {
+ if ( instance().getHazardPointerCount() < nRequiredCount )
+ throw HZPTooMany();
+ }
+
+ /// Get current scan strategy
+ scan_type getScanType() const
+ {
+ return m_nScanType;
+ }
+
+ /// Set current scan strategy
+ /** @anchor hzp_gc_setScanType
+ Scan strategy changing is allowed on the fly.
+ */
+ void setScanType(
+ scan_type nScanType ///< new scan strategy
+ )
+ {
+ m_nScanType = nScanType;
+ }
+
+ public: // Internals for threads
+
+ /// Allocates Hazard Pointer GC record. For internal use only
+ details::HPRec * AllocateHPRec();
+
+ /// Free HP record. For internal use only
+ void RetireHPRec( details::HPRec * pRec );
+
+ /// The main garbage collecting function
+ /**
+ This function is called internally by a ThreadGC object when the upper bound of the thread's list of reclaimed pointers
+ is reached.
+
+ The following scan algorithms are available:
+ - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
+ - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
+
+ Use the \ref hzp_gc_setScanType "setScanType" member function to select the appropriate scan algorithm.
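+
+ For example, a sketch of selecting the strategy at run time (using the API declared in this class):
+ \code
+ // Illustrative: switch the HP singleton to the allocation-free scan
+ cds::gc::hzp::GarbageCollector::instance().setScanType( cds::gc::hzp::inplace );
+ \endcode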
+ */
+ void Scan( details::HPRec * pRec )
+ {
+ switch ( m_nScanType ) {
+ case inplace:
+ inplace_scan( pRec );
+ break;
+ default:
+ assert(false) ; // Forgotten something?..
+ case classic:
+ classic_scan( pRec );
+ break;
+ }
+ }
+
+ /// Helper scan routine
+ /**
+ The function guarantees that every node that is eligible for reuse is eventually freed, barring
+ thread failures. To do so, after executing Scan, a thread executes HelpScan,
+ which checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
+ to its own list of reclaimed pointers.
+
+ The function is called internally by Scan.
+ */
+ void HelpScan( details::HPRec * pThis );
+
+ protected:
+ /// Classic scan algorithm
+ /** @anchor hzp_gc_classic_scan
+ Classical scan algorithm as described in Michael's paper.
+
+ A scan includes four stages. The first stage involves scanning the HP array for non-null values.
+ Whenever a non-null value is encountered, it is inserted into a local list of currently protected pointers.
+ Only stage 1 accesses shared variables. The following stages operate only on private variables.
+
+ The second stage of a scan involves sorting the local list of protected pointers to allow
+ binary search in the third stage.
+
+ The third stage of a scan involves checking each reclaimed node
+ against the pointers in the local list of protected pointers. If the binary search yields
+ no match, the node is freed. Otherwise, it cannot be deleted now and must be kept in the thread's list
+ of reclaimed pointers.
+
+ The fourth stage builds the thread's new private list of the reclaimed pointers
+ that could not be freed during the current scan, where they remain until the next scan.
+
+ This algorithm allocates memory for an internal HP array.
+
+ This function is called internally by a ThreadGC object when the upper bound of the thread's list of reclaimed pointers
+ is reached.
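+
+ A simplified sketch of the four stages (illustrative only, not the library's actual code):
+ \code
+ // 1. collect: copy every non-null hazard pointer into a local plist (shared reads)
+ // 2. sort:    std::sort( plist.begin(), plist.end() )
+ // 3. check:   for each reclaimed node, binary-search plist;
+ //             not found -> free the node, found -> keep it
+ // 4. rebuild: the kept nodes form the thread's new list of reclaimed pointers
+ \endcode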
+ */
+ void classic_scan( details::HPRec * pRec );
+
+ /// In-place scan algorithm
+ /** @anchor hzp_gc_inplace_scan
+ Unlike the \ref hzp_gc_classic_scan "classic_scan" algorithm, \p inplace_scan does not allocate any memory.
+ All operations are performed in-place.
+ */
+ void inplace_scan( details::HPRec * pRec );
+ };
+
+ /// Thread's hazard pointer manager
+ /**
+ To use the Hazard Pointer reclamation schema, each thread must be linked with a ThreadGC object
+ that interacts with the global GarbageCollector object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
+ at the start of each thread that uses the HP GC. Before terminating a thread linked to the HP GC, it is necessary to call
+ \ref cds_threading "cds::threading::Manager::detachThread()".
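+
+ A sketch of the thread lifecycle (illustrative):
+ \code
+ void worker_thread()
+ {
+     cds::threading::Manager::attachThread() ;   // link this thread to the HP GC
+     // ... work with HP-based containers ...
+     cds::threading::Manager::detachThread() ;   // unlink before the thread exits
+ }
+ \endcode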
+ */
+ class ThreadGC
+ {
+ GarbageCollector& m_HzpManager ; ///< Hazard Pointer GC singleton
+ details::HPRec * m_pHzpRec ; ///< Pointer to thread's HZP record
+
+ public:
+ /// Default constructor
+ ThreadGC()
+ : m_HzpManager( GarbageCollector::instance() ),
+ m_pHzpRec( nullptr )
+ {}
+
+ /// The object is not copy-constructible
+ ThreadGC( ThreadGC const& ) = delete;
+
+ ~ThreadGC()
+ {
+ fini();
+ }
+
+ /// Checks if thread GC is initialized
+ bool isInitialized() const { return m_pHzpRec != nullptr; }
+
+ /// Initialization. Repeated calls are allowed
+ void init()
+ {
+ if ( !m_pHzpRec )
+ m_pHzpRec = m_HzpManager.AllocateHPRec();
+ }
+
+ /// Finalization. Repeated calls are allowed
+ void fini()
+ {
+ if ( m_pHzpRec ) {
+ details::HPRec * pRec = m_pHzpRec;
+ m_pHzpRec = nullptr;
+ m_HzpManager.RetireHPRec( pRec );
+ }
+ }
+
+ /// Allocates an HP guard from the thread's HP array
+ details::HPGuard& allocGuard()
+ {
+ assert( m_pHzpRec );
+ return m_pHzpRec->m_hzp.alloc();
+ }
+
+ /// Frees HP guard \p guard
+ void freeGuard( details::HPGuard& guard )
+ {
+ assert( m_pHzpRec );
+ m_pHzpRec->m_hzp.free( guard );
+ }
+
+ /// Initializes HP guard array \p arr
+ template <size_t Count>
+ void allocGuard( details::HPArray<Count>& arr )
+ {
+ assert( m_pHzpRec );
+ m_pHzpRec->m_hzp.alloc( arr );
+ }
+
+ /// Frees HP guard array \p arr
+ template <size_t Count>
+ void freeGuard( details::HPArray<Count>& arr )
+ {
+ assert( m_pHzpRec );
+ m_pHzpRec->m_hzp.free( arr );
+ }
+
+ /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
+ template <typename T>
+ void retirePtr( T * p, void (* pFunc)(T *) )
+ {
+ retirePtr( details::retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
+ }
+
+ /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
+ void retirePtr( const details::retired_ptr& p )
+ {
+ m_pHzpRec->m_arrRetired.push( p );
+
+ if ( m_pHzpRec->m_arrRetired.isFull() ) {
+ // Max of retired pointer count is reached. Do scan
+ scan();
+ }
+ }
+
+ //@cond
+ void scan()
+ {
+ m_HzpManager.Scan( m_pHzpRec );
+ m_HzpManager.HelpScan( m_pHzpRec );
+ }
+ //@endcond
+ };
+
+ /// Auto HPGuard.
+ /**
+ This class encapsulates a Hazard Pointer guard to protect a pointer against deletion.
+ It allocates one HP from the thread's HP array in its constructor and frees that HP in its destructor.
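+
+ A usage sketch, assuming \p threadGC is the current thread's ThreadGC and \p pNode is an illustrative node pointer:
+ \code
+ cds::gc::hzp::AutoHPGuard guard( threadGC ) ;
+ guard = pNode ;  // pNode is now protected from reclamation
+ // ... safely dereference pNode while the guard is alive ...
+ \endcode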
+ */
+ class AutoHPGuard
+ {
+ //@cond
+ details::HPGuard& m_hp ; ///< Hazard pointer guarded
+ ThreadGC& m_gc ; ///< Thread GC
+ //@endcond
+
+ public:
+ typedef details::HPGuard::hazard_ptr hazard_ptr ; ///< Hazard pointer type
+ public:
+ /// Allocates HP guard from \p gc
+ AutoHPGuard( ThreadGC& gc )
+ : m_hp( gc.allocGuard() )
+ , m_gc( gc )
+ {}
+
+ /// Allocates HP guard from \p gc and protects the pointer \p p of type \p T
+ template <typename T>
+ AutoHPGuard( ThreadGC& gc, T * p )
+ : m_hp( gc.allocGuard() )
+ , m_gc( gc )
+ {
+ m_hp = p;
+ }
+
+ /// Frees HP guard. The pointer guarded may be deleted after this.
+ ~AutoHPGuard()
+ {
+ m_gc.freeGuard( m_hp );
+ }
+
+ /// Returns thread GC
+ ThreadGC& getGC() const
+ {
+ return m_gc;
+ }
+
+ /// Protects the pointer \p p against reclamation (guards the pointer).
+ template <typename T>
+ T * operator =( T * p )
+ {
+ return m_hp = p;
+ }
+
+ //@cond
+ std::nullptr_t operator =(std::nullptr_t)
+ {
+ return m_hp = nullptr;
+ }
+
+ hazard_ptr get() const
+ {
+ return m_hp;
+ }
+ //@endcond
+ };
+
+ /// Auto-managed array of hazard pointers
+ /**
+ This class is a wrapper around the cds::gc::hzp::details::HPArray class.
+ \p Count is the size of the HP array
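+
+ A usage sketch, assuming \p threadGC is the current thread's ThreadGC and \p pFirst is an illustrative pointer:
+ \code
+ cds::gc::hzp::AutoHPArray<2> hpArr( threadGC ) ; // reserves two HP slots
+ hpArr.set( 0, pFirst ) ;                         // protect pFirst in slot 0
+ \endcode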
+ */
+ template <size_t Count>
+ class AutoHPArray: public details::HPArray<Count>
+ {
+ ThreadGC& m_mgr ; ///< Thread GC
+
+ public:
+ /// Rebind array for another size \p Count2
+ template <size_t Count2>
+ struct rebind {
+ typedef AutoHPArray<Count2> other ; ///< rebinding result
+ };
+
+ public:
+ /// Allocates array of HP guard from \p mgr
+ AutoHPArray( ThreadGC& mgr )
+ : m_mgr( mgr )
+ {
+ mgr.allocGuard( *this );
+ }
+
+ /// Frees array of HP guard
+ ~AutoHPArray()
+ {
+ m_mgr.freeGuard( *this );
+ }
+
+ /// Returns thread GC
+ ThreadGC& getGC() const { return m_mgr; }
+ };
+
+ } // namespace hzp
+}} // namespace cds::gc
+
+// Inlines
+#include <cds/gc/hp/details/hp_inline.h>
+
+#if CDS_COMPILER == CDS_COMPILER_MSVC
+# pragma warning(pop)
+#endif
+
+#endif // #ifndef __CDS_GC_HZP_HZP_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_HP_DECL_H
+#define __CDS_GC_HP_DECL_H
+
+#include <stdexcept> // overflow_error
+#include <cds/gc/hp/hp.h>
+#include <cds/details/marked_ptr.h>
+
+namespace cds { namespace gc {
+ /// @defgroup cds_garbage_collector Garbage collectors
+
+ /// Hazard Pointer garbage collector
+ /** @ingroup cds_garbage_collector
+ @headerfile cds/gc/hp.h
+
+ This class is a wrapper over the internal implementation of the Hazard Pointer garbage collector.
+
+ Sources:
+ - [2002] Maged M. Michael "Safe memory reclamation for dynamic lock-free objects using atomic reads and writes"
+ - [2003] Maged M. Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
+ - [2004] Andrei Alexandrescu, Maged Michael "Lock-free Data Structures with Hazard Pointers"
+
+ See \ref cds_how_to_use "How to use" section for details of garbage collector applying.
+ */
+ class HP
+ {
+ public:
+ /// Native guarded pointer type
+ typedef gc::hzp::hazard_pointer guarded_pointer;
+
+ /// Atomic reference
+ /**
+ @headerfile cds/gc/hp.h
+ */
+ template <typename T> using atomic_ref = atomics::atomic<T *>;
+
+ /// Atomic marked pointer
+ /**
+ @headerfile cds/gc/hp.h
+ */
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
+
+ /// Atomic type
+ /**
+ @headerfile cds/gc/hp.h
+ */
+ template <typename T> using atomic_type = atomics::atomic<T>;
+
+ /// Thread GC implementation for internal usage
+ typedef hzp::ThreadGC thread_gc_impl;
+
+ /// Wrapper for hzp::ThreadGC class
+ /**
+ @headerfile cds/gc/hp.h
+ This class automatically attaches/detaches the Hazard Pointer GC
+ for the current thread.
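+
+ A usage sketch (illustrative):
+ \code
+ void thread_func()
+ {
+     cds::gc::HP::thread_gc threadGC ; // attaches in ctor, detaches in dtor
+     // ... use HP-based containers ...
+ }
+ \endcode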
+ */
+ class thread_gc: public thread_gc_impl
+ {
+ //@cond
+ bool m_bPersistent;
+ //@endcond
+ public:
+
+ /// Constructor
+ /**
+ The constructor attaches the current thread to the Hazard Pointer GC
+ if it is not yet attached.
+ The \p bPersistent parameter specifies attachment persistence:
+ - \p true - the class destructor will not detach the thread from Hazard Pointer GC.
+ - \p false (default) - the class destructor will detach the thread from Hazard Pointer GC.
+ */
+ thread_gc(
+ bool bPersistent = false
+ ) ; //inline in hp_impl.h
+
+ /// Destructor
+ /**
+ If the object has been created in persistent mode, the destructor does nothing.
+ Otherwise it detaches the current thread from Hazard Pointer GC.
+ */
+ ~thread_gc() ; // inline in hp_impl.h
+ };
+
+ /// Hazard Pointer guard
+ /**
+ @headerfile cds/gc/hp.h
+ This class is a wrapper for hzp::AutoHPGuard.
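+
+ A usage sketch, assuming \p atomShared is an illustrative atomics::atomic<node *> shared variable:
+ \code
+ cds::gc::HP::Guard guard;
+ node * p = guard.protect( atomShared ) ; // p is protected until the guard is cleared or destroyed
+ \endcode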
+ */
+ class Guard: public hzp::AutoHPGuard
+ {
+ //@cond
+ typedef hzp::AutoHPGuard base_class;
+ //@endcond
+
+ public:
+ //@cond
+ Guard() ; // inline in hp_impl.h
+ //@endcond
+
+ /// Protects a pointer of type \p atomic<T*>
+ /**
+ Returns the value of \p toGuard
+
+ The function tries to load \p toGuard and to store it
+ to the HP slot repeatedly until the guard's value equals \p toGuard
+ */
+ template <typename T>
+ T protect( atomics::atomic<T> const& toGuard )
+ {
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pRet;
+ do {
+ pRet = assign( pCur );
+ pCur = toGuard.load(atomics::memory_order_acquire);
+ } while ( pRet != pCur );
+ return pCur;
+ }
+
+ /// Protects a converted pointer of type \p atomic<T*>
+ /**
+ Returns the value of \p toGuard
+
+ The function tries to load \p toGuard and to store the result of the \p f functor
+ to the HP slot repeatedly until the guard's value equals \p toGuard.
+
+ The function is useful for intrusive containers when \p toGuard is a node pointer
+ that should be converted to a pointer to the value type before protecting.
+ The parameter \p f of type Func is a functor that makes this conversion:
+ \code
+ struct functor {
+ value_type * operator()( T * p );
+ };
+ \endcode
+ In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
+ */
+ template <typename T, class Func>
+ T protect( atomics::atomic<T> const& toGuard, Func f )
+ {
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
+ T pRet;
+ do {
+ pRet = pCur;
+ assign( f( pCur ) );
+ pCur = toGuard.load(atomics::memory_order_acquire);
+ } while ( pRet != pCur );
+ return pCur;
+ }
+
+ /// Store \p p to the guard
+ /**
+ The function is equivalent to a simple assignment of the value \p p to the guard; no loop is performed.
+ Can be used for a pointer that cannot be changed concurrently
+ */
+ template <typename T>
+ T * assign( T * p )
+ {
+ return base_class::operator =(p);
+ }
+
+ //@cond
+ std::nullptr_t assign( std::nullptr_t )
+ {
+ return base_class::operator =(nullptr);
+ }
+ //@endcond
+
+ /// Copy from \p src guard to \p this guard
+ void copy( Guard const& src )
+ {
+ assign( src.get_native() );
+ }
+
+ /// Store marked pointer \p p to the guard
+ /**
+ The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+ Can be used for a marked pointer that cannot be changed concurrently.
+ */
+ template <typename T, int BITMASK>
+ T * assign( cds::details::marked_ptr<T, BITMASK> p )
+ {
+ return base_class::operator =( p.ptr() );
+ }
+
+ /// Clear value of the guard
+ void clear()
+ {
+ assign( nullptr );
+ }
+
+ /// Get the value currently protected
+ template <typename T>
+ T * get() const
+ {
+ return reinterpret_cast<T *>( get_native() );
+ }
+
+ /// Get native hazard pointer stored
+ guarded_pointer get_native() const
+ {
+ return base_class::get();
+ }
+ };
+
+ /// Array of Hazard Pointer guards
+ /**
+ @headerfile cds/gc/hp.h
+ This class is a wrapper for the hzp::AutoHPArray template.
+ The template parameter \p Count defines the size of the HP array.
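+
+ A usage sketch, assuming \p atomPrev and \p atomCur are illustrative atomics::atomic<node *> variables:
+ \code
+ cds::gc::HP::GuardArray<2> guards;
+ node * pPrev = guards.protect( 0, atomPrev ) ;
+ node * pCur  = guards.protect( 1, atomCur ) ;
+ \endcode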
+ */
+ template <size_t Count>
+ class GuardArray: public hzp::AutoHPArray<Count>
+ {
+ //@cond
+ typedef hzp::AutoHPArray<Count> base_class;
+ //@endcond
+ public:
+ /// Rebind array for other size \p Count2
+ template <size_t Count2>
+ struct rebind {
+ typedef GuardArray<Count2> other ; ///< rebinding result
+ };
+
+ public:
+ //@cond
+ GuardArray() ; // inline in hp_impl.h
+ //@endcond
+ /// Protects a pointer of type \p atomic<T*>
+ /**
+ Returns the value of \p toGuard
+
+ The function tries to load \p toGuard and to store it
+ to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+ */
+ template <typename T>
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
+ {
+ T pRet;
+ do {
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+ return pRet;
+ }
+
+ /// Protects a pointer of type \p atomic<T*>
+ /**
+ Returns the value of \p toGuard
+
+ The function tries to load \p toGuard and to store the result of the \p f functor
+ to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
+
+ The function is useful for intrusive containers when \p toGuard is a node pointer
+ that should be converted to a pointer to the value type before guarding.
+ The parameter \p f of type Func is a functor that makes this conversion:
+ \code
+ struct functor {
+ value_type * operator()( T * p );
+ };
+ \endcode
+ In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
+ */
+ template <typename T, class Func>
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
+ {
+ T pRet;
+ do {
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+
+ return pRet;
+ }
+
+ /// Store \p p to the slot \p nIndex
+ /**
+ The function is equivalent to a simple assignment; no loop is performed.
+ */
+ template <typename T>
+ T * assign( size_t nIndex, T * p )
+ {
+ base_class::set(nIndex, p);
+ return p;
+ }
+
+ /// Store marked pointer \p p to the guard
+ /**
+ The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
+ Can be used for a marked pointer that cannot be changed concurrently.
+ */
+ template <typename T, int BITMASK>
+ T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
+ {
+ return assign( nIndex, p.ptr() );
+ }
+
+ /// Copy guarded value from \p src guard to slot at index \p nIndex
+ void copy( size_t nIndex, Guard const& src )
+ {
+ assign( nIndex, src.get_native() );
+ }
+
+ /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
+ void copy( size_t nDestIndex, size_t nSrcIndex )
+ {
+ assign( nDestIndex, get_native( nSrcIndex ));
+ }
+
+ /// Clear value of the slot \p nIndex
+ void clear( size_t nIndex)
+ {
+ base_class::clear( nIndex );
+ }
+
+ /// Get current value of slot \p nIndex
+ template <typename T>
+ T * get( size_t nIndex) const
+ {
+ return reinterpret_cast<T *>( get_native( nIndex ) );
+ }
+
+ /// Get native hazard pointer stored
+ guarded_pointer get_native( size_t nIndex ) const
+ {
+ return base_class::operator[](nIndex).get();
+ }
+
+ /// Capacity of the guard array
+ static CDS_CONSTEXPR size_t capacity()
+ {
+ return Count;
+ }
+ };
+
+ public:
+ /// Initializes hzp::GarbageCollector singleton
+ /**
+ The constructor initializes the GC singleton with the passed parameters.
+ If the GC instance does not exist, the function creates it.
+ Otherwise it does nothing.
+
+ Michael's HP reclamation schema depends on three parameters:
+ - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
+ the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function
+ uses the maximum hazard pointer count for the CDS library.
+ - \p nMaxThreadCount - max count of threads using the Hazard Pointer GC in your application. Default is 100.
+ - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
+ <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
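+
+ A construction sketch with explicit parameters (the values are illustrative):
+ \code
+ // 4 hazard pointers per thread, up to 100 threads,
+ // default retired-array capacity (2 * 4 * 100), in-place scan
+ cds::gc::HP hpGC( 4, 100, 0, cds::gc::hzp::inplace );
+ \endcode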
+ */
+ HP(
+ size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
+ size_t nMaxThreadCount = 0, ///< Max count of simultaneously working threads in your application
+ size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
+ hzp::scan_type nScanType = hzp::inplace ///< Scan type (see \ref hzp::scan_type enum)
+ )
+ {
+ hzp::GarbageCollector::Construct(
+ nHazardPtrCount,
+ nMaxThreadCount,
+ nMaxRetiredPtrCount,
+ nScanType
+ );
+ }
+
+ /// Terminates GC singleton
+ /**
+ The destructor calls \code hzp::GarbageCollector::Destruct( true ) \endcode
+ */
+ ~HP()
+ {
+ hzp::GarbageCollector::Destruct( true );
+ }
+
+ /// Checks that the count of hazard pointers is not less than \p nCountNeeded
+ /**
+ If \p bRaiseException is \p true (the default), the function raises
+ an \p std::overflow_error exception "Too few hazard pointers"
+ if \p nCountNeeded is more than the count of hazard pointers per thread.
+ */
+ static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true )
+ {
+ if ( hzp::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) {
+ if ( bRaiseException )
+ throw std::overflow_error( "Too few hazard pointers" );
+ return false;
+ }
+ return true;
+ }
+
+ /// Returns max Hazard Pointer count
+ size_t max_hazard_count() const
+ {
+ return hzp::GarbageCollector::instance().getHazardPointerCount();
+ }
+
+ /// Returns max count of thread
+ size_t max_thread_count() const
+ {
+ return hzp::GarbageCollector::instance().getMaxThreadCount();
+ }
+
+ /// Returns capacity of retired pointer array
+ size_t retired_array_capacity() const
+ {
+ return hzp::GarbageCollector::instance().getMaxRetiredPtrCount();
+ }
+
+ /// Retire pointer \p p with function \p pFunc
+ /**
+ The function places pointer \p p into the array of pointers ready for removal
+ (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+ The pointer is deleted by calling the function \p pFunc.
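+
+ A usage sketch with a free function as the deleter (\p node and \p delete_node are illustrative):
+ \code
+ void delete_node( node * p ) { delete p; }
+
+ node * pNode = ... ; // pointer excluded from a container in a lock-free manner
+ cds::gc::HP::retire( pNode, delete_node );
+ \endcode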
+ */
+ template <typename T>
+ static void retire( T * p, void (* pFunc)(T *) ) ; // inline in hp_impl.h
+
+ /// Retire pointer \p p with functor of type \p Disposer
+ /**
+ The function places pointer \p p into the array of pointers ready for removal
+ (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
+
+ The pointer is deleted by invoking an object of type \p Disposer; the interface of \p Disposer is:
+ \code
+ template <typename T>
+ struct disposer {
+     void operator()( T * p ) ; // disposing operator
+ };
+ \endcode
+ Since the functor call can happen at any time after the \p retire call, additional restrictions are imposed on the \p Disposer type:
+ - it should be a stateless functor
+ - it should be default-constructible
+ - the result of a functor call with argument \p p should not depend on where the functor is called.
+
+ \par Examples:
+ Operator \p delete functor:
+ \code
+ template <typename T>
+ struct disposer {
+ void operator ()( T * p ) {
+ delete p;
+ }
+ };
+
+ // How to call GC::retire method
+ int * p = new int;
+
+ // ... use p in lock-free manner
+
+ cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
+ \endcode
+
+ Functor based on \p std::allocator :
+ \code
+ template <typename ALLOC = std::allocator<int> >
+ struct disposer {
+ template <typename T>
+ void operator()( T * p ) {
+ typedef typename ALLOC::template rebind<T>::other alloc_t;
+ alloc_t a;
+ a.destroy( p );
+ a.deallocate( p, 1 );
+ }
+ };
+ \endcode
+ */
+ template <class Disposer, typename T>
+ static void retire( T * p ) ; // inline in hp_impl.h
+
+ /// Get current scan strategy
+ hzp::scan_type getScanType() const
+ {
+ return hzp::GarbageCollector::instance().getScanType();
+ }
+
+ /// Set current scan strategy
+ void setScanType(
+ hzp::scan_type nScanType ///< new scan strategy
+ )
+ {
+ hzp::GarbageCollector::instance().setScanType( nScanType );
+ }
+
+ /// Checks if Hazard Pointer GC is constructed and may be used
+ static bool isUsed()
+ {
+ return hzp::GarbageCollector::isUsed();
+ }
+
+
+ /// Forced GC cycle call for current thread
+ /**
+ Usually, this function should not be called directly.
+ */
+ static void scan() ; // inline in hp_impl.h
+
+ /// Synonym for \ref scan()
+ static void force_dispose()
+ {
+ scan();
+ }
+ };
+}} // namespace cds::gc
+
+#endif // #ifndef __CDS_GC_HP_DECL_H
--- /dev/null
+//$$CDS-header$$
+
+#ifndef __CDS_GC_HP_IMPL_H
+#define __CDS_GC_HP_IMPL_H
+
+#include <cds/threading/model.h>
+#include <cds/details/static_functor.h>
+
+//@cond
+namespace cds { namespace gc {
+
+ inline HP::thread_gc::thread_gc(
+ bool bPersistent
+ )
+ : m_bPersistent( bPersistent )
+ {
+ if ( !threading::Manager::isThreadAttached() )
+ threading::Manager::attachThread();
+ }
+
+ inline HP::thread_gc::~thread_gc()
+ {
+ if ( !m_bPersistent )
+ cds::threading::Manager::detachThread();
+ }
+
+ inline HP::Guard::Guard()
+ : Guard::base_class( cds::threading::getGC<HP>() )
+ {}
+
+ template <size_t COUNT>
+ inline HP::GuardArray<COUNT>::GuardArray()
+ : GuardArray::base_class( cds::threading::getGC<HP>() )
+ {}
+
+ template <typename T>
+ inline void HP::retire( T * p, void (* pFunc)(T *) )
+ {
+ cds::threading::getGC<HP>().retirePtr( p, pFunc );
+ }
+
+ template <class Disposer, typename T>
+ inline void HP::retire( T * p )
+ {
+ cds::threading::getGC<HP>().retirePtr( p, cds::details::static_functor<Disposer, T>::call );
+ }
+
+ inline void HP::scan()
+ {
+ cds::threading::getGC<HP>().scan();
+ }
+
+
+}} // namespace cds::gc
+//@endcond
+
+#endif // #ifndef __CDS_GC_HP_IMPL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_HZP_HZP_H
-#define __CDS_GC_HZP_HZP_H
-
-#include <vector>
-#include <cds/cxx11_atomic.h>
-#include <cds/os/thread.h>
-#include <cds/gc/hp/details/hp_fwd.h>
-#include <cds/gc/hp/details/hp_alloc.h>
-#include <cds/gc/hp/details/hp_retired.h>
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-# pragma warning(push)
- // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
- // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
-# pragma warning(disable: 4251)
-#endif
-
-/*
- Editions:
- 2007.12.24 khizmax Add statistics and CDS_GATHER_HAZARDPTR_STAT macro
- 2008.03.06 khizmax Refactoring: implementation of HazardPtrMgr is moved to hazardptr.cpp
- 2008.03.08 khizmax Remove HazardPtrMgr singleton. Now you must initialize/destroy HazardPtrMgr calling
- HazardPtrMgr::Construct / HazardPtrMgr::Destruct before use (usually in main() function).
- 2008.12.06 khizmax Refactoring. Changes class name, namespace hierarchy, all helper defs have been moved to details namespace
- 2010.01.27 khizmax Introducing memory order constraint
-*/
-
-namespace cds {
- /**
- @page cds_garbage_collectors_comparison GC comparison
- @ingroup cds_garbage_collector
-
- <table>
- <tr>
- <th>Feature</th>
- <th>%cds::gc::HP</th>
- <th>%cds::gc::PTB</th>
- </tr>
- <tr>
- <td>Implementation quality</td>
- <td>stable</td>
- <td>stable</td>
- </tr>
- <tr>
- <td>Performance rank (1 - slowest, 5 - fastest)</td>
- <td>5</td>
- <td>4</td>
- </tr>
- <tr>
- <td>Max number of guarded (hazard) pointers per thread</td>
- <td>limited (specifies in GC object ctor)</td>
- <td>unlimited (dynamically allocated when needed)</td>
- </tr>
- <tr>
- <td>Max number of retired pointers<sup>1</sup></td>
- <td>bounded</td>
- <td>bounded</td>
- </tr>
- <tr>
- <td>Array of retired pointers</td>
- <td>preallocated for each thread, limited in size</td>
- <td>global for the entire process, unlimited (dynamically allocated when needed)</td>
- </tr>
- <tr>
- <td>Support direct pointer to item of lock-free container (useful for iterators)</td>
- <td>not supported</td>
- <td>not supported</td>
- </tr>
- </table>
-
- <sup>1</sup>Unbounded count of retired pointer means a possibility of memory exhaustion.
- */
-
- /// Different safe memory reclamation schemas (garbage collectors)
- /** @ingroup cds_garbage_collector
-
- This namespace specifies different safe memory reclamation (SMR) algorithms.
- See \ref cds_garbage_collector "Garbage collectors"
- */
- namespace gc {
-
- /// Michael's Hazard Pointers reclamation schema
- /**
- \par Sources:
- - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes"
- - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
- - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
-
- The cds::gc::hzp namespace and its members are internal representation of Hazard Pointer GC and should not be used directly.
- Use cds::gc::HP class in your code.
-
- Hazard Pointer garbage collector is a singleton. The main user-level part of Hazard Pointer schema is
- GC class and its nested classes. Before use any HP-related class you must initialize HP garbage collector
- by contructing cds::gc::HP object in beginning of your main().
- See cds::gc::HP class for explanation.
- */
- namespace hzp {
-
- namespace details {
- /// Hazard pointer record of the thread
- /**
- The structure of type "single writer - multiple reader": only the owner thread may write to this structure
- other threads have read-only access.
- */
- struct HPRec {
- HPAllocator<hazard_pointer> m_hzp ; ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependency
- retired_vector m_arrRetired ; ///< Retired pointer array
-
- /// Ctor
- HPRec( const cds::gc::hzp::GarbageCollector& HzpMgr ) ; // inline
- ~HPRec()
- {}
-
- /// Clears all hazard pointers
- void clear()
- {
- m_hzp.clear();
- }
- };
- } // namespace details
-
- /// GarbageCollector::Scan phase strategy
- /**
- See GarbageCollector::Scan for explanation
- */
- enum scan_type {
- classic, ///< classic scan as described in Michael's works (see GarbageCollector::classic_scan)
- inplace ///< inplace scan without allocation (see GarbageCollector::inplace_scan)
- };
-
- /// Hazard Pointer singleton
- /**
- Safe memory reclamation schema by Michael "Hazard Pointers"
-
- \par Sources:
- \li [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes"
- \li [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
- \li [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
- */
- class CDS_EXPORT_API GarbageCollector
- {
- public:
- typedef cds::atomicity::event_counter event_counter ; ///< event counter type
-
- /// Internal GC statistics
- struct InternalState {
- size_t nHPCount ; ///< HP count per thread (const)
- size_t nMaxThreadCount ; ///< Max thread count (const)
- size_t nMaxRetiredPtrCount ; ///< Max retired pointer count per thread (const)
- size_t nHPRecSize ; ///< Size of HP record, bytes (const)
-
- size_t nHPRecAllocated ; ///< Count of HP record allocations
- size_t nHPRecUsed ; ///< Count of HP record used
- size_t nTotalRetiredPtrCount ; ///< Current total count of retired pointers
- size_t nRetiredPtrInFreeHPRecs ; ///< Count of retired pointer in free (unused) HP records
-
- event_counter::value_type evcAllocHPRec ; ///< Count of HPRec allocations
- event_counter::value_type evcRetireHPRec ; ///< Count of HPRec retire events
- event_counter::value_type evcAllocNewHPRec; ///< Count of new HPRec allocations from heap
- event_counter::value_type evcDeleteHPRec ; ///< Count of HPRec deletions
-
- event_counter::value_type evcScanCall ; ///< Count of Scan calling
- event_counter::value_type evcHelpScanCall ; ///< Count of HelpScan calling
- event_counter::value_type evcScanFromHelpScan;///< Count of Scan calls from HelpScan
-
- event_counter::value_type evcDeletedNode ; ///< Count of deleting of retired objects
- event_counter::value_type evcDeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because of a hazard_pointer guards it
- };
-
- /// No GarbageCollector object is created
- CDS_DECLARE_EXCEPTION( HZPManagerEmpty, "Global Hazard Pointer GarbageCollector is NULL" );
-
- /// Not enough required Hazard Pointer count
- CDS_DECLARE_EXCEPTION( HZPTooMany, "Not enough required Hazard Pointer count" );
-
- private:
- /// Internal GC statistics
- struct Statistics {
- event_counter m_AllocHPRec ; ///< Count of HPRec allocations
- event_counter m_RetireHPRec ; ///< Count of HPRec retire events
- event_counter m_AllocNewHPRec ; ///< Count of new HPRec allocations from heap
- event_counter m_DeleteHPRec ; ///< Count of HPRec deletions
-
- event_counter m_ScanCallCount ; ///< Count of Scan calling
- event_counter m_HelpScanCallCount ; ///< Count of HelpScan calling
- event_counter m_CallScanFromHelpScan ; ///< Count of Scan calls from HelpScan
-
- event_counter m_DeletedNode ; ///< Count of retired objects deleting
- event_counter m_DeferredNode ; ///< Count of objects that cannot be deleted in Scan phase because of a hazard_pointer guards it
- };
-
- /// Internal list of cds::gc::hzp::details::HPRec
- struct hplist_node: public details::HPRec
- {
- hplist_node * m_pNextNode ; ///< next hazard ptr record in list
- atomics::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
- atomics::atomic<bool> m_bFree ; ///< true if record if free (not owned)
-
- //@cond
- hplist_node( const GarbageCollector& HzpMgr )
- : HPRec( HzpMgr ),
- m_pNextNode( nullptr ),
- m_idOwner( OS::c_NullThreadId ),
- m_bFree( true )
- {}
-
- ~hplist_node()
- {
- assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
- assert( m_bFree.load(atomics::memory_order_relaxed) );
- }
- //@endcond
- };
-
- atomics::atomic<hplist_node *> m_pListHead ; ///< Head of GC list
-
- static GarbageCollector * m_pHZPManager ; ///< GC instance pointer
-
- Statistics m_Stat ; ///< Internal statistics
- bool m_bStatEnabled ; ///< true - statistics enabled
-
- const size_t m_nHazardPointerCount ; ///< max count of thread's hazard pointer
- const size_t m_nMaxThreadCount ; ///< max count of thread
- const size_t m_nMaxRetiredPtrCount ; ///< max count of retired ptr per thread
- scan_type m_nScanType ; ///< scan type (see \ref scan_type enum)
-
-
- private:
- /// Ctor
- GarbageCollector(
- size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
- size_t nMaxThreadCount = 0, ///< Max count of thread
- size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects
- scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
- );
-
- /// Dtor
- ~GarbageCollector();
-
- /// Allocate new HP record
- hplist_node * NewHPRec();
-
- /// Permanently deletes HPrecord \p pNode
- /**
- Caveat: for performance reason this function is defined as inline and cannot be called directly
- */
- void DeleteHPRec( hplist_node * pNode );
-
- /// Permanently deletes retired pointer \p p
- /**
- Caveat: for performance reason this function is defined as inline and cannot be called directly
- */
- void DeletePtr( details::retired_ptr& p );
-
- //@cond
- void detachAllThread();
- //@endcond
-
- public:
- /// Creates GarbageCollector singleton
- /**
- GC is the singleton. If GC instance is not exist then the function creates the instance.
- Otherwise it does nothing.
-
- The Michael's HP reclamation schema depends of three parameters:
-
- \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from
- the data structure algorithms. By default, if \p nHazardPtrCount = 0,
- the function uses maximum of HP count for CDS library.
-
- \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100.
-
- \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than
- \p nHazardPtrCount * \p nMaxThreadCount.
- Default is 2 * \p nHazardPtrCount * \p nMaxThreadCount.
- */
- static void CDS_STDCALL Construct(
- size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
- size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
- size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
- scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum)
- );
-
- /// Destroys global instance of GarbageCollector
- /**
- The parameter \p bDetachAll should be used carefully: if its value is \p true,
- then the destroying GC automatically detaches all attached threads. This feature
- can be useful when you have no control over the thread termination, for example,
- when \p libcds is injected into existing external thread.
- */
- static void CDS_STDCALL Destruct(
- bool bDetachAll = false ///< Detach all threads
- );
-
- /// Returns pointer to GarbageCollector instance
- static GarbageCollector& instance()
- {
- if ( !m_pHZPManager )
- throw HZPManagerEmpty();
- return *m_pHZPManager;
- }
-
- /// Checks if global GC object is constructed and may be used
- static bool isUsed()
- {
- return m_pHZPManager != nullptr;
- }
-
- /// Returns max Hazard Pointer count defined in construction time
- size_t getHazardPointerCount() const { return m_nHazardPointerCount; }
-
- /// Returns max thread count defined in construction time
- size_t getMaxThreadCount() const { return m_nMaxThreadCount; }
-
- /// Returns max size of retired objects array. It is defined in construction time
- size_t getMaxRetiredPtrCount() const { return m_nMaxRetiredPtrCount; }
-
- // Internal statistics
-
- /// Get internal statistics
- InternalState& getInternalState(InternalState& stat) const;
-
- /// Checks if internal statistics enabled
- bool isStatisticsEnabled() const { return m_bStatEnabled; }
-
- /// Enables/disables internal statistics
- bool enableStatistics( bool bEnable )
- {
- bool bEnabled = m_bStatEnabled;
- m_bStatEnabled = bEnable;
- return bEnabled;
- }
-
- /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count
- /**
- If \p nRequiredCount > getHazardPointerCount() then the exception HZPTooMany is thrown
- */
- static void checkHPCount( unsigned int nRequiredCount )
- {
- if ( instance().getHazardPointerCount() < nRequiredCount )
- throw HZPTooMany();
- }
-
- /// Get current scan strategy
- scan_type getScanType() const
- {
- return m_nScanType;
- }
-
- /// Set current scan strategy
- /** @anchor hzp_gc_setScanType
- Scan strategy changing is allowed on the fly.
- */
- void setScanType(
- scan_type nScanType ///< new scan strategy
- )
- {
- m_nScanType = nScanType;
- }
-
- public: // Internals for threads
-
- /// Allocates Hazard Pointer GC record. For internal use only
- details::HPRec * AllocateHPRec();
-
- /// Free HP record. For internal use only
- void RetireHPRec( details::HPRec * pRec );
-
- /// The main garbage collecting function
- /**
- This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
- is reached.
-
- There are the following scan algorithm:
- - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use
- - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory
-
- Use \ref hzp_gc_setScanType "setScanType" member function to setup appropriate scan algorithm.
- */
- void Scan( details::HPRec * pRec )
- {
- switch ( m_nScanType ) {
- case inplace:
- inplace_scan( pRec );
- break;
- default:
- assert(false) ; // Forgotten something?..
- case classic:
- classic_scan( pRec );
- break;
- }
- }
-
- /// Helper scan routine
- /**
- The function guarantees that every node that is eligible for reuse is eventually freed, barring
- thread failures. To do so, after executing Scan, a thread executes a HelpScan,
- where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers
- to thread's list of reclaimed pointers.
-
- The function is called internally by Scan.
- */
- void HelpScan( details::HPRec * pThis );
-
- protected:
- /// Classic scan algorithm
- /** @anchor hzp_gc_classic_scan
- Classical scan algorithm as described in Michael's paper.
-
- A scan includes four stages. The first stage involves scanning the array HP for non-null values.
- Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer.
- Only stage 1 accesses shared variables. The following stages operate only on private variables.
-
- The second stage of a scan involves sorting local list of protected pointers to allow
- binary search in the third stage.
-
- The third stage of a scan involves checking each reclaimed node
- against the pointers in local list of protected pointers. If the binary search yields
- no match, the node is freed. Otherwise, it cannot be deleted now and must kept in thread's list
- of reclaimed pointers.
-
- The forth stage prepares new thread's private list of reclaimed pointers
- that could not be freed during the current scan, where they remain until the next scan.
-
- This algorithm allocates memory for internal HP array.
-
- This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers
- is reached.
- */
- void classic_scan( details::HPRec * pRec );
-
- /// In-place scan algorithm
- /** @anchor hzp_gc_inplace_scan
- Unlike the \ref hzp_gc_classic_scan "classic_scan" algorithm, \p inplace_scan does not allocate any memory.
- All operations are performed in-place.
- */
- void inplace_scan( details::HPRec * pRec );
- };
-
- /// Thread's hazard pointer manager
- /**
- To use Hazard Pointer reclamation schema each thread object must be linked with the object of ThreadGC class
- that interacts with GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
- on the start of each thread that uses HP GC. Before terminating the thread linked to HP GC it is necessary to call
- \ref cds_threading "cds::threading::Manager::detachThread()".
- */
- class ThreadGC
- {
- GarbageCollector& m_HzpManager ; ///< Hazard Pointer GC singleton
- details::HPRec * m_pHzpRec ; ///< Pointer to thread's HZP record
-
- public:
- /// Default constructor
- ThreadGC()
- : m_HzpManager( GarbageCollector::instance() ),
- m_pHzpRec( nullptr )
- {}
-
- /// The object is not copy-constructible
- ThreadGC( ThreadGC const& ) = delete;
-
- ~ThreadGC()
- {
- fini();
- }
-
- /// Checks if thread GC is initialized
- bool isInitialized() const { return m_pHzpRec != nullptr; }
-
- /// Initialization. Repeat call is available
- void init()
- {
- if ( !m_pHzpRec )
- m_pHzpRec = m_HzpManager.AllocateHPRec();
- }
-
- /// Finalization. Repeat call is available
- void fini()
- {
- if ( m_pHzpRec ) {
- details::HPRec * pRec = m_pHzpRec;
- m_pHzpRec = nullptr;
- m_HzpManager.RetireHPRec( pRec );
- }
- }
-
- /// Initializes HP guard \p guard
- details::HPGuard& allocGuard()
- {
- assert( m_pHzpRec );
- return m_pHzpRec->m_hzp.alloc();
- }
-
- /// Frees HP guard \p guard
- void freeGuard( details::HPGuard& guard )
- {
- assert( m_pHzpRec );
- m_pHzpRec->m_hzp.free( guard );
- }
-
- /// Initializes HP guard array \p arr
- template <size_t Count>
- void allocGuard( details::HPArray<Count>& arr )
- {
- assert( m_pHzpRec );
- m_pHzpRec->m_hzp.alloc( arr );
- }
-
- /// Frees HP guard array \p arr
- template <size_t Count>
- void freeGuard( details::HPArray<Count>& arr )
- {
- assert( m_pHzpRec );
- m_pHzpRec->m_hzp.free( arr );
- }
-
- /// Places retired pointer \p and its deleter \p pFunc into thread's array of retired pointer for deferred reclamation
- template <typename T>
- void retirePtr( T * p, void (* pFunc)(T *) )
- {
- retirePtr( details::retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
- }
-
- /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation
- void retirePtr( const details::retired_ptr& p )
- {
- m_pHzpRec->m_arrRetired.push( p );
-
- if ( m_pHzpRec->m_arrRetired.isFull() ) {
- // Max of retired pointer count is reached. Do scan
- scan();
- }
- }
-
- //@cond
- void scan()
- {
- m_HzpManager.Scan( m_pHzpRec );
- m_HzpManager.HelpScan( m_pHzpRec );
- }
- //@endcond
- };
-
- /// Auto HPGuard.
- /**
- This class encapsulates Hazard Pointer guard to protect a pointer against deletion .
- It allocates one HP from thread's HP array in constructor and free the HP allocated in destruction time.
- */
- class AutoHPGuard
- {
- //@cond
- details::HPGuard& m_hp ; ///< Hazard pointer guarded
- ThreadGC& m_gc ; ///< Thread GC
- //@endcond
-
- public:
- typedef details::HPGuard::hazard_ptr hazard_ptr ; ///< Hazard pointer type
- public:
- /// Allocates HP guard from \p gc
- AutoHPGuard( ThreadGC& gc )
- : m_hp( gc.allocGuard() )
- , m_gc( gc )
- {}
-
- /// Allocates HP guard from \p gc and protects the pointer \p p of type \p T
- template <typename T>
- AutoHPGuard( ThreadGC& gc, T * p )
- : m_hp( gc.allocGuard() )
- , m_gc( gc )
- {
- m_hp = p;
- }
-
- /// Frees HP guard. The pointer guarded may be deleted after this.
- ~AutoHPGuard()
- {
- m_gc.freeGuard( m_hp );
- }
-
- /// Returns thread GC
- ThreadGC& getGC() const
- {
- return m_gc;
- }
-
- /// Protects the pointer \p p against reclamation (guards the pointer).
- template <typename T>
- T * operator =( T * p )
- {
- return m_hp = p;
- }
-
- //@cond
- std::nullptr_t operator =(std::nullptr_t)
- {
- return m_hp = nullptr;
- }
-
- hazard_ptr get() const
- {
- return m_hp;
- }
- //@endcond
- };
-
- /// Auto-managed array of hazard pointers
- /**
- This class is wrapper around cds::gc::hzp::details::HPArray class.
- \p Count is the size of HP array
- */
- template <size_t Count>
- class AutoHPArray: public details::HPArray<Count>
- {
- ThreadGC& m_mgr ; ///< Thread GC
-
- public:
- /// Rebind array for other size \p COUNT2
- template <size_t Count2>
- struct rebind {
- typedef AutoHPArray<Count2> other ; ///< rebinding result
- };
-
- public:
- /// Allocates array of HP guard from \p mgr
- AutoHPArray( ThreadGC& mgr )
- : m_mgr( mgr )
- {
- mgr.allocGuard( *this );
- }
-
- /// Frees array of HP guard
- ~AutoHPArray()
- {
- m_mgr.freeGuard( *this );
- }
-
- /// Returns thread GC
- ThreadGC& getGC() const { return m_mgr; }
- };
-
- } // namespace hzp
-}} // namespace cds::gc
-
-// Inlines
-#include <cds/gc/hp/details/hp_inline.h>
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-# pragma warning(pop)
-#endif
-
-#endif // #ifndef __CDS_GC_HZP_HZP_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_HP_DECL_H
-#define __CDS_GC_HP_DECL_H
-
-#include <stdexcept> // overflow_error
-#include <cds/gc/hp/hp.h>
-#include <cds/details/marked_ptr.h>
-
-namespace cds { namespace gc {
- /// @defgroup cds_garbage_collector Garbage collectors
-
- /// Hazard Pointer garbage collector
- /** @ingroup cds_garbage_collector
- @headerfile cds/gc/hp.h
-
- This class realizes a wrapper for Hazard Pointer garbage collector internal implementation.
-
- Sources:
- - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes"
- - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects"
- - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers"
-
- See \ref cds_how_to_use "How to use" section for details of garbage collector applying.
- */
- class HP
- {
- public:
- /// Native guarded pointer type
- typedef gc::hzp::hazard_pointer guarded_pointer;
-
- /// Atomic reference
- /**
- @headerfile cds/gc/hp.h
- */
- template <typename T> using atomic_ref = atomics::atomic<T *>;
-
- /// Atomic marked pointer
- /**
- @headerfile cds/gc/hp.h
- */
- template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
-
- /// Atomic type
- /**
- @headerfile cds/gc/hp.h
- */
- template <typename T> using atomic_type = atomics::atomic<T>;
-
- /// Thread GC implementation for internal usage
- typedef hzp::ThreadGC thread_gc_impl;
-
- /// Wrapper for hzp::ThreadGC class
- /**
- @headerfile cds/gc/hp.h
-            This class automatically attaches/detaches the Hazard Pointer GC
-            for the current thread.
- */
- class thread_gc: public thread_gc_impl
- {
- //@cond
- bool m_bPersistent;
- //@endcond
- public:
-
- /// Constructor
- /**
- The constructor attaches the current thread to the Hazard Pointer GC
- if it is not yet attached.
- The \p bPersistent parameter specifies attachment persistence:
- - \p true - the class destructor will not detach the thread from Hazard Pointer GC.
- - \p false (default) - the class destructor will detach the thread from Hazard Pointer GC.
- */
- thread_gc(
- bool bPersistent = false
- ) ; //inline in hp_impl.h
-
- /// Destructor
- /**
- If the object has been created in persistent mode, the destructor does nothing.
- Otherwise it detaches the current thread from Hazard Pointer GC.
- */
- ~thread_gc() ; // inline in hp_impl.h
- };
-
- /// Hazard Pointer guard
- /**
- @headerfile cds/gc/hp.h
- This class is a wrapper for hzp::AutoHPGuard.
- */
- class Guard: public hzp::AutoHPGuard
- {
- //@cond
- typedef hzp::AutoHPGuard base_class;
- //@endcond
-
- public:
- //@cond
- Guard() ; // inline in hp_impl.h
- //@endcond
-
- /// Protects a pointer of type \p atomic<T*>
- /**
-                Returns the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the HP slot repeatedly until the guard's value equals \p toGuard
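-
-                For example (a usage sketch; \p node and \p atHead are assumed to be defined by the caller):
-                \code
-                atomics::atomic<node *> atHead;
-                cds::gc::HP::Guard g;
-                node * p = g.protect( atHead ); // p is protected while g holds it
-                \endcode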
- */
- template <typename T>
- T protect( atomics::atomic<T> const& toGuard )
- {
- T pCur = toGuard.load(atomics::memory_order_relaxed);
- T pRet;
- do {
- pRet = assign( pCur );
- pCur = toGuard.load(atomics::memory_order_acquire);
- } while ( pRet != pCur );
- return pCur;
- }
-
- /// Protects a converted pointer of type \p atomic<T*>
- /**
-                Returns the value of \p toGuard
-
- The function tries to load \p toGuard and to store result of \p f functor
- to the HP slot repeatedly until the guard's value equals \p toGuard.
-
- The function is useful for intrusive containers when \p toGuard is a node pointer
- that should be converted to a pointer to the value type before protecting.
- The parameter \p f of type Func is a functor that makes this conversion:
- \code
- struct functor {
- value_type * operator()( T * p );
- };
- \endcode
-                In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
- */
- template <typename T, class Func>
- T protect( atomics::atomic<T> const& toGuard, Func f )
- {
- T pCur = toGuard.load(atomics::memory_order_relaxed);
- T pRet;
- do {
- pRet = pCur;
- assign( f( pCur ) );
- pCur = toGuard.load(atomics::memory_order_acquire);
- } while ( pRet != pCur );
- return pCur;
- }
-
- /// Store \p p to the guard
- /**
-                The function is equivalent to a simple assignment of the value \p p to the guard; no loop is performed.
-                It can be used for a pointer that cannot be changed concurrently.
- */
- template <typename T>
- T * assign( T * p )
- {
- return base_class::operator =(p);
- }
-
- //@cond
- std::nullptr_t assign( std::nullptr_t )
- {
- return base_class::operator =(nullptr);
- }
- //@endcond
-
- /// Copy from \p src guard to \p this guard
- void copy( Guard const& src )
- {
- assign( src.get_native() );
- }
-
- /// Store marked pointer \p p to the guard
- /**
-                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
-                It can be used for a marked pointer that cannot be changed concurrently.
- */
- template <typename T, int BITMASK>
- T * assign( cds::details::marked_ptr<T, BITMASK> p )
- {
- return base_class::operator =( p.ptr() );
- }
-
- /// Clear value of the guard
- void clear()
- {
- assign( nullptr );
- }
-
- /// Get the value currently protected
- template <typename T>
- T * get() const
- {
- return reinterpret_cast<T *>( get_native() );
- }
-
- /// Get native hazard pointer stored
- guarded_pointer get_native() const
- {
- return base_class::get();
- }
- };
-
- /// Array of Hazard Pointer guards
- /**
- @headerfile cds/gc/hp.h
-            This class is a wrapper around the hzp::AutoHPArray template.
-            Template parameter \p Count defines the size of the HP array.
- */
- template <size_t Count>
- class GuardArray: public hzp::AutoHPArray<Count>
- {
- //@cond
- typedef hzp::AutoHPArray<Count> base_class;
- //@endcond
- public:
- /// Rebind array for other size \p Count2
- template <size_t Count2>
- struct rebind {
- typedef GuardArray<Count2> other ; ///< rebinding result
- };
-
- public:
- //@cond
- GuardArray() ; // inline in hp_impl.h
- //@endcond
- /// Protects a pointer of type \p atomic<T*>
- /**
-                Returns the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
- */
- template <typename T>
- T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
- {
- T pRet;
- do {
- pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
- } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
-
- return pRet;
- }
-
-            /// Protects a converted pointer of type \p atomic<T*>
-            /**
-                Returns the value of \p toGuard
-
- The function tries to load \p toGuard and to store it
- to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
-
- The function is useful for intrusive containers when \p toGuard is a node pointer
- that should be converted to a pointer to the value type before guarding.
- The parameter \p f of type Func is a functor that makes this conversion:
- \code
- struct functor {
- value_type * operator()( T * p );
- };
- \endcode
-                In effect, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
- */
- template <typename T, class Func>
- T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
- {
- T pRet;
- do {
- assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
- } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
-
- return pRet;
- }
-
-            /// Store \p p to the slot \p nIndex
-            /**
-                The function is equivalent to a simple assignment; no loop is performed.
- */
- template <typename T>
- T * assign( size_t nIndex, T * p )
- {
- base_class::set(nIndex, p);
- return p;
- }
-
-            /// Store marked pointer \p p to the slot \p nIndex
-            /**
-                The function is equivalent to a simple assignment of <tt>p.ptr()</tt>; no loop is performed.
-                It can be used for a marked pointer that cannot be changed concurrently.
- */
- template <typename T, int BITMASK>
- T * assign( size_t nIndex, cds::details::marked_ptr<T, BITMASK> p )
- {
- return assign( nIndex, p.ptr() );
- }
-
- /// Copy guarded value from \p src guard to slot at index \p nIndex
- void copy( size_t nIndex, Guard const& src )
- {
- assign( nIndex, src.get_native() );
- }
-
- /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex
- void copy( size_t nDestIndex, size_t nSrcIndex )
- {
- assign( nDestIndex, get_native( nSrcIndex ));
- }
-
- /// Clear value of the slot \p nIndex
- void clear( size_t nIndex)
- {
- base_class::clear( nIndex );
- }
-
- /// Get current value of slot \p nIndex
- template <typename T>
- T * get( size_t nIndex) const
- {
- return reinterpret_cast<T *>( get_native( nIndex ) );
- }
-
-            /// Get native hazard pointer stored in the slot \p nIndex
- guarded_pointer get_native( size_t nIndex ) const
- {
- return base_class::operator[](nIndex).get();
- }
-
- /// Capacity of the guard array
- static CDS_CONSTEXPR size_t capacity()
- {
- return Count;
- }
- };
-
- public:
- /// Initializes hzp::GarbageCollector singleton
- /**
-            The constructor initializes the GC singleton with the passed parameters.
-            If the GC instance does not exist, the function creates it.
-            Otherwise it does nothing.
-
-            Michael's HP reclamation schema depends on three parameters:
-            - \p nHazardPtrCount - hazard pointer count per thread. Usually it is a small number (up to 10) depending on
-                                   the data structure algorithms. By default, if \p nHazardPtrCount = 0, the function
-                                   uses the maximum hazard pointer count for the CDS library.
-            - \p nMaxThreadCount - max count of threads using the Hazard Pointer GC in your application. Default is 100.
-            - \p nMaxRetiredPtrCount - capacity of the array of retired pointers for each thread. Must be greater than
-                                   <tt> nHazardPtrCount * nMaxThreadCount </tt>. Default is <tt>2 * nHazardPtrCount * nMaxThreadCount </tt>.
- */
- HP(
- size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread
- size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application
- size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread
- hzp::scan_type nScanType = hzp::inplace ///< Scan type (see \ref hzp::scan_type enum)
- )
- {
- hzp::GarbageCollector::Construct(
- nHazardPtrCount,
- nMaxThreadCount,
- nMaxRetiredPtrCount,
- nScanType
- );
- }
-
- /// Terminates GC singleton
- /**
- The destructor calls \code hzp::GarbageCollector::Destruct( true ) \endcode
- */
- ~HP()
- {
- hzp::GarbageCollector::Destruct( true );
- }
-
-        /// Checks that the count of hazard pointers is no less than \p nCountNeeded
-        /**
-            If \p bRaiseException is \p true (the default), the function raises
-            an \p std::overflow_error exception "Too few hazard pointers"
-            if \p nCountNeeded is greater than the count of hazard pointers per thread.
- */
- static bool check_available_guards( size_t nCountNeeded, bool bRaiseException = true )
- {
- if ( hzp::GarbageCollector::instance().getHazardPointerCount() < nCountNeeded ) {
- if ( bRaiseException )
- throw std::overflow_error( "Too few hazard pointers" );
- return false;
- }
- return true;
- }
-
- /// Returns max Hazard Pointer count
- size_t max_hazard_count() const
- {
- return hzp::GarbageCollector::instance().getHazardPointerCount();
- }
-
-        /// Returns max thread count
- size_t max_thread_count() const
- {
- return hzp::GarbageCollector::instance().getMaxThreadCount();
- }
-
- /// Returns capacity of retired pointer array
- size_t retired_array_capacity() const
- {
- return hzp::GarbageCollector::instance().getMaxRetiredPtrCount();
- }
-
- /// Retire pointer \p p with function \p pFunc
- /**
-            The function places pointer \p p into the array of pointers ready for removing
-            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
-            The pointer is deleted by calling the function \p pFunc.
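-
-            For example (a sketch; \p Foo and \p fooDisposer are assumed user-defined):
-            \code
-            void fooDisposer( Foo * p ) { delete p; }
-
-            Foo * p = new Foo;
-            // ... p is excluded from the lock-free container ...
-            cds::gc::HP::retire( p, fooDisposer ) ; // p is deleted when no hazard pointer points to it
-            \endcode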
- */
- template <typename T>
- static void retire( T * p, void (* pFunc)(T *) ) ; // inline in hp_impl.h
-
- /// Retire pointer \p p with functor of type \p Disposer
- /**
-            The function places pointer \p p into the array of pointers ready for removing
-            (the so-called retired pointer array). The pointer can be safely removed when no hazard pointer points to it.
-
- Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is:
- \code
- template <typename T>
- struct disposer {
- void operator()( T * p ) ; // disposing operator
- };
- \endcode
-            Since the functor call can happen at any time after the \p retire call, additional restrictions are imposed on the \p Disposer type:
-            - it should be a stateless functor
-            - it should be default-constructible
-            - the result of a functor call with argument \p p should not depend on where the functor is called.
-
- \par Examples:
- Operator \p delete functor:
- \code
- template <typename T>
- struct disposer {
- void operator ()( T * p ) {
- delete p;
- }
- };
-
- // How to call GC::retire method
- int * p = new int;
-
- // ... use p in lock-free manner
-
- cds::gc::HP::retire<disposer>( p ) ; // place p to retired pointer array of HP GC
- \endcode
-
- Functor based on \p std::allocator :
- \code
- template <typename ALLOC = std::allocator<int> >
- struct disposer {
- template <typename T>
- void operator()( T * p ) {
-                    typedef typename ALLOC::template rebind<T>::other alloc_t;
- alloc_t a;
- a.destroy( p );
- a.deallocate( p, 1 );
- }
- };
- \endcode
- */
- template <class Disposer, typename T>
- static void retire( T * p ) ; // inline in hp_impl.h
-
- /// Get current scan strategy
- hzp::scan_type getScanType() const
- {
- return hzp::GarbageCollector::instance().getScanType();
- }
-
- /// Set current scan strategy
- void setScanType(
- hzp::scan_type nScanType ///< new scan strategy
- )
- {
- hzp::GarbageCollector::instance().setScanType( nScanType );
- }
-
- /// Checks if Hazard Pointer GC is constructed and may be used
- static bool isUsed()
- {
- return hzp::GarbageCollector::isUsed();
- }
-
-
- /// Forced GC cycle call for current thread
- /**
- Usually, this function should not be called directly.
- */
- static void scan() ; // inline in hp_impl.h
-
- /// Synonym for \ref scan()
- static void force_dispose()
- {
- scan();
- }
- };
-}} // namespace cds::gc
-
-#endif // #ifndef __CDS_GC_HP_DECL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_HP_IMPL_H
-#define __CDS_GC_HP_IMPL_H
-
-#include <cds/threading/model.h>
-#include <cds/details/static_functor.h>
-
-//@cond
-namespace cds { namespace gc {
-
- inline HP::thread_gc::thread_gc(
- bool bPersistent
- )
- : m_bPersistent( bPersistent )
- {
- if ( !threading::Manager::isThreadAttached() )
- threading::Manager::attachThread();
- }
-
- inline HP::thread_gc::~thread_gc()
- {
- if ( !m_bPersistent )
- cds::threading::Manager::detachThread();
- }
-
- inline HP::Guard::Guard()
- : Guard::base_class( cds::threading::getGC<HP>() )
- {}
-
- template <size_t COUNT>
- inline HP::GuardArray<COUNT>::GuardArray()
- : GuardArray::base_class( cds::threading::getGC<HP>() )
- {}
-
- template <typename T>
- inline void HP::retire( T * p, void (* pFunc)(T *) )
- {
- cds::threading::getGC<HP>().retirePtr( p, pFunc );
- }
-
- template <class Disposer, typename T>
- inline void HP::retire( T * p )
- {
- cds::threading::getGC<HP>().retirePtr( p, cds::details::static_functor<Disposer, T>::call );
- }
-
- inline void HP::scan()
- {
- cds::threading::getGC<HP>().scan();
- }
-
-
-}} // namespace cds::gc
-//@endcond
-
-#endif // #ifndef __CDS_GC_HP_IMPL_H
+++ /dev/null
-//$$CDS-header$$
-
-#ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
-#define __CDS_GC_PTB_PASS_THE_BUCK_H
-
-#include <mutex> // unique_lock
-#include <cds/cxx11_atomic.h>
-#include <cds/gc/details/retired_ptr.h>
-#include <cds/details/aligned_allocator.h>
-#include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-# pragma warning(push)
-# pragma warning(disable:4251) // C4251: 'identifier' : class 'type' needs to have dll-interface to be used by clients of class 'type2'
-#endif
-
-namespace cds { namespace gc {
-
- /// Pass The Buck reclamation schema
- /**
- \par Sources:
- - [2002] M. Herlihy, V. Luchangco, and M. Moir. The repeat offender problem: A mechanism for supporting
- dynamic-sized lockfree data structures. Technical Report TR-2002-112, Sun Microsystems Laboratories, 2002
- - [2002] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Dynamic-sized Lockfree Data Structures.
- Technical Report TR-2002-110, Sun Microsystems Laboratories, 2002
- - [2005] M. Herlihy, V. Luchangco, P. Martin, and M. Moir. Nonblocking Memory Management Support
- for Dynamic-Sized Data Structures. ACM Transactions on Computer Systems, Vol.23, No.2, May 2005
-
-
-        The cds::gc::ptb namespace and its members are the internal representation of the Pass-the-Buck GC and should not be used directly.
-        Use the cds::gc::PTB class in your code.
-
-        The Pass-the-Buck (PTB) garbage collector is a singleton. The main user-level part of the PTB schema is
-        the GC class and its nested classes. Before using any PTB-related class you must initialize the PTB garbage collector
-        by constructing a cds::gc::PTB object at the beginning of your main().
-        See the cds::gc::PTB class for explanation.
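-
-        A minimal initialization sketch (assuming the standard libcds startup sequence):
-        \code
-        #include <cds/init.h>
-        #include <cds/gc/ptb.h>
-
-        int main() {
-            cds::Initialize() ;      // initialize the library
-            {
-                cds::gc::PTB ptbGC ; // construct the Pass-the-Buck GC singleton
-                // attach threads and use PTB-based containers here
-            }
-            cds::Terminate() ;       // finalize the library
-        }
-        \endcode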
-
- \par Implementation issues
-        The global list of free guards (cds::gc::ptb::details::guard_allocator) is protected by a spin-lock (i.e. serialized).
-        This solution should not introduce a significant performance bottleneck, because each thread has its own set
-        of guards allocated from the global free list, and the global list is accessed only when
-        all of the thread's guards are busy. In that case the thread allocates the next block of guards from the global list.
-        The guards allocated for a thread are pushed back to the global list only when the thread terminates.
- */
- namespace ptb {
-
- // Forward declarations
- class Guard;
- template <size_t Count> class GuardArray;
- class ThreadGC;
- class GarbageCollector;
-
- /// Retired pointer type
- typedef cds::gc::details::retired_ptr retired_ptr;
-
- using cds::gc::details::free_retired_ptr_func;
-
- /// Details of Pass the Buck algorithm
- namespace details {
-
- // Forward declaration
- class liberate_set;
-
- /// Retired pointer buffer node
- struct retired_ptr_node {
- retired_ptr m_ptr ; ///< retired pointer
- retired_ptr_node * m_pNext ; ///< next retired pointer in buffer
- retired_ptr_node * m_pNextFree ; ///< next item in free list of retired_ptr_node
- };
-
- /// Internal guard representation
- struct guard_data {
- typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
- typedef void * guarded_ptr ; ///< type of value guarded
-
- atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
-
-#if 0
- typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field
- handoff_spin spinHandOff ; ///< access to \p pHandOff field
- handoff_ptr pHandOff ; ///< trapped pointer
-#endif
-
- atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
- atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
-
- guard_data * pThreadNext ; ///< next item of thread's local list of guards
-
- //@cond
- guard_data()
- : pPost( nullptr )
-#if 0
- , pHandOff( nullptr )
-#endif
- , pGlobalNext( nullptr )
- , pNextFree( nullptr )
- , pThreadNext( nullptr )
- {}
-
- void init()
- {
- pPost.store( nullptr, atomics::memory_order_relaxed );
- }
- //@endcond
-
- /// Checks if the guard is free, that is, it does not contain any pointer guarded
- bool isFree() const
- {
- return pPost.load( atomics::memory_order_acquire ) == nullptr;
- }
- };
-
- /// Guard allocator
- template <class Alloc = CDS_DEFAULT_ALLOCATOR>
- class guard_allocator
- {
- cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
-
- atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
- SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
-
- /*
-                    Unfortunately, access to the list of free guards is lock-based.
-                    Lock-free manipulation of the guard free-list is ABA-prone.
-                    TODO: work with m_FreeGuardList in a lock-free manner.
- */
-
- private:
-                /// Allocates a new guard from the heap using the guard allocator
- guard_data * allocNew()
- {
- //TODO: the allocator should make block allocation
-
- details::guard_data * pGuard = m_GuardAllocator.New();
-
-                    // Link the guard into the global list.
-                    // m_GuardList is an accumulating list: items are never removed from it,
-                    // so the ABA problem cannot arise here.
- details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
- do {
- pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
- // pHead is changed by compare_exchange_weak
- } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
-
- pGuard->init();
- return pGuard;
- }
-
- public:
- // Default ctor
- guard_allocator()
- : m_GuardList( nullptr )
- , m_FreeGuardList( nullptr )
- {}
-
- // Destructor
- ~guard_allocator()
- {
- guard_data * pNext;
- for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
- pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
- m_GuardAllocator.Delete( pData );
- }
- }
-
-                /// Allocates a guard from the free list, or from the heap if the free list is empty
- guard_data * alloc()
- {
- // Try to pop a guard from free-list
- details::guard_data * pGuard;
-
- {
- std::unique_lock<SpinLock> al( m_freeListLock );
- pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
- if ( pGuard )
- m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
- }
- if ( !pGuard )
- return allocNew();
-
- pGuard->init();
- return pGuard;
- }
-
- /// Frees guard \p pGuard
- /**
-                    The function places the guard \p pGuard into the free-list
- */
- void free( guard_data * pGuard )
- {
- pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
-
- std::unique_lock<SpinLock> al( m_freeListLock );
- pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
- m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
- }
-
-                /// Allocates a list of guards
-                /**
-                    The returned list is linked by the guards' \p pThreadNext and \p pNextFree fields.
-
-                    A supporting method for cds::gc::ptb::ThreadGC.
- */
- guard_data * allocList( size_t nCount )
- {
- assert( nCount != 0 );
-
- guard_data * pHead;
- guard_data * pLast;
-
- pHead =
- pLast = alloc();
-
-                    // The allocated guard list is private to the thread,
-                    // so we can use relaxed memory order
- while ( --nCount ) {
- guard_data * p = alloc();
- pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
- pLast = p;
- }
-
- pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
-
- return pHead;
- }
-
-                /// Frees a list of guards
-                /**
-                    The list \p pList is linked by the guards' \p pThreadNext field.
-
-                    A supporting method for cds::gc::ptb::ThreadGC.
- */
- void freeList( guard_data * pList )
- {
- assert( pList != nullptr );
-
- guard_data * pLast = pList;
- while ( pLast->pThreadNext ) {
- pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
- guard_data * p;
- pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
- pLast = p;
- }
-
- std::unique_lock<SpinLock> al( m_freeListLock );
- pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
- m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
- }
-
-                /// Returns the head of the list of allocated guards
- guard_data * begin()
- {
- return m_GuardList.load(atomics::memory_order_acquire);
- }
- };
-
- /// Retired pointer buffer
- /**
-                The buffer of retired nodes ready for liberating.
-                When the buffer size exceeds a threshold, the GC calls the \p liberate procedure to free
-                the retired nodes.
- */
- class retired_ptr_buffer
- {
- atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
- atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
-
- public:
- //@cond
- retired_ptr_buffer()
- : m_pHead( nullptr )
- , m_nItemCount(0)
- {}
-
- ~retired_ptr_buffer()
- {
- assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
- }
- //@endcond
-
-                /// Pushes a new node into the buffer. Returns the current buffer size
- size_t push( retired_ptr_node& node )
- {
- retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
- do {
- node.m_pNext = pHead;
- // pHead is changed by compare_exchange_weak
- } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
-
- return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
- }
-
-                /// Result of the \ref ptb_gc_privatize "privatize" function.
-                /**
-                    The \p privatize function returns the retired node list as \p first and the size of that list as \p second.
- */
- typedef std::pair<retired_ptr_node *, size_t> privatize_result;
-
-                /// Gets the current list of retired pointers and clears the list
-                /**@anchor ptb_gc_privatize
- */
- privatize_result privatize()
- {
- privatize_result res;
- res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
-
-                    // The item counter is needed only as a threshold for the liberate function.
-                    // So we may clear the item counter without synchronization with m_pHead.
- res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
- return res;
- }
-
- /// Returns current size of buffer (approximate)
- size_t size() const
- {
- return m_nItemCount.load(atomics::memory_order_relaxed);
- }
- };
-
- /// Pool of retired pointers
- /**
-                The class acts as an allocator of retired nodes.
-                Retired pointers are linked into a lock-free list.
- */
- template <class Alloc = CDS_DEFAULT_ALLOCATOR>
- class retired_ptr_pool {
- /// Pool item
- typedef retired_ptr_node item;
-
- /// Count of items in block
- static const size_t m_nItemPerBlock = 1024 / sizeof(item) - 1;
-
- /// Pool block
- struct block {
- block * pNext ; ///< next block
- item items[m_nItemPerBlock] ; ///< item array
- };
-
-                atomics::atomic<block *> m_pBlockListHead ;     ///< head of the allocated block list
-
-                // To solve the ABA problem we use an epoch-based approach
- static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
- atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
- atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
- atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
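-
-                // A brief note on the epoch scheme (informal): alloc() pops items from the
-                // current epoch's free list, while free_range() pushes freed items into the
-                // "next" (previous modulo c_nEpochCount) epoch's list; inc_epoch() advances
-                // the current epoch during liberate. A freed item therefore becomes
-                // allocatable again only after several epoch increments, which closes the
-                // ABA window for the lock-free free lists.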
-
- cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
-
- private:
- //@cond
- void allocNewBlock()
- {
- // allocate new block
- block * pNew = m_BlockAllocator.New();
-
- // link items within the block
- item * pLastItem = pNew->items + m_nItemPerBlock - 1;
- for ( item * pItem = pNew->items; pItem != pLastItem; ++pItem ) {
- pItem->m_pNextFree = pItem + 1;
- CDS_STRICT_DO( pItem->m_pNext = nullptr );
- }
-
- // link new block to block list
- {
- block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
- do {
- pNew->pNext = pHead;
- // pHead is changed by compare_exchange_weak
- } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
- }
-
- // link block's items to free list
- {
- item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
- do {
- pLastItem->m_pNextFree = pHead;
- // pHead is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
- }
- }
-
- unsigned int current_epoch() const
- {
- return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
- }
- unsigned int next_epoch() const
- {
- return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
- }
- //@endcond
-
- public:
- //@cond
- retired_ptr_pool()
- : m_pBlockListHead( nullptr )
- , m_nCurEpoch(0)
- , m_pGlobalFreeHead( nullptr )
- {
- for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
- m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
-
- allocNewBlock();
- }
-
- ~retired_ptr_pool()
- {
- block * p;
- for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
- p = pBlock->pNext;
- m_BlockAllocator.Delete( pBlock );
- }
- }
-
- /// Increments current epoch
- void inc_epoch()
- {
- m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
- }
-
- //@endcond
-
-                /// Allocates a new retired pointer
- retired_ptr_node& alloc()
- {
- unsigned int nEpoch;
- item * pItem;
- for (;;) {
- pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
- if ( !pItem )
- goto retry;
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
- goto success;
- }
-
- /*
- item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
- while ( pItem ) {
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
- goto success;
- }
- */
-
- // Epoch free list is empty
- // Alloc from global free list
- retry:
- pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
- do {
- if ( !pItem ) {
- allocNewBlock();
- goto retry;
- }
- // pItem is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
-
- success:
- CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
- return *pItem;
- }
-
-                /// Allocates and initializes a new retired pointer
- retired_ptr_node& alloc( const retired_ptr& p )
- {
- retired_ptr_node& node = alloc();
- node.m_ptr = p;
- return node;
- }
-
-                /// Places the list (pHead, pTail) of retired pointers into the pool (frees the retired pointers)
-                /**
-                    The list is linked via the m_pNextFree field
- */
- void free_range( retired_ptr_node * pHead, retired_ptr_node * pTail )
- {
- assert( pHead != nullptr );
- assert( pTail != nullptr );
-
- unsigned int nEpoch;
- item * pCurHead;
- do {
- pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
- pTail->m_pNextFree = pCurHead;
- } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
- }
- };
-
- /// Uninitialized guard
- class guard
- {
- friend class ThreadGC;
- protected:
- details::guard_data * m_pGuard ; ///< Pointer to guard data
- public:
-                /// Initializes an empty guard.
- guard()
- : m_pGuard( nullptr )
- {}
-
- /// The object is not copy-constructible
- guard( guard const& ) = delete;
-
- /// Object destructor, does nothing
- ~guard()
- {}
-
- /// Guards pointer \p p
- void set( void * p )
- {
- assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( p, atomics::memory_order_release );
- //CDS_COMPILER_RW_BARRIER;
- }
-
- /// Clears the guard
- void clear()
- {
- assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
- }
-
- /// Guards pointer \p p
- template <typename T>
- T * operator =( T * p )
- {
- set( reinterpret_cast<void *>( const_cast<T *>(p) ));
- return p;
- }
-
- //@cond
- std::nullptr_t operator=(std::nullptr_t)
- {
- clear();
- return nullptr;
- }
- //@endcond
-
- public: // for ThreadGC.
- /*
-                GCC cannot compile the template versions of ThreadGC::allocGuard/freeGuard:
-                the compiler reports that 'cds::gc::ptb::details::guard_data* cds::gc::ptb::details::guard::m_pGuard' is protected,
-                despite the fact that ThreadGC is declared as a friend of the guard class.
-                We do not want to declare the m_pGuard member public.
-                Therefore, we add the public set_guard/get_guard functions.
- */
- /// Set guard data
- void set_guard( details::guard_data * pGuard )
- {
- assert( m_pGuard == nullptr );
- m_pGuard = pGuard;
- }
-
- /// Get current guard data
- details::guard_data * get_guard()
- {
- return m_pGuard;
- }
- /// Get current guard data
- details::guard_data * get_guard() const
- {
- return m_pGuard;
- }
- };
-
- } // namespace details
-
- /// Guard
- /**
-            This class represents an auto guard: the ctor allocates a guard from the guard pool,
-            and the dtor returns the guard to the pool of free guards.
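-
-            \par Example (a usage sketch; \p bar is an assumed user type and \p gc the current thread's ThreadGC):
-            \code
-            void foo( cds::gc::ptb::ThreadGC& gc, bar * pNode )
-            {
-                cds::gc::ptb::Guard g( gc );
-                g = pNode ;   // pNode is now guarded against reclamation
-                // ... access *pNode safely while g is alive ...
-            }
-            \endcode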
- */
- class Guard: public details::guard
- {
- //@cond
- typedef details::guard base_class;
- friend class ThreadGC;
- //@endcond
-
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
- public:
-            /// Allocates a guard from \p gc. \p gc must be the ThreadGC object of the current thread
- Guard(ThreadGC& gc);
-
-            /// Returns the allocated guard back to the pool of free guards
- ~Guard(); // inline after GarbageCollector
-
- /// Returns PTB GC object
- ThreadGC& getGC()
- {
- return m_gc;
- }
-
- /// Guards pointer \p p
- template <typename T>
- T * operator =( T * p )
- {
- return base_class::operator =<T>( p );
- }
-
- //@cond
- std::nullptr_t operator=(std::nullptr_t)
- {
- return base_class::operator =(nullptr);
- }
- //@endcond
- };
-
- /// Array of guards
- /**
-            This class represents an array of auto guards: the ctor allocates \p Count guards from the guard pool,
-            and the dtor returns the allocated guards back to the pool.
- */
- template <size_t Count>
- class GuardArray
- {
- details::guard m_arr[Count] ; ///< array of guard
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
- const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
-
- public:
- /// Rebind array for other size \p OtherCount
- template <size_t OtherCount>
- struct rebind {
- typedef GuardArray<OtherCount> other ; ///< rebinding result
- };
-
- public:
-            /// Allocates an array of guards from \p gc, which must be the ThreadGC object of the current thread
- GuardArray( ThreadGC& gc ) ; // inline below
-
- /// The object is not default-constructible
- GuardArray() = delete;
-
- /// The object is not copy-constructible
- GuardArray( GuardArray const& ) = delete;
-
-            /// Returns the allocated guards back to the pool
- ~GuardArray() ; // inline below
-
- /// Returns the capacity of array
- CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
- {
- return c_nCapacity;
- }
-
- /// Returns PTB ThreadGC object
- ThreadGC& getGC() CDS_NOEXCEPT
- {
- return m_gc;
- }
-
- /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
- details::guard& operator []( size_t nIndex )
- {
- assert( nIndex < capacity() );
- return m_arr[nIndex];
- }
-
- /// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count) [const version]
- const details::guard& operator []( size_t nIndex ) const
- {
- assert( nIndex < capacity() );
- return m_arr[nIndex];
- }
-
- /// Set the guard \p nIndex. 0 <= \p nIndex < \p Count
- template <typename T>
- void set( size_t nIndex, T * p )
- {
- assert( nIndex < capacity() );
- m_arr[nIndex].set( p );
- }
-
- /// Clears (sets to \p nullptr) the guard \p nIndex
- void clear( size_t nIndex )
- {
- assert( nIndex < capacity() );
- m_arr[nIndex].clear();
- }
-
- /// Clears all guards in the array
- void clearAll()
- {
- for ( size_t i = 0; i < capacity(); ++i )
- clear(i);
- }
- };
-
- /// Memory manager (Garbage collector)
- class CDS_EXPORT_API GarbageCollector
- {
- private:
- //@cond
- friend class ThreadGC;
-
- /// Internal GC statistics
- struct internal_stat
- {
- atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
-                atomics::atomic<size_t>  m_nFreeGuardCount ;   ///< Count of free guards
-
- internal_stat()
- : m_nGuardCount(0)
- , m_nFreeGuardCount(0)
- {}
- };
- //@endcond
-
- public:
- /// Exception "No GarbageCollector object is created"
- CDS_DECLARE_EXCEPTION( PTBManagerEmpty, "Global PTB GarbageCollector is NULL" );
-
- /// Internal GC statistics
- struct InternalState
- {
- size_t m_nGuardCount ; ///< Total guard count
-                size_t m_nFreeGuardCount ;   ///< Count of free guards
-
- //@cond
- InternalState()
- : m_nGuardCount(0)
- , m_nFreeGuardCount(0)
- {}
-
- InternalState& operator =( internal_stat const& s )
- {
- m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
- m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
-
- return *this;
- }
- //@endcond
- };
-
- private:
- static GarbageCollector * m_pManager ; ///< GC global instance
-
- details::guard_allocator<> m_GuardPool ; ///< Guard pool
- details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
- details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
-            //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate function calls
-
- atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
- const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
-
- internal_stat m_stat ; ///< Internal statistics
- bool m_bStatEnabled ; ///< Internal Statistics enabled
-
- public:
-            /// Initializes the PTB memory manager singleton
-            /**
-                This member function creates and initializes the PTB global object.
-                The function should be called before using CDS data structures based on the cds::gc::PTB GC. Usually,
-                this member function is called in the \p main() function. See cds::gc::ptb for an example.
-                After calling this function you may use CDS data structures based on cds::gc::PTB.
-
-                \par Parameters
-                \li \p nLiberateThreshold - the liberate threshold. When the count of retired pointers reaches this value,
-                    the \ref ptb_gc_liberate "liberate" member function is called to free the retired pointers.
-                    If \p nLiberateThreshold <= 1, \p liberate is called after each \ref ptb_gc_retirePtr "retirePtr" call.
-                \li \p nInitialThreadGuardCount - the initial count of guards allocated for a ThreadGC. When a thread
-                    is initialized, the GC allocates the thread's local guard pool from the common guard pool.
-                    If needed, the thread's local guard pool grows automatically from the common pool.
-                    When the thread terminates, its guard pool is returned to the GC's common pool.
-
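-                A sketch of explicit initialization with custom parameters:
-                \code
-                cds::gc::ptb::GarbageCollector::Construct(
-                    2048,   // nLiberateThreshold
-                    16      // nInitialThreadGuardCount
-                );
-                // ... attach threads, work with PTB-based containers ...
-                cds::gc::ptb::GarbageCollector::Destruct();
-                \endcode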
- */
- static void CDS_STDCALL Construct(
- size_t nLiberateThreshold = 1024
- , size_t nInitialThreadGuardCount = 8
- );
-
- /// Destroys PTB memory manager
- /**
-                The member function destroys the PTB global object. After calling this function you may \b NOT
-                use CDS data structures based on cds::gc::PTB. Usually, the \p Destruct function is called
-                at the end of your \p main(). See cds::gc::ptb for an example.
- */
- static void CDS_STDCALL Destruct();
-
- /// Returns pointer to GarbageCollector instance
- /**
-                If the PTB GC is not initialized, the \p PTBManagerEmpty exception is thrown
- */
- static GarbageCollector& instance()
- {
- if ( m_pManager == nullptr )
- throw PTBManagerEmpty();
- return *m_pManager;
- }
-
- /// Checks if global GC object is constructed and may be used
- static bool isUsed() CDS_NOEXCEPT
- {
- return m_pManager != nullptr;
- }
-
- public:
- //@{
- /// Internal interface
-
- /// Allocates a guard
- details::guard_data * allocGuard()
- {
- return m_GuardPool.alloc();
- }
-
-            /// Frees guard \p pGuard for future reuse
- void freeGuard(details::guard_data * pGuard )
- {
- m_GuardPool.free( pGuard );
- }
-
-            /// Allocates a guard list for a thread.
- details::guard_data * allocGuardList( size_t nCount )
- {
- return m_GuardPool.allocList( nCount );
- }
-
- /// Frees thread's guard list pointed by \p pList
- void freeGuardList( details::guard_data * pList )
- {
- m_GuardPool.freeList( pList );
- }
-
-            /// Places retired pointer \p p and its deleter \p pFunc into the thread's array of retired pointers for deferred reclamation
- /**@anchor ptb_gc_retirePtr
- */
- template <typename T>
- void retirePtr( T * p, void (* pFunc)(T *) )
- {
- retirePtr( retired_ptr( reinterpret_cast<void *>( p ), reinterpret_cast<free_retired_ptr_func>( pFunc ) ) );
- }
-
-            /// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
- void retirePtr( retired_ptr const& p )
- {
- if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
- liberate();
- }
-
- protected:
- /// Liberate function
- /** @anchor ptb_gc_liberate
-                The main function of the Pass The Buck algorithm. It tries to free the retired pointers that are not
-                trapped by any guard.
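-
-                Roughly, the flow is (an informal outline, not the exact implementation):
-                - privatize the global retired-pointer buffer;
-                - collect the pointers currently posted in all allocated guards;
-                - for each retired pointer: if no guard posts it, invoke its deleter;
-                  otherwise keep it ("pass the buck") for a later \p liberate pass.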
- */
- void liberate();
-
- //@}
-
- private:
- //@cond
-#if 0
- void liberate( details::liberate_set& set );
-#endif
- //@endcond
-
- public:
- /// Get internal statistics
- InternalState& getInternalState(InternalState& stat) const
- {
- return stat = m_stat;
- }
-
- /// Checks if internal statistics enabled
- bool isStatisticsEnabled() const
- {
- return m_bStatEnabled;
- }
-
- /// Enables/disables internal statistics
- bool enableStatistics( bool bEnable )
- {
- bool bEnabled = m_bStatEnabled;
- m_bStatEnabled = bEnable;
- return bEnabled;
- }
-
- private:
- //@cond none
- GarbageCollector( size_t nLiberateThreshold, size_t nInitialThreadGuardCount );
- ~GarbageCollector();
- //@endcond
- };
-
- /// Thread GC
- /**
-            To use the Pass The Buck reclamation schema, each thread object must be linked with an object of the ThreadGC class
-            that interacts with the GarbageCollector global object. The linkage is performed by calling \ref cds_threading "cds::threading::Manager::attachThread()"
-            at the start of each thread that uses the PTB GC. Before terminating a thread linked to the PTB GC it is necessary to call
-            \ref cds_threading "cds::threading::Manager::detachThread()".
-
-            The ThreadGC object maintains two lists:
-            \li Thread guard list: the list of thread-local guards (linked by the \p pThreadNext field)
-            \li Free guard list: the list of thread-local free guards (linked by the \p pNextFree field)
-            The free guard list is a subset of the thread guard list.
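-
-            A sketch of the thread lifetime (assuming the default threading model):
-            \code
-            void thread_func()
-            {
-                cds::threading::Manager::attachThread() ;  // links the ThreadGC to this thread
-                // ... use PTB-based containers ...
-                cds::threading::Manager::detachThread() ;  // returns the thread's guards to the global pool
-            }
-            \endcode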
- */
- class ThreadGC
- {
- GarbageCollector& m_gc ; ///< reference to GC singleton
- details::guard_data * m_pList ; ///< Local list of guards owned by the thread
- details::guard_data * m_pFree ; ///< The list of free guard from m_pList
-
- public:
- /// Default constructor
- ThreadGC()
- : m_gc( GarbageCollector::instance() )
- , m_pList( nullptr )
- , m_pFree( nullptr )
- {}
-
- /// The object is not copy-constructible
- ThreadGC( ThreadGC const& ) = delete;
-
- /// Dtor calls fini()
- ~ThreadGC()
- {
- fini();
- }
-
-            /// Initialization. Repeated calls are allowed
- void init()
- {
- if ( !m_pList ) {
- m_pList =
- m_pFree = m_gc.allocGuardList( m_gc.m_nInitialThreadGuardCount );
- }
- }
-
-            /// Finalization. Repeated calls are allowed
- void fini()
- {
- if ( m_pList ) {
- m_gc.freeGuardList( m_pList );
- m_pList =
- m_pFree = nullptr;
- }
- }
-
- public:
- /// Initializes guard \p g
- void allocGuard( Guard& g )
- {
- assert( m_pList != nullptr );
- if ( m_pFree ) {
- g.m_pGuard = m_pFree;
- m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
- }
- else {
- g.m_pGuard = m_gc.allocGuard();
- g.m_pGuard->pThreadNext = m_pList;
- m_pList = g.m_pGuard;
- }
- }
-
- /// Frees guard \p g
- void freeGuard( Guard& g )
- {
- assert( m_pList != nullptr );
- g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
- m_pFree = g.m_pGuard;
- }
-
- /// Initializes guard array \p arr
- template <size_t Count>
- void allocGuard( GuardArray<Count>& arr )
- {
- assert( m_pList != nullptr );
- size_t nCount = 0;
-
- while ( m_pFree && nCount < Count ) {
- arr[nCount].set_guard( m_pFree );
- m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
- ++nCount;
- }
-
- while ( nCount < Count ) {
- details::guard& g = arr[nCount++];
- g.set_guard( m_gc.allocGuard() );
- g.get_guard()->pThreadNext = m_pList;
- m_pList = g.get_guard();
- }
- }
-
- /// Frees guard array \p arr
- template <size_t Count>
- void freeGuard( GuardArray<Count>& arr )
- {
- assert( m_pList != nullptr );
-
- details::guard_data * pGuard;
- for ( size_t i = 0; i < Count - 1; ++i ) {
- pGuard = arr[i].get_guard();
- pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
- }
- pGuard = arr[Count-1].get_guard();
- pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
- m_pFree = arr[0].get_guard();
- }
-
-            /// Places retired pointer \p p and its deleter \p pFunc into the list of retired pointers for deferred reclamation
- template <typename T>
- void retirePtr( T * p, void (* pFunc)(T *) )
- {
- m_gc.retirePtr( p, pFunc );
- }
-
- //@cond
- void scan()
- {
- m_gc.liberate();
- }
- //@endcond
-
- };
-
- //////////////////////////////////////////////////////////
- // Inlines
-
- inline Guard::Guard(ThreadGC& gc)
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- inline Guard::~Guard()
- {
- getGC().freeGuard( *this );
- }
-
- template <size_t Count>
- inline GuardArray<Count>::GuardArray( ThreadGC& gc )
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- template <size_t Count>
- inline GuardArray<Count>::~GuardArray()
- {
- getGC().freeGuard( *this );
- }
-
- } // namespace ptb
-}} // namespace cds::gc
-
-#if CDS_COMPILER == CDS_COMPILER_MSVC
-# pragma warning(pop)
-#endif
-
-
-#endif // #ifndef __CDS_GC_PTB_PASS_THE_BUCK_H
#ifndef __CDS_THREADING__COMMON_H
#define __CDS_THREADING__COMMON_H
-#include <cds/gc/hp_decl.h>
-#include <cds/gc/dhp_decl.h>
+#include <cds/gc/hp/hp_decl.h>
+#include <cds/gc/dhp/dhp_decl.h>
#include <cds/urcu/details/gp_decl.h>
#include <cds/urcu/details/sh_decl.h>
<ClInclude Include="..\..\..\cds\details\lib.h" />\r
<ClInclude Include="..\..\..\cds\details\static_functor.h" />\r
<ClInclude Include="..\..\..\cds\gc\dhp.h" />\r
- <ClInclude Include="..\..\..\cds\gc\dhp_decl.h" />\r
- <ClInclude Include="..\..\..\cds\gc\dhp_impl.h" />\r
- <ClInclude Include="..\..\..\cds\gc\gc_fwd.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp_decl.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp_impl.h" />\r
<ClInclude Include="..\..\..\cds\gc\guarded_ptr.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_alloc.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_fwd.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_inline.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_retired.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_type.h" />\r
- <ClInclude Include="..\..\..\cds\gc\hp\hzp.h" />\r
- <ClInclude Include="..\..\..\cds\gc\hp_decl.h" />\r
- <ClInclude Include="..\..\..\cds\gc\hp_impl.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp_decl.h" />\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp_impl.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\basket_queue.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\cuckoo_set.h" />\r
<ClInclude Include="..\..\..\cds\intrusive\details\base.h" />\r
<ClInclude Include="..\..\..\cds\gc\default_gc.h" />\r
<ClInclude Include="..\..\..\cds\gc\hp.h" />\r
<ClInclude Include="..\..\..\cds\gc\nogc.h" />\r
- <ClInclude Include="..\..\..\cds\gc\ptb\ptb.h" />\r
<ClInclude Include="..\..\..\cds\gc\details\retired_ptr.h" />\r
<ClInclude Include="..\..\..\cds\user_setup\allocator.h" />\r
<ClInclude Include="..\..\..\cds\user_setup\cache_line.h" />\r
<Filter Include="Header Files\cds\gc">\r
<UniqueIdentifier>{a3c9928d-5261-4593-a8b9-728235f7056f}</UniqueIdentifier>\r
</Filter>\r
- <Filter Include="Header Files\cds\gc\ptb">\r
- <UniqueIdentifier>{53d28ee4-5fe9-4fa1-a617-53d8b0628eac}</UniqueIdentifier>\r
- </Filter>\r
<Filter Include="Header Files\cds\gc\details">\r
<UniqueIdentifier>{d7c48c0e-cc45-4a1a-b8e9-aa5b50abd22a}</UniqueIdentifier>\r
</Filter>\r
<Filter Include="Header Files\cds\gc\hp">\r
<UniqueIdentifier>{043c4eba-3bd4-4226-b214-e26f18b422a1}</UniqueIdentifier>\r
</Filter>\r
+ <Filter Include="Header Files\cds\gc\dhp">\r
+ <UniqueIdentifier>{9c0f5739-8d9d-46c2-bb91-90ca5beecc6d}</UniqueIdentifier>\r
+ </Filter>\r
</ItemGroup>\r
<ItemGroup>\r
<ClCompile Include="..\..\..\src\dllmain.cpp">\r
<ClInclude Include="..\..\..\cds\gc\nogc.h">\r
<Filter>Header Files\cds\gc</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\ptb\ptb.h">\r
- <Filter>Header Files\cds\gc\ptb</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\gc\details\retired_ptr.h">\r
<Filter>Header Files\cds\gc\details</Filter>\r
</ClInclude>\r
<ClInclude Include="..\..\..\cds\container\details\make_michael_list.h">\r
<Filter>Header Files\cds\container\details</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\hp_decl.h">\r
- <Filter>Header Files\cds\gc</Filter>\r
- </ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\hp_impl.h">\r
- <Filter>Header Files\cds\gc</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\compiler\cxx11_atomic.h">\r
<Filter>Header Files\cds\compiler</Filter>\r
</ClInclude>\r
<ClInclude Include="..\..\..\cds\details\binary_functor_wrapper.h">\r
<Filter>Header Files\cds\details</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\gc_fwd.h">\r
- <Filter>Header Files\cds\gc</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\urcu\details\sh.h">\r
<Filter>Header Files\cds\urcu\details</Filter>\r
</ClInclude>\r
<ClInclude Include="..\..\..\cds\gc\dhp.h">\r
<Filter>Header Files\cds\gc</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\dhp_decl.h">\r
- <Filter>Header Files\cds\gc</Filter>\r
- </ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\dhp_impl.h">\r
- <Filter>Header Files\cds\gc</Filter>\r
- </ClInclude>\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_alloc.h">\r
<Filter>Header Files\cds\gc\hp</Filter>\r
</ClInclude>\r
<ClInclude Include="..\..\..\cds\gc\hp\details\hp_type.h">\r
<Filter>Header Files\cds\gc\hp</Filter>\r
</ClInclude>\r
- <ClInclude Include="..\..\..\cds\gc\hp\hzp.h">\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp.h">\r
+ <Filter>Header Files\cds\gc\hp</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp.h">\r
+ <Filter>Header Files\cds\gc\dhp</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp_decl.h">\r
+ <Filter>Header Files\cds\gc\dhp</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\gc\dhp\dhp_impl.h">\r
+ <Filter>Header Files\cds\gc\dhp</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp_decl.h">\r
+ <Filter>Header Files\cds\gc\hp</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\..\cds\gc\hp\hp_impl.h">\r
<Filter>Header Files\cds\gc\hp</Filter>\r
</ClInclude>\r
</ItemGroup>\r
#include <algorithm> // std::fill
#include <functional> // std::hash
-#include <cds/gc/ptb/ptb.h>
+#include <cds/gc/dhp/dhp.h>
#include <cds/algo/int_algo.h>
namespace cds { namespace gc { namespace ptb {