//$$CDS-header$$
-#ifndef __CDS_GC_DETAILS_DHP_H
-#define __CDS_GC_DETAILS_DHP_H
+#ifndef CDSLIB_GC_DETAILS_DHP_H
+#define CDSLIB_GC_DETAILS_DHP_H
#include <mutex> // unique_lock
-#include <cds/cxx11_atomic.h>
+#include <cds/algo/atomic.h>
#include <cds/gc/details/retired_ptr.h>
#include <cds/details/aligned_allocator.h>
#include <cds/details/allocator.h>
-#include <cds/lock/spinlock.h>
+#include <cds/sync/spinlock.h>
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
/// Internal guard representation
struct guard_data {
- typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
- typedef void * guarded_ptr ; ///< type of value guarded
+ typedef void * guarded_ptr; ///< type of value guarded
- atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
+ atomics::atomic<guarded_ptr> pPost; ///< pointer guarded
+ atomics::atomic<guard_data *> pGlobalNext; ///< next item of global list of allocated guards
+ atomics::atomic<guard_data *> pNextFree; ///< pointer to the next item in global or thread-local free-list
- atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
- atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
-
- guard_data * pThreadNext ; ///< next item of thread's local list of guards
+ guard_data * pThreadNext; ///< next item of thread's local list of guards
guard_data() CDS_NOEXCEPT
: pPost( nullptr )
{
cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
- atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
- SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
+ atomics::atomic<guard_data *> m_GuardList; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ cds::sync::spin m_freeListLock; ///< Access to m_FreeGuardList
/*
Unfortunately, access to the list of free guard is lock-based.
details::guard_data * pGuard = m_GuardAllocator.New();
// Link guard to the list
- // m_GuardList is accumulated list and it cannot support concurrent deletion,
+ // m_GuardList is an accumulating list and it cannot support concurrent deletion,
// so, ABA problem is impossible for it
details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
do {
details::guard_data * pGuard;
{
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
if ( pGuard )
m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
{
pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
}
pLast = p;
}
- std::unique_lock<SpinLock> al( m_freeListLock );
+ std::unique_lock<cds::sync::spin> al( m_freeListLock );
pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
}
atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
public:
- CDS_CONSTEXPR retired_ptr_buffer() CDS_NOEXCEPT
+ retired_ptr_buffer() CDS_NOEXCEPT
: m_pHead( nullptr )
, m_nItemCount(0)
{}
return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
+    /// Pushes the list [pFirst, pLast] of retired pointers to the buffer
+    /**
+        The list is linked by the \p m_pNext field and contains \p nSize nodes.
+        Returns the buffer's item count after the push, consistent with \p push()
+        which returns the post-push count for a single node.
+    */
+    size_t push_list( retired_ptr_node* pFirst, retired_ptr_node* pLast, size_t nSize )
+    {
+        assert( pFirst );
+        assert( pLast );
+
+        retired_ptr_node * pHead = m_pHead.load( atomics::memory_order_acquire );
+        do {
+            pLast->m_pNext = pHead;
+            // pHead is changed by compare_exchange_weak
+        } while ( !m_pHead.compare_exchange_weak( pHead, pFirst, atomics::memory_order_release, atomics::memory_order_relaxed ) );
+
+        // fetch_add returns the value BEFORE the addition; nSize nodes were pushed,
+        // so the new count is old + nSize (not old + 1 as a copy/paste from push() would give)
+        return m_nItemCount.fetch_add( nSize, atomics::memory_order_relaxed ) + nSize;
+    }
+
/// Result of \ref dhp_gc_privatve "privatize" function.
/**
The \p privatize function returns retired node list as \p first and the size of that list as \p second.
/// Pool block
struct block {
- block * pNext ; ///< next block
- item items[m_nItemPerBlock] ; ///< item array
+ block * pNext; ///< next block
+ item items[m_nItemPerBlock]; ///< item array
};
- atomics::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
+    atomics::atomic<block *> m_pBlockListHead; ///< head of allocated block list
// To solve ABA problem we use epoch-based approach
- static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
- atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
- atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
- atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
+ static const unsigned int c_nEpochCount = 4; ///< Max epoch count
+ atomics::atomic<unsigned int> m_nCurEpoch; ///< Current epoch
+ atomics::atomic<item *> m_pEpochFree[c_nEpochCount]; ///< List of free item per epoch
+ atomics::atomic<item *> m_pGlobalFreeHead; ///< Head of unallocated item list
cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
CDS_STRICT_DO( pItem->m_pNext = nullptr );
}
- // link new block to block list
+ // links new block to the block list
{
block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
do {
} while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
- // link block's items to free list
+ // links block's items to the free list
{
item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
do {
m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
}
- /// Allocates new retired pointer
+ /// Allocates the new retired pointer
retired_ptr_node& alloc()
{
unsigned int nEpoch;
return node;
}
- /// Places the list (pHead, pTail) of retired pointers to pool (frees retired pointers)
+ /// Places the list [pHead, pTail] of retired pointers to pool (frees retired pointers)
/**
The list is linked on the m_pNextFree field
*/
/// Uninitialized guard
class guard
{
- friend class ThreadGC;
+ friend class dhp::ThreadGC;
protected:
details::guard_data * m_pGuard ; ///< Pointer to guard data
+
public:
/// Initialize empty guard.
CDS_CONSTEXPR guard() CDS_NOEXCEPT
: m_pGuard( nullptr )
{}
- /// The object is not copy-constructible
+ /// Copy-ctor is disabled
guard( guard const& ) = delete;
+ /// Move-ctor is disabled
+ guard( guard&& ) = delete;
+
/// Object destructor, does nothing
~guard() CDS_NOEXCEPT
{}
+    /// Returns the currently guarded pointer (as stored in guard_data::pPost)
+    /**
+        \p order - memory order for the atomic load; defaults to acquire.
+        The guard must be initialized (m_pGuard != nullptr).
+    */
+    void * get( atomics::memory_order order = atomics::memory_order_acquire ) const CDS_NOEXCEPT
+    {
+        assert( m_pGuard != nullptr );
+        return m_pGuard->pPost.load( order );
+    }
+
/// Guards pointer \p p
- void set( void * p ) CDS_NOEXCEPT
+ void set( void * p, atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
{
assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( p, atomics::memory_order_release );
- //CDS_COMPILER_RW_BARRIER;
+ m_pGuard->pPost.store( p, order );
}
/// Clears the guard
- void clear() CDS_NOEXCEPT
+ void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
{
assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
+ m_pGuard->pPost.store( nullptr, order );
}
/// Guards pointer \p p
public: // for ThreadGC.
/*
- GCC cannot compile code for template versions of ThreasGC::allocGuard/freeGuard,
+ GCC cannot compile code for template versions of ThreadGC::allocGuard/freeGuard,
        the compiler produces error: 'cds::gc::dhp::details::guard_data* cds::gc::dhp::details::guard::m_pGuard' is protected
despite the fact that ThreadGC is declared as friend for guard class.
- We should not like to declare m_pGuard member as public one.
Therefore, we have to add set_guard/get_guard public functions
*/
/// Set guard data
{
return m_pGuard;
}
+
+    /// Detaches and returns the internal guard data, leaving this guard uninitialized
+    /**
+        After the call \p m_pGuard is \p nullptr; ownership of the returned
+        \p guard_data passes to the caller.
+    */
+    details::guard_data * release_guard() CDS_NOEXCEPT
+    {
+        details::guard_data * p = m_pGuard;
+        m_pGuard = nullptr;
+        return p;
+    }
+
+    /// Checks whether the guard owns guard data (has been initialized)
+    bool is_initialized() const
+    {
+        return m_pGuard != nullptr;
+    }
};
} // namespace details
{
typedef details::guard base_class;
friend class ThreadGC;
-
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
public:
/// Allocates a guard from \p gc GC. \p gc must be ThreadGC object of current thread
- Guard( ThreadGC& gc ) CDS_NOEXCEPT;
+ Guard(); // inline in dhp_impl.h
/// Returns guard allocated back to pool of free guards
- ~Guard() CDS_NOEXCEPT; // inline after GarbageCollector
-
- /// Returns DHP GC object
- ThreadGC& getGC() CDS_NOEXCEPT
- {
- return m_gc;
- }
+ ~Guard(); // inline in dhp_impl.h
/// Guards pointer \p p
template <typename T>
class GuardArray
{
details::guard m_arr[Count] ; ///< array of guard
- ThreadGC& m_gc ; ///< ThreadGC object of current thread
const static size_t c_nCapacity = Count ; ///< Array capacity (equal to \p Count template parameter)
public:
public:
/// Allocates array of guards from \p gc which must be the ThreadGC object of current thread
- GuardArray( ThreadGC& gc ) CDS_NOEXCEPT; // inline below
-
- /// The object is not default-constructible
- GuardArray() = delete;
+ GuardArray(); // inline in dhp_impl.h
/// The object is not copy-constructible
GuardArray( GuardArray const& ) = delete;
+ /// The object is not move-constructible
+ GuardArray( GuardArray&& ) = delete;
+
/// Returns guards allocated back to pool
- ~GuardArray() CDS_NOEXCEPT; // inline below
+    ~GuardArray(); // inline in dhp_impl.h
/// Returns the capacity of array
CDS_CONSTEXPR size_t capacity() const CDS_NOEXCEPT
return c_nCapacity;
}
- /// Returns DHP ThreadGC object
- ThreadGC& getGC() CDS_NOEXCEPT
- {
- return m_gc;
- }
-
/// Returns reference to the guard of index \p nIndex (0 <= \p nIndex < \p Count)
details::guard& operator []( size_t nIndex ) CDS_NOEXCEPT
{
public:
/// Exception "No GarbageCollector object is created"
- CDS_DECLARE_EXCEPTION( DHPManagerEmpty, "Global DHP GarbageCollector is NULL" );
+ class not_initialized : public std::runtime_error
+ {
+ public:
+ //@cond
+ not_initialized()
+ : std::runtime_error( "Global DHP GarbageCollector is not initialized" )
+ {}
+ //@endcond
+ };
/// Internal GC statistics
struct InternalState
size_t m_nGuardCount ; ///< Total guard count
size_t m_nFreeGuardCount ; ///< Count of free guard
+ //@cond
InternalState()
: m_nGuardCount(0)
, m_nFreeGuardCount(0)
return *this;
}
+ //@endcond
};
private:
/// Returns pointer to GarbageCollector instance
/**
- If DHP GC is not initialized, \p DHPManagerEmpty exception is thrown
+ If DHP GC is not initialized, \p not_initialized exception is thrown
*/
static GarbageCollector& instance()
{
if ( m_pManager == nullptr )
- throw DHPManagerEmpty();
+ throw not_initialized();
return *m_pManager;
}
public:
/// Initializes guard \p g
- void allocGuard( Guard& g )
+ void allocGuard( dhp::details::guard& g )
{
assert( m_pList != nullptr );
- if ( m_pFree ) {
- g.m_pGuard = m_pFree;
- m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
- }
- else {
- g.m_pGuard = m_gc.allocGuard();
- g.m_pGuard->pThreadNext = m_pList;
- m_pList = g.m_pGuard;
+ if ( !g.m_pGuard ) {
+ if ( m_pFree ) {
+ g.m_pGuard = m_pFree;
+ m_pFree = m_pFree->pNextFree.load( atomics::memory_order_relaxed );
+ }
+ else {
+ g.m_pGuard = m_gc.allocGuard();
+ g.m_pGuard->pThreadNext = m_pList;
+ m_pList = g.m_pGuard;
+ }
}
}
/// Frees guard \p g
- void freeGuard( Guard& g )
+ void freeGuard( dhp::details::guard& g )
{
assert( m_pList != nullptr );
- g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
- g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
- m_pFree = g.m_pGuard;
+ if ( g.m_pGuard ) {
+ g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
+ m_pFree = g.m_pGuard;
+ g.m_pGuard = nullptr;
+ }
}
/// Initializes guard array \p arr
m_gc.retirePtr( p, pFunc );
}
+ /// Run retiring cycle
void scan()
{
m_gc.scan();
}
};
-
- //////////////////////////////////////////////////////////
- // Inlines
-
- inline Guard::Guard(ThreadGC& gc)
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- inline Guard::~Guard()
- {
- getGC().freeGuard( *this );
- }
-
- template <size_t Count>
- inline GuardArray<Count>::GuardArray( ThreadGC& gc )
- : m_gc( gc )
- {
- getGC().allocGuard( *this );
- }
- template <size_t Count>
- inline GuardArray<Count>::~GuardArray()
- {
- getGC().freeGuard( *this );
- }
-
} // namespace dhp
}} // namespace cds::gc
//@endcond
# pragma warning(pop)
#endif
-#endif // #ifndef __CDS_GC_DETAILS_DHP_H
+#endif // #ifndef CDSLIB_GC_DETAILS_DHP_H