#include <cds/algo/atomic.h>
#include <cds/os/thread.h>
#include <cds/details/bounded_array.h>
+#include <cds/user_setup/cache_line.h>
#include <cds/gc/details/hp_type.h>
#include <cds/gc/details/hp_alloc.h>
other threads have read-only access.
*/
struct hp_record {
hp_allocator<> m_hzp; ///< array of hazard pointers. Implicit \ref CDS_DEFAULT_ALLOCATOR dependency
retired_vector m_arrRetired ; ///< Retired pointer array
+ char padding[cds::c_nCacheLineSize]; ///< one cache line of padding so \p m_nSync does not share a cache line with the data above
+ atomics::atomic<unsigned int> m_nSync; ///< dummy var to introduce a synchronizes-with relationship between threads
+
/// Ctor
hp_record( const cds::gc::hp::GarbageCollector& HzpMgr ); // inline
~hp_record()
{
m_hzp.clear();
}
+
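+ /// Dummy read-modify-write operation.
+ /// The owner thread calls \p sync() right after publishing a hazard
+ /// pointer; \p Scan() and \p HelpScan() call it before reading each
+ /// slot. The \p acq_rel increment is intended to introduce the
+ /// synchronizes-with relationship documented for \p m_nSync.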
+ void sync()
+ {
+ m_nSync.fetch_add( 1, atomics::memory_order_acq_rel );
+ }
};
} // namespace details
m_HzpManager.Scan( m_pHzpRec );
m_HzpManager.HelpScan( m_pHzpRec );
}
+
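+ /// Synchronization point: forwards to \p hp_record::sync() of the thread's HP record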
+ void sync()
+ {
+ assert( m_pHzpRec != nullptr );
+ m_pHzpRec->sync();
+ }
};
/// Auto hp_guard.
{}
inline hp_record::hp_record( const cds::gc::hp::GarbageCollector& HzpMgr )
- : m_hzp( HzpMgr.getHazardPointerCount() ),
- m_arrRetired( HzpMgr )
+ : m_hzp( HzpMgr.getHazardPointerCount() )
+ , m_arrRetired( HzpMgr )
+ , m_nSync( 0 )
{}
}}} // namespace gc::hp::details
/**
- Clearing has relaxed semantics.
+ By default, clearing has release semantics.
*/
- void clear( atomics::memory_order order = atomics::memory_order_relaxed ) CDS_NOEXCEPT
+ void clear( atomics::memory_order order = atomics::memory_order_release ) CDS_NOEXCEPT
{
- // memory order is not necessary here
+ // the default release order keeps preceding uses of the protected
+ // object from being reordered past the clear
base_class::store( nullptr, order );
Can be used for a pointer that cannot be changed concurrently
*/
template <typename T>
- T * assign( T * p )
- {
- return base_class::operator =(p);
- }
+ T * assign( T * p ); // inline in hp_impl.h
//@cond
std::nullptr_t assign( std::nullptr_t )
GuardArray( GuardArray const& ) = delete;
GuardArray( GuardArray&& ) = delete;
GuardArray& operator=(GuardArray const&) = delete;
- GuardArray& operator-(GuardArray&&) = delete;
+ GuardArray& operator=(GuardArray&&) = delete;
//@endcond
/// Protects a pointer of type \p atomic<T*>
T pRet;
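+ // Publish-and-validate loop: set the hazard pointer, then re-read
+ // the source atomic; retry until the value is unchanged, so the
+ // pointee cannot have been retired between the load and the guard
+ // assignment.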
do {
pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
- } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
return pRet;
}
T pRet;
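+ // Same validation loop; f() converts the loaded value into the raw
+ // pointer that must be guarded.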
do {
assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
- } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
return pRet;
}
The function is equivalent to a simple assignment; no loop is performed.
*/
template <typename T>
- T * assign( size_t nIndex, T * p )
- {
- base_class::set(nIndex, p);
- return p;
- }
+ T * assign( size_t nIndex, T * p ); // inline in hp_impl.h
/// Store marked pointer \p p to the guard
/**
cds::threading::getGC<HP>().freeGuard( g );
}
+ template <typename T>
+ inline T * HP::Guard::assign( T * p )
+ {
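+ // store the hazard pointer, then issue the dummy acq_rel RMW so the
+ // store participates in a synchronizes-with edge with scanning threads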
+ T * pp = base_class::operator =(p);
+ cds::threading::getGC<HP>().sync();
+ return pp;
+ }
+
+ template <size_t Count>
+ template <typename T>
+ inline T * HP::GuardArray<Count>::assign( size_t nIndex, T * p )
+ {
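+ // same publish-then-sync protocol as HP::Guard::assign()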
+ base_class::set(nIndex, p);
+ cds::threading::getGC<HP>().sync();
+ return p;
+ }
+
template <typename T>
inline void HP::retire( T * p, void (* pFunc)(T *) )
{
//@endcond
};
+ //@cond
/// Totally relaxed memory ordering model (do not use!)
/**
In this memory model any memory constraint is equivalent to \p memory_order_relaxed.
See \p opt::memory_model for explanations
*/
struct total_relaxed_ordering {
- //@cond
static const atomics::memory_order memory_order_relaxed = atomics::memory_order_relaxed;
static const atomics::memory_order memory_order_consume = atomics::memory_order_relaxed;
static const atomics::memory_order memory_order_acquire = atomics::memory_order_relaxed;
static const atomics::memory_order memory_order_release = atomics::memory_order_relaxed;
static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_relaxed;
static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_relaxed;
- //@endcond
};
+ //@endcond
} // namespace v
/// [type-option] Base type traits option setter
// No HP records available for reuse
// Allocate and push a new HP record
hprec = NewHPRec();
- hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
- hprec->m_bFree.store( false, atomics::memory_order_relaxed );
-
- atomics::atomic_thread_fence( atomics::memory_order_release );
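+ // release stores replace the standalone release fence: ownership of
+ // the record is published before it is linked into the list below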
+ hprec->m_idOwner.store( curThreadId, atomics::memory_order_release );
+ hprec->m_bFree.store( false, atomics::memory_order_release );
hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
do {
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
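+ // dummy acq_rel RMW issued before each slot is read (see hp_record::sync())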
+ pRec->sync();
void * hptr = pNode->m_hzp[i];
if ( hptr )
plist.push_back( hptr );
while ( pNode ) {
if ( !pNode->m_bFree.load( atomics::memory_order_acquire ) ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
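+ // same dummy synchronization as in Scan() before reading the slot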
+ pRec->sync();
void * hptr = pNode->m_hzp[i];
if ( hptr ) {
dummyRetired.m_p = hptr;