Each data structure based on flat combining contains a class derived from \p %publication_record
*/
struct publication_record {
- CDS_ATOMIC::atomic<unsigned int> nRequest; ///< Request field (depends on data structure)
- CDS_ATOMIC::atomic<unsigned int> nState; ///< Record state: inactive, active, removed
+ atomics::atomic<unsigned int> nRequest; ///< Request field (depends on data structure)
+ atomics::atomic<unsigned int> nState; ///< Record state: inactive, active, removed
unsigned int nAge; ///< Age of the record
- CDS_ATOMIC::atomic<publication_record *> pNext; ///< Next record in publication list
+ atomics::atomic<publication_record *> pNext; ///< Next record in publication list
void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list
/// Initializes publication record
/// Returns the value of \p nRequest field
unsigned int op() const
{
- return nRequest.load( CDS_ATOMIC::memory_order_relaxed );
+ return nRequest.load( atomics::memory_order_relaxed );
}
/// Checks if the operation is done
bool is_done() const
{
- return nRequest.load( CDS_ATOMIC::memory_order_relaxed ) == req_Response;
+ return nRequest.load( atomics::memory_order_relaxed ) == req_Response;
}
};
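For illustration, a container built on flat combining derives its request record from \p %publication_record and adds the operation's arguments. A minimal sketch (the \p fc_record name, the request codes, and the \p pValue field are hypothetical, not part of the library):
\code
struct fc_record: publication_record
{
    enum { req_Push = 1, req_Pop = 2 };   // container-specific codes kept in nRequest
    int * pValue;                         // argument/result slot read by the combiner
};
\endcode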
if ( pRec->nState.load(memory_model::memory_order_relaxed) == active && pRec->pOwner ) {
// record is active and kernel is alive
unsigned int nState = active;
- pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
else {
// record is not in publication list or kernel already deleted
pRec->pNext = p;
// Failed CAS changes p
} while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast<publication_record *>(pRec),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
m_Stat.onActivatPubRecord();
}
}
if ( pPrev ) {
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
p->nState.store( inactive, memory_model::memory_order_release );
p = pNext;
if ( pPrev ) {
publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire );
if ( pPrev->pNext.compare_exchange_strong( p, pNext,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
cxx11_allocator().Delete( static_cast<publication_record_type *>( p ));
m_Stat.onDeletePubRecord();
#include <cds/details/defs.h>
#include <cds/details/aligned_type.h>
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
typedef enum memory_order {
memory_order_relaxed,
memory_order_consume,
memory_order_seq_cst
} memory_order;
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS)
// In C++11, make_unsigned is declared in <type_traits>
#include <boost/type_traits/make_unsigned.hpp> // for make_unsigned
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
// forward declarations
template <class T>
platform::signal_fence( order );
}
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_H
#include <cds/compiler/gcc/x86/cxx11_atomic32.h>
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
# ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
// primitives up to 32bit + fences
- using namespace cds::cxx11_atomics::platform::gcc::x86;
+ using namespace cds::cxx11_atomic::platform::gcc::x86;
# endif
//-----------------------------------------------------------------------------
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
#include <cstdint>
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace ia64 {
static inline void itanium_full_fence() CDS_NOEXCEPT
using namespace gcc::ia64;
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H
#define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace Sparc {
static inline void fence_before( memory_order order ) CDS_NOEXCEPT
using namespace gcc::Sparc;
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#undef CDS_SPARC_MB_ACQ
#include <cds/compiler/gcc/x86/cxx11_atomic32.h>
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
//-----------------------------------------------------------------------------
using namespace gcc::x86;
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H
#endif
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
static inline void fence_before( memory_order order ) CDS_NOEXCEPT
using namespace vc::amd64;
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H
#endif
//@cond
-namespace cds { namespace cxx11_atomics {
+namespace cds { namespace cxx11_atomic {
namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
static inline void fence_before( memory_order order ) CDS_NOEXCEPT
using namespace vc::x86;
#endif
} // namespace platform
-}} // namespace cds::cxx11_atomics
+}} // namespace cds::cxx11_atomic
//@endcond
#endif // #ifndef __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H
protected:
//@cond
- typedef CDS_ATOMIC::atomic<size_t> sequence_type;
+ typedef atomics::atomic<size_t> sequence_type;
struct cell_type
{
sequence_type sequence;
-Using \p CDS_ATOMIC macro you may call <tt>\<atomic\></tt> library functions and classes,
+Using the \p atomics namespace alias, you may call <tt>\<atomic\></tt> library functions and classes,
for example:
\code
- CDS_ATOMIC::atomic<int> atomInt;
- CDS_ATOMIC::atomic_store_explicit( &atomInt, 0, CDS_ATOMIC::memory_order_release );
+ atomics::atomic<int> atomInt;
+ atomics::atomic_store_explicit( &atomInt, 0, atomics::memory_order_release );
\endcode
\par Microsoft Visual C++
You can compile \p libcds and your projects with <tt>boost.atomic</tt> by specifying \p -DCDS_USE_BOOST_ATOMIC
on the compiler's command line.
*/
-namespace cxx11_atomics {
-}} // namespace cds::cxx11_atomics
+namespace cxx11_atomic {
+}} // namespace cds::cxx11_atomic
//@cond
#if defined(CDS_USE_BOOST_ATOMIC)
# include <boost/version.hpp>
# if BOOST_VERSION >= 105400
# include <boost/atomic.hpp>
-# define CDS_ATOMIC boost
+ namespace atomics = boost;
# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }
# else
#elif defined(CDS_USE_LIBCDS_ATOMIC)
// libcds atomic
# include <cds/compiler/cxx11_atomic.h>
-# define CDS_ATOMIC cds::cxx11_atomics
-# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomics {
+ namespace atomics = cds::cxx11_atomic;
+# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }}
#else
// Compiler provided C++11 atomic
# include <atomic>
-# define CDS_ATOMIC std
+ namespace atomics = std;
# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std {
# define CDS_CXX11_ATOMIC_END_NAMESPACE }
#endif
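Whichever branch is selected, client code is written once against the \p atomics alias and compiles unchanged on top of \p boost, the built-in \p cds::cxx11_atomic implementation, or \p std. A minimal sketch:
\code
atomics::atomic<int> spinFlag( 0 );

bool try_acquire()
{
    int nExpected = 0;
    // resolves to boost::, cds::cxx11_atomic:: or std:: atomics at compile time
    return spinFlag.compare_exchange_strong( nExpected, 1,
        atomics::memory_order_acquire, atomics::memory_order_relaxed );
}
\endcode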
class event_counter
{
//@cond
- CDS_ATOMIC::atomic_size_t m_counter;
+ atomics::atomic_size_t m_counter;
//@endcond
public:
value_type n //< new value of the counter
) CDS_NOEXCEPT
{
- m_counter.exchange( n, CDS_ATOMIC::memory_order_relaxed );
+ m_counter.exchange( n, atomics::memory_order_relaxed );
return n;
}
size_t n ///< addendum
) CDS_NOEXCEPT
{
- return m_counter.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ) + n;
+ return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n;
}
/// Subtraction
size_t n ///< subtrahend
) CDS_NOEXCEPT
{
- return m_counter.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ) - n;
+ return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n;
}
/// Get current value of the counter
operator size_t () const CDS_NOEXCEPT
{
- return m_counter.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_counter.load( atomics::memory_order_relaxed );
}
/// Preincrement
size_t operator ++() CDS_NOEXCEPT
{
- return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
+ return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
/// Postincrement
size_t operator ++(int) CDS_NOEXCEPT
{
- return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ return m_counter.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Predecrement
size_t operator --() CDS_NOEXCEPT
{
- return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) - 1;
+ return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1;
}
/// Postdecrement
size_t operator --(int) CDS_NOEXCEPT
{
- return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed );
+ return m_counter.fetch_sub( 1, atomics::memory_order_relaxed );
}
/// Get current value of the counter
size_t get() const CDS_NOEXCEPT
{
- return m_counter.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_counter.load( atomics::memory_order_relaxed );
}
/// Resets the counter to 0
void reset() CDS_NOEXCEPT
{
- m_counter.store( 0, CDS_ATOMIC::memory_order_release );
+ m_counter.store( 0, atomics::memory_order_release );
}
};
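A short usage sketch of \p event_counter as a relaxed statistics counter (the names are illustrative):
\code
event_counter nPushCount;        // e.g. number of push operations

void on_push()
{
    ++nPushCount;                // relaxed fetch_add; safe under concurrent updates
}

size_t pushes_so_far()
{
    return nPushCount.get();     // relaxed load; an approximate snapshot
}
\endcode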
class item_counter
{
public:
- typedef CDS_ATOMIC::atomic_size_t atomic_type ; ///< atomic type used
+ typedef atomics::atomic_size_t atomic_type ; ///< atomic type used
typedef size_t counter_type ; ///< Integral item counter type (size_t)
private:
{}
/// Returns current value of the counter
- counter_type value(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) const
+ counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const
{
return m_Counter.load( order );
}
}
/// Increments the counter. Semantics: postincrement
- counter_type inc(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed )
+ counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed )
{
return m_Counter.fetch_add( 1, order );
}
/// Decrements the counter. Semantics: postdecrement
- counter_type dec(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed)
+ counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed)
{
return m_Counter.fetch_sub( 1, order );
}
}
/// Resets count to 0
- void reset(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed)
+ void reset(atomics::memory_order order = atomics::memory_order_relaxed)
{
m_Counter.store( 0, order );
}
typedef size_t counter_type ; ///< Counter type
public:
/// Returns 0
- counter_type value(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) const
+ counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) const
{
return 0;
}
}
/// Dummy increment. Always returns 0
- size_t inc(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+ size_t inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{
return 0;
}
/// Dummy increment. Always returns 0
- size_t dec(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+ size_t dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{
return 0;
}
}
/// Dummy function
- void reset(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed)
+ void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed)
{}
};
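The two classes above are interchangeable counting policies: a container parametrized with \p item_counter maintains a real count, while the dummy counter (named \p empty_item_counter in the library) compiles counting away. A hedged sketch with a hypothetical \p my_container template:
\code
template <class ItemCounter>
class my_container {
    ItemCounter m_ItemCounter;
public:
    void on_insert() { m_ItemCounter.inc(); }
    void on_erase()  { m_ItemCounter.dec(); }
    size_t size() const { return m_ItemCounter.value(); }
};

my_container<item_counter>       counted;    // size() reflects the real item count
my_container<empty_item_counter> uncounted;  // size() is always 0, zero overhead
\endcode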
scheme used. However, every implementation supports a common interface for its type of data structure.
To implement any lock-free data structure, two things are needed:
- - atomic operation library conforming with C++11 memory model. The <b>libcds</b> has such feature, see cds::cxx11_atomics namespace for
+ - an atomic operation library conforming to the C++11 memory model. <b>libcds</b> provides such a library; see the cds::cxx11_atomic namespace for
details and compiler-specific information.
- a safe memory reclamation (SMR) or garbage collecting (GC) algorithm. <b>libcds</b> implements several
well-known SMR algorithms; see below.
{
private:
typedef cds::details::marked_ptr<T, Bitmask> marked_ptr;
- typedef CDS_ATOMIC::atomic<T *> atomic_impl;
+ typedef atomics::atomic<T *> atomic_impl;
atomic_impl m_atomic;
public:
/**
@headerfile cds/gc/hp.h
*/
- template <typename T> using atomic_ref = CDS_ATOMIC::atomic<T *>;
+ template <typename T> using atomic_ref = atomics::atomic<T *>;
/// Atomic marked pointer
/**
@headerfile cds/gc/hp.h
*/
- template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
/// Atomic type
/**
@headerfile cds/gc/hp.h
*/
- template <typename T> using atomic_type = CDS_ATOMIC::atomic<T>;
+ template <typename T> using atomic_type = atomics::atomic<T>;
#else
template <typename T>
- class atomic_ref: public CDS_ATOMIC::atomic<T *>
+ class atomic_ref: public atomics::atomic<T *>
{
- typedef CDS_ATOMIC::atomic<T *> base_class;
+ typedef atomics::atomic<T *> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_ref() = default;
};
template <typename T>
- class atomic_type: public CDS_ATOMIC::atomic<T>
+ class atomic_type: public atomics::atomic<T>
{
- typedef CDS_ATOMIC::atomic<T> base_class;
+ typedef atomics::atomic<T> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_type() = default;
};
template <typename MarkedPtr>
- class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+ class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
{
- typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+ typedef atomics::atomic<MarkedPtr> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default;
to the HP slot repeatedly until the guard's value equals \p toGuard
*/
template <typename T>
- T protect( CDS_ATOMIC::atomic<T> const& toGuard )
+ T protect( atomics::atomic<T> const& toGuard )
{
- T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
T pRet;
do {
pRet = assign( pCur );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
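Typical use, sketched: protect a shared pointer before dereferencing it, so that concurrent reclamation cannot free the node while the guard holds it (the \p node type and \p m_pHead member are illustrative):
\code
atomics::atomic<node *> m_pHead;    // illustrative shared location

cds::gc::HP::Guard g;
node * p = g.protect( m_pHead );    // the retry loop above guarantees p is guarded
if ( p ) {
    // safe to dereference p until g is cleared or goes out of scope
}
\endcode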
In fact, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
*/
template <typename T, class Func>
- T protect( CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+ T protect( atomics::atomic<T> const& toGuard, Func f )
{
- T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
T pRet;
do {
pRet = pCur;
assign( f( pCur ) );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
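The functor overload is useful when the atomic holds a marked pointer and only the cleared address must be published as the hazard pointer. A sketch, assuming \p marked_node_ptr is an alias for <tt>cds::details::marked_ptr<node, 1></tt> and \p m_pNext is an illustrative link field:
\code
struct get_ptr {
    node * operator()( marked_node_ptr p ) const
    {
        return p.ptr();     // strip the mark bits before publishing the HP
    }
};

atomics::atomic<marked_node_ptr> m_pNext;
marked_node_ptr pNext = guard.protect( m_pNext, get_ptr());
\endcode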
to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
*/
template <typename T>
- T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard )
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
{
T pRet;
do {
- pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_acquire) );
- } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed));
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) );
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
In fact, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
*/
template <typename T, class Func>
- T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
T pRet;
do {
- assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_acquire) ));
- } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed));
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_relaxed));
return pRet;
}
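With a guard array, the slot index selects which hazard pointer receives the protected value; sketched usage during a list traversal (the container internals are illustrative):
\code
cds::gc::HP::GuardArray<2> guards;

node * pPred = guards.protect( 0, m_pHead );         // slot 0 guards the predecessor
node * pCur  = guards.protect( 1, pPred->m_pNext );  // slot 1 guards the current node
\endcode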
/// Retired node descriptor
struct retired_node {
- CDS_ATOMIC::atomic<ContainerNode *> m_pNode ; ///< node to destroy
+ atomics::atomic<ContainerNode *> m_pNode ; ///< node to destroy
free_retired_ptr_func m_funcFree ; ///< pointer to the destructor function
size_t m_nNextFree ; ///< Next free item in retired array
- CDS_ATOMIC::atomic<unsigned int> m_nClaim ; ///< Access to reclaimed node
- CDS_ATOMIC::atomic<bool> m_bDone ; ///< the record is in work (concurrent access flag)
+ atomics::atomic<unsigned int> m_nClaim ; ///< Access to reclaimed node
+ atomics::atomic<bool> m_bDone ; ///< the record is being processed (concurrent access flag)
/// Default ctor
retired_node()
/// Compares two \ref retired_node
static bool Less( const retired_node& p1, const retired_node& p2 )
{
- return p1.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) < p2.m_pNode.load( CDS_ATOMIC::memory_order_relaxed );
+ return p1.m_pNode.load( atomics::memory_order_relaxed ) < p2.m_pNode.load( atomics::memory_order_relaxed );
}
/// Sets the node pointer and its disposer function
retired_node& set( ContainerNode * pNode, free_retired_ptr_func func )
{
- m_bDone.store( false, CDS_ATOMIC::memory_order_relaxed );
- m_nClaim.store( 0, CDS_ATOMIC::memory_order_relaxed );
+ m_bDone.store( false, atomics::memory_order_relaxed );
+ m_nClaim.store( 0, atomics::memory_order_relaxed );
m_funcFree = func;
- m_pNode.store( pNode, CDS_ATOMIC::memory_order_release );
+ m_pNode.store( pNode, atomics::memory_order_release );
CDS_COMPILER_RW_BARRIER;
return *this;
}
void free()
{
assert( m_funcFree != nullptr );
- m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed ));
+ m_funcFree( m_pNode.load( atomics::memory_order_relaxed ));
}
};
size_t nCount = 0;
const size_t nCapacity = capacity();
for ( size_t i = 0; i < nCapacity; ++i ) {
- if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != nullptr )
+ if ( m_arr[i].m_pNode.load( atomics::memory_order_relaxed ) != nullptr )
++nCount;
}
return nCount;
assert( !isFull());
size_t n = m_nFreeList;
- assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( m_arr[n].m_pNode.load( atomics::memory_order_relaxed ) == nullptr );
m_nFreeList = m_arr[n].m_nNextFree;
CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; )
m_arr[n].set( p, pFunc );
void pop( size_t n )
{
assert( n < capacity() );
- m_arr[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_release );
+ m_arr[n].m_pNode.store( nullptr, atomics::memory_order_release );
m_arr[n].m_nNextFree = m_nFreeList;
m_nFreeList = n;
}
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
-// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic<T>'
+// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
// needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
# pragma warning(disable: 4251)
#endif
friend class gc::HRC;
unsigned_ref_counter m_RC ; ///< reference counter
- CDS_ATOMIC::atomic<bool> m_bTrace ; ///< \p true - node is tracing by Scan
- CDS_ATOMIC::atomic<bool> m_bDeleted ; ///< \p true - node is deleted
+ atomics::atomic<bool> m_bTrace ; ///< \p true - node is being traced by Scan
+ atomics::atomic<bool> m_bDeleted ; ///< \p true - node is deleted
protected:
//@cond
/// Returns the mark whether the node is deleted
bool isDeleted() const CDS_NOEXCEPT
{
- return m_bDeleted.load( CDS_ATOMIC::memory_order_acquire );
+ return m_bDeleted.load( atomics::memory_order_acquire );
}
protected:
//@cond
- void clean( CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+ void clean( atomics::memory_order order ) CDS_NOEXCEPT
{
m_bDeleted.store( false, order );
m_bTrace.store( false, order );
{
thread_list_node * m_pNext ; ///< next list record
ThreadGC * m_pOwner ; ///< Owner of record
- CDS_ATOMIC::atomic<cds::OS::ThreadId> m_idOwner ; ///< Id of thread owned; 0 - record is free
+ atomics::atomic<cds::OS::ThreadId> m_idOwner ; ///< Id of the owner thread; 0 - the record is free
bool m_bFree ; ///< \p true if the node has been help-scanned
//@cond
~thread_list_node()
{
assert( m_pOwner == nullptr );
- assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
+ assert( m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId );
}
//@endcond
};
private:
- CDS_ATOMIC::atomic<thread_list_node *> m_pListHead ; ///< Head of thread list
+ atomics::atomic<thread_list_node *> m_pListHead ; ///< Head of thread list
static GarbageCollector * m_pGC ; ///< HRC garbage collector instance
/// Retire (deferred delete) node \p pNode guarded by \p hp hazard pointer
void retireNode( ContainerNode * pNode, details::HPGuard& hp, details::free_retired_ptr_func pFunc )
{
- assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) );
assert( pNode == hp );
retireNode( pNode, pFunc );
/// Retire (deferred delete) node \p pNode. Do not use this function directly!
void retireNode( ContainerNode * pNode, details::free_retired_ptr_func pFunc )
{
- assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) );
- pNode->m_bDeleted.store( true, CDS_ATOMIC::memory_order_release );
- pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ pNode->m_bDeleted.store( true, atomics::memory_order_release );
+ pNode->m_bTrace.store( false, atomics::memory_order_release );
m_pDesc->m_arrRetired.push( pNode, pFunc );
details::retired_vector::iterator itEnd = m_pDesc->m_arrRetired.end();
for ( details::retired_vector::iterator it = m_pDesc->m_arrRetired.begin(); it != itEnd; ++it ) {
details::retired_node& node = *it;
- ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
- if ( pNode && !node.m_bDone.load(CDS_ATOMIC::memory_order_acquire) )
+ ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire);
+ if ( pNode && !node.m_bDone.load(atomics::memory_order_acquire) )
pNode->cleanUp( this );
}
}
@headerfile cds/gc/hrc.h
*/
template <typename T>
- class atomic_ref: protected CDS_ATOMIC::atomic<T *>
+ class atomic_ref: protected atomics::atomic<T *>
{
//@cond
- typedef CDS_ATOMIC::atomic<T *> base_class;
+ typedef atomics::atomic<T *> base_class;
//@endcond
public:
//@cond
//@endcond
/// Read reference value
- T * load( CDS_ATOMIC::memory_order order ) const CDS_NOEXCEPT
+ T * load( atomics::memory_order order ) const CDS_NOEXCEPT
{
return base_class::load( order );
}
//@cond
- T * load( CDS_ATOMIC::memory_order order ) const volatile CDS_NOEXCEPT
+ T * load( atomics::memory_order order ) const volatile CDS_NOEXCEPT
{
return base_class::load( order );
}
//@endcond
/// Store new value to reference
- void store( T * pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+ void store( T * pNew, atomics::memory_order order ) CDS_NOEXCEPT
{
before_store( pNew );
T * pOld = base_class::exchange( pNew, order );
after_store( pOld, pNew );
}
//@cond
- void store( T * pNew, CDS_ATOMIC::memory_order order ) volatile CDS_NOEXCEPT
+ void store( T * pNew, atomics::memory_order order ) volatile CDS_NOEXCEPT
{
before_store( pNew );
T * pOld = base_class::exchange( pNew, order );
\p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
*/
- bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+ bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
{
before_cas( pNew );
bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
return bSuccess;
}
//@cond
- bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT
+ bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
{
before_cas( pNew );
bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail );
after_cas( bSuccess, pOld, pNew );
return bSuccess;
}
- bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+ bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
{
- return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+ return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
}
- bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT
+ bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
{
- return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+ return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed );
}
//@endcond
\p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
*/
- bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+ bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
{
before_cas( pNew );
bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
return bSuccess;
}
//@cond
- bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT
+ bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT
{
before_cas( pNew );
bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
after_cas( bSuccess, pOld, pNew );
return bSuccess;
}
- bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+ bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
{
- return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+ return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
}
- bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT
+ bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT
{
- return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed );
+ return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed );
}
//@endcond
static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT
{
if ( pNew )
- pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ pNew->m_bTrace.store( false, atomics::memory_order_release );
if ( pOld )
--pOld->m_RC;
}
{
if ( p ) {
++p->m_RC;
- p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ p->m_bTrace.store( false, atomics::memory_order_release );
}
}
static void after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT
class atomic_marked_ptr
{
//@cond
- CDS_ATOMIC::atomic< MarkedPtr > m_a;
+ atomics::atomic< MarkedPtr > m_a;
//@endcond
public:
/// Marked pointer type
/// Read reference value
- marked_ptr load(CDS_ATOMIC::memory_order order) const CDS_NOEXCEPT
+ marked_ptr load(atomics::memory_order order) const CDS_NOEXCEPT
{
return m_a.load(order);
}
/// Store new value to reference
- void store( marked_ptr pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+ void store( marked_ptr pNew, atomics::memory_order order ) CDS_NOEXCEPT
{
before_store( pNew.ptr() );
marked_ptr pOld = m_a.exchange( pNew, order );
}
/// Store new value to reference
- void store( typename marked_ptr::pointer_type pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT
+ void store( typename marked_ptr::pointer_type pNew, atomics::memory_order order ) CDS_NOEXCEPT
{
before_store( pNew );
marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order );
\p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
*/
- bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+ bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
{
before_cas( pNew.ptr() );
bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail );
return bSuccess;
}
//@cond
- bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+ bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
{
before_cas( pNew.ptr() );
bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success );
\p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type
*/
- bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT
+ bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT
{
// protect pNew
before_cas( pNew.ptr() );
return bSuccess;
}
//@cond
- bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT
+ bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT
{
before_cas( pNew.ptr() );
bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success );
static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
{
if ( pNew )
- pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ pNew->m_bTrace.store( false, atomics::memory_order_release );
if ( pOld )
--pOld->m_RC;
}
{
if ( p ) {
++p->m_RC;
- p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ p->m_bTrace.store( false, atomics::memory_order_release );
}
}
static void after_cas( bool bSuccess, typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT
template <typename T>
T * protect( atomic_ref<T> const& toGuard )
{
- T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T * pCur = toGuard.load(atomics::memory_order_relaxed);
T * pRet;
do {
pRet = assign( pCur );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
template <typename T, class Func>
T * protect( atomic_ref<T> const& toGuard, Func f )
{
- T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T * pCur = toGuard.load(atomics::memory_order_relaxed);
T * pRet;
do {
pRet = pCur;
assign( f( pCur ) );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
{
typename atomic_marked_ptr<T>::marked_ptr p;
do {
- assign( ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() );
- } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+ assign( ( p = link.load(atomics::memory_order_relaxed)).ptr() );
+ } while ( p != link.load(atomics::memory_order_acquire) );
return p;
}
{
typename atomic_marked_ptr<T>::marked_ptr pCur;
do {
- pCur = link.load(CDS_ATOMIC::memory_order_relaxed);
+ pCur = link.load(atomics::memory_order_relaxed);
assign( f( pCur ));
- } while ( pCur != link.load(CDS_ATOMIC::memory_order_acquire) );
+ } while ( pCur != link.load(atomics::memory_order_acquire) );
return pCur;
}
{
T * p;
do {
- p = assign( nIndex, link.load(CDS_ATOMIC::memory_order_relaxed) );
- } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+ p = assign( nIndex, link.load(atomics::memory_order_relaxed) );
+ } while ( p != link.load(atomics::memory_order_acquire) );
return p;
}
{
typename atomic_marked_ptr<T>::marked_ptr p;
do {
- assign( nIndex, ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() );
- } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+ assign( nIndex, ( p = link.load(atomics::memory_order_relaxed)).ptr() );
+ } while ( p != link.load(atomics::memory_order_acquire) );
return p;
}
{
T * pRet;
do {
- assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) ));
- } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
return pRet;
}
{
typename atomic_marked_ptr<T>::marked_ptr p;
do {
- p = link.load(CDS_ATOMIC::memory_order_relaxed);
+ p = link.load(atomics::memory_order_relaxed);
assign( nIndex, f( p ) );
- } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) );
+ } while ( p != link.load(atomics::memory_order_acquire) );
return p;
}
\li HazardPointer - type of hazard pointer. It is \ref hazard_pointer for Michael's Hazard Pointer reclamation scheme
*/
template <typename HazardPointer>
- class HPGuardT: protected CDS_ATOMIC::atomic<HazardPointer>
+ class HPGuardT: protected atomics::atomic<HazardPointer>
{
public:
typedef HazardPointer hazard_ptr ; ///< Hazard pointer type
private:
//@cond
- typedef CDS_ATOMIC::atomic<hazard_ptr> base_class;
+ typedef atomics::atomic<hazard_ptr> base_class;
//@endcond
protected:
T * operator =( T * p ) CDS_NOEXCEPT
{
// We use atomic store with explicit memory order because other threads may read this hazard pointer concurrently
- base_class::store( reinterpret_cast<hazard_ptr>(p), CDS_ATOMIC::memory_order_release );
+ base_class::store( reinterpret_cast<hazard_ptr>(p), atomics::memory_order_release );
return p;
}
*/
hazard_ptr get() const CDS_NOEXCEPT
{
- return base_class::load( CDS_ATOMIC::memory_order_acquire );
+ return base_class::load( atomics::memory_order_acquire );
}
/// Clears HP
void clear() CDS_NOEXCEPT
{
// memory order is not necessary here
- base_class::store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ base_class::store( nullptr, atomics::memory_order_relaxed );
//CDS_COMPILER_RW_BARRIER;
}
};
#if CDS_COMPILER == CDS_COMPILER_MSVC
# pragma warning(push)
- // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic<T>'
+ // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic<T>'
// needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector'
# pragma warning(disable: 4251)
#endif
struct hplist_node: public details::HPRec
{
hplist_node * m_pNextNode ; ///< next hazard ptr record in list
- CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
- CDS_ATOMIC::atomic<bool> m_bFree ; ///< true if record if free (not owned)
+ atomics::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ atomics::atomic<bool> m_bFree ; ///< true if record is free (not owned)
//@cond
hplist_node( const GarbageCollector& HzpMgr )
~hplist_node()
{
- assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == OS::c_NullThreadId );
- assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) );
+ assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId );
+ assert( m_bFree.load(atomics::memory_order_relaxed) );
}
//@endcond
};
- CDS_ATOMIC::atomic<hplist_node *> m_pListHead ; ///< Head of GC list
+ atomics::atomic<hplist_node *> m_pListHead ; ///< Head of GC list
static GarbageCollector * m_pHZPManager ; ///< GC instance pointer
typedef retired_ptr_node * handoff_ptr ; ///< trapped value type
typedef void * guarded_ptr ; ///< type of value guarded
- CDS_ATOMIC::atomic<guarded_ptr> pPost ; ///< pointer guarded
+ atomics::atomic<guarded_ptr> pPost ; ///< pointer guarded
#if 0
typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing the \p pHandOff field
handoff_ptr pHandOff ; ///< trapped pointer
#endif
- CDS_ATOMIC::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
- CDS_ATOMIC::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
+ atomics::atomic<guard_data *> pGlobalNext ; ///< next item of global list of allocated guards
+ atomics::atomic<guard_data *> pNextFree ; ///< pointer to the next item in global or thread-local free-list
guard_data * pThreadNext ; ///< next item of thread's local list of guards
void init()
{
- pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ pPost.store( nullptr, atomics::memory_order_relaxed );
}
//@endcond
/// Checks if the guard is free, that is, it does not guard any pointer
bool isFree() const
{
- return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr;
+ return pPost.load( atomics::memory_order_acquire ) == nullptr;
}
};
{
cds::details::Allocator<details::guard_data> m_GuardAllocator ; ///< guard allocator
- CDS_ATOMIC::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
- CDS_ATOMIC::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
+ atomics::atomic<guard_data *> m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field)
+ atomics::atomic<guard_data *> m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field)
SpinLock m_freeListLock ; ///< Access to m_FreeGuardList
/*
// Link guard to the list
// m_GuardList is an accumulating list that does not support concurrent deletion,
// so the ABA problem is impossible for it
- details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire );
+ details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire );
do {
- pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed );
// pHead is changed by compare_exchange_weak
- } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed ));
pGuard->init();
return pGuard;
~guard_allocator()
{
guard_data * pNext;
- for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
- pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed );
+ for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) {
+ pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed );
m_GuardAllocator.Delete( pData );
}
}
{
cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
- pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed);
+ pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed);
if ( pGuard )
- m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
+ m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
}
if ( !pGuard )
return allocNew();
*/
void free( guard_data * pGuard )
{
- pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
- pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
- m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed );
}
/// Allocates a list of guards
// so, we can use relaxed memory order
while ( --nCount ) {
guard_data * p = alloc();
- pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed );
+ pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed );
pLast = p;
}
- pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed );
+ pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed );
return pHead;
}
guard_data * pLast = pList;
while ( pLast->pThreadNext ) {
- pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ pLast->pPost.store( nullptr, atomics::memory_order_relaxed );
guard_data * p;
- pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed );
+ pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed );
pLast = p;
}
cds::lock::scoped_lock<SpinLock> al( m_freeListLock );
- pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
- m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed );
+ pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ m_FreeGuardList.store( pList, atomics::memory_order_relaxed );
}
/// Returns the head of the allocated guard list
guard_data * begin()
{
- return m_GuardList.load(CDS_ATOMIC::memory_order_acquire);
+ return m_GuardList.load(atomics::memory_order_acquire);
}
};
*/
class retired_ptr_buffer
{
- CDS_ATOMIC::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
- CDS_ATOMIC::atomic<size_t> m_nItemCount; ///< buffer's item count
+ atomics::atomic<retired_ptr_node *> m_pHead ; ///< head of buffer
+ atomics::atomic<size_t> m_nItemCount; ///< buffer's item count
public:
//@cond
~retired_ptr_buffer()
{
- assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr );
}
//@endcond
/// Pushes new node into the buffer. Returns current buffer size
size_t push( retired_ptr_node& node )
{
- retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire);
+ retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire);
do {
node.m_pNext = pHead;
// pHead is changed by compare_exchange_weak
- } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed ));
- return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1;
+ return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
}
/// Result of \ref ptb_gc_privatve "privatize" function.
privatize_result privatize()
{
privatize_result res;
- res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel );
+ res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel );
// The item counter is needed only as a threshold for the liberate function,
// so we may clear it without synchronization with m_pHead
- res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed );
+ res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed );
return res;
}
/// Returns current size of buffer (approximate)
size_t size() const
{
- return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return m_nItemCount.load(atomics::memory_order_relaxed);
}
};
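The \p push / \p privatize pair above is the usual Treiber-stack accumulation idiom: lock-free push at the head, wholesale detachment with \p exchange. A stripped-down sketch of the same pattern:
\code
struct node { node * next; };

atomics::atomic<node *> head( nullptr );

void push( node * n )
{
    node * h = head.load( atomics::memory_order_acquire );
    do {
        n->next = h;    // h is reloaded by a failed compare_exchange_weak
    } while ( !head.compare_exchange_weak( h, n,
        atomics::memory_order_release, atomics::memory_order_relaxed ));
}

node * take_all()       // analogue of privatize(): detach the whole list at once
{
    return head.exchange( nullptr, atomics::memory_order_acq_rel );
}
\endcode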
item items[m_nItemPerBlock] ; ///< item array
};
- CDS_ATOMIC::atomic<block *> m_pBlockListHead ; ///< head of of allocated block list
+ atomics::atomic<block *> m_pBlockListHead ; ///< head of allocated block list
// To solve the ABA problem we use an epoch-based approach
static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count
- CDS_ATOMIC::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
- CDS_ATOMIC::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
- CDS_ATOMIC::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
+ atomics::atomic<unsigned int> m_nCurEpoch ; ///< Current epoch
+ atomics::atomic<item *> m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch
+ atomics::atomic<item *> m_pGlobalFreeHead ; ///< Head of unallocated item list
cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator
// link new block to block list
{
- block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire);
+ block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire);
do {
pNew->pNext = pHead;
// pHead is changed by compare_exchange_weak
- } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
// link block's items to free list
{
- item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire);
+ item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire);
do {
pLastItem->m_pNextFree = pHead;
// pHead is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
}
unsigned int current_epoch() const
{
- return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1);
+ return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1);
}
unsigned int next_epoch() const
{
- return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1);
+ return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1);
}
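Because \p c_nEpochCount is a power of two, masking with <tt>c_nEpochCount - 1</tt> maps the monotonically growing counter onto a small ring of free lists: items freed now are parked in the slot one step behind the current epoch and become allocatable only after the counter has advanced past them. A tiny sketch of the index arithmetic:
\code
unsigned int nCur  = 0;                    // sample nCurEpoch value
unsigned int nMask = c_nEpochCount - 1;    // == 3 for c_nEpochCount == 4

assert( (nCur & nMask)       == 0 );       // current_epoch(): allocation slot
assert( ((nCur - 1) & nMask) == 3 );       // next_epoch(): parking slot for freed items
// after three inc_epoch() calls the parking slot becomes the allocation slot
// again, well after any stale pointer into it could still be in flight
\endcode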
//@endcond
, m_pGlobalFreeHead( nullptr )
{
for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i )
- m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed );
allocNewBlock();
}
~retired_ptr_pool()
{
block * p;
- for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) {
+ for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) {
p = pBlock->pNext;
m_BlockAllocator.Delete( pBlock );
}
/// Increments current epoch
void inc_epoch()
{
- m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel );
+ m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel );
}
//@endcond
unsigned int nEpoch;
item * pItem;
for (;;) {
- pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
+ pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
if ( !pItem )
goto retry;
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
goto success;
}
/*
- item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire);
+ item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire);
while ( pItem ) {
- if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ))
goto success;
}
*/
// Epoch free list is empty
// Alloc from global free list
retry:
- pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire );
+ pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire );
do {
if ( !pItem ) {
allocNewBlock();
goto retry;
}
// pItem is changed by compare_exchange_weak
- } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed ));
success:
CDS_STRICT_DO( pItem->m_pNextFree = nullptr );
unsigned int nEpoch;
item * pCurHead;
do {
- pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire);
+ pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire);
pTail->m_pNextFree = pCurHead;
- } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed ));
}
};
void set( void * p )
{
assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release );
+ m_pGuard->pPost.store( p, atomics::memory_order_release );
//CDS_COMPILER_RW_BARRIER;
}
void clear()
{
assert( m_pGuard != nullptr );
- m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER );
}
/// Internal GC statistics
struct internal_stat
{
- CDS_ATOMIC::atomic<size_t> m_nGuardCount ; ///< Total guard count
- CDS_ATOMIC::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
+ atomics::atomic<size_t> m_nGuardCount ; ///< Total guard count
+ atomics::atomic<size_t> m_nFreeGuardCount ; ///< Count of free guard
internal_stat()
: m_nGuardCount(0)
InternalState& operator =( internal_stat const& s )
{
- m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
- m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed);
+ m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed);
+ m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed);
return *this;
}
details::guard_allocator<> m_GuardPool ; ///< Guard pool
details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers
details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating
- //CDS_ATOMIC::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate fnction call
+ //atomics::atomic<size_t> m_nInLiberate ; ///< number of parallel \p liberate function calls
- CDS_ATOMIC::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
+ atomics::atomic<size_t> m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate
const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC
internal_stat m_stat ; ///< Internal statistics
/// Places retired pointer \p p into the thread's array of retired pointers for deferred reclamation
void retirePtr( retired_ptr const& p )
{
- if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) )
+ if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) )
liberate();
}
assert( m_pList != nullptr );
if ( m_pFree ) {
g.m_pGuard = m_pFree;
- m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
+ m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
}
else {
g.m_pGuard = m_gc.allocGuard();
void freeGuard( Guard& g )
{
assert( m_pList != nullptr );
- g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
- g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
+ g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
m_pFree = g.m_pGuard;
}
while ( m_pFree && nCount < Count ) {
arr[nCount].set_guard( m_pFree );
- m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed);
+ m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed);
++nCount;
}
details::guard_data * pGuard;
for ( size_t i = 0; i < Count - 1; ++i ) {
pGuard = arr[i].get_guard();
- pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
- pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed );
}
pGuard = arr[Count-1].get_guard();
- pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
- pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed );
+ pGuard->pPost.store( nullptr, atomics::memory_order_relaxed );
+ pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed );
m_pFree = arr[0].get_guard();
}
/**
@headerfile cds/gc/ptb.h
*/
- template <typename T> using atomic_ref = CDS_ATOMIC::atomic<T *>;
+ template <typename T> using atomic_ref = atomics::atomic<T *>;
/// Atomic type
/**
@headerfile cds/gc/ptb.h
*/
- template <typename T> using atomic_type = CDS_ATOMIC::atomic<T>;
+ template <typename T> using atomic_type = atomics::atomic<T>;
/// Atomic marked pointer
/**
@headerfile cds/gc/ptb.h
*/
- template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
#else
template <typename T>
- class atomic_ref: public CDS_ATOMIC::atomic<T *>
+ class atomic_ref: public atomics::atomic<T *>
{
- typedef CDS_ATOMIC::atomic<T *> base_class;
+ typedef atomics::atomic<T *> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_ref() = default;
};
template <typename T>
- class atomic_type: public CDS_ATOMIC::atomic<T>
+ class atomic_type: public atomics::atomic<T>
{
- typedef CDS_ATOMIC::atomic<T> base_class;
+ typedef atomics::atomic<T> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_type() = default;
};
template <typename MarkedPtr>
- class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+ class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
{
- typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+ typedef atomics::atomic<MarkedPtr> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_marked_ptr() = default;
to the HP slot repeatedly until the guard's value equals \p toGuard
*/
template <typename T>
- T protect( CDS_ATOMIC::atomic<T> const& toGuard )
+ T protect( atomics::atomic<T> const& toGuard )
{
- T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
T pRet;
do {
pRet = assign( pCur );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
In fact, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
*/
template <typename T, class Func>
- T protect( CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+ T protect( atomics::atomic<T> const& toGuard, Func f )
{
- T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed);
+ T pCur = toGuard.load(atomics::memory_order_relaxed);
T pRet;
do {
pRet = pCur;
assign( f( pCur ) );
- pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire);
+ pCur = toGuard.load(atomics::memory_order_acquire);
} while ( pRet != pCur );
return pCur;
}
/// Get native guarded pointer stored
guarded_pointer get_native() const
{
- return base_class::get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed);
+ return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed);
}
};
to the slot \p nIndex repeatedly until the guard's value equals \p toGuard
*/
template <typename T>
- T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard )
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard )
{
T pRet;
do {
- pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_relaxed) );
- } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+ pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) );
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
return pRet;
}
Really, the result of <tt> f( toGuard.load() ) </tt> is assigned to the hazard pointer.
*/
template <typename T, class Func>
- T protect(size_t nIndex, CDS_ATOMIC::atomic<T> const& toGuard, Func f )
+ T protect(size_t nIndex, atomics::atomic<T> const& toGuard, Func f )
{
T pRet;
do {
- assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) ));
- } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire));
+ assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) ));
+ } while ( pRet != toGuard.load(atomics::memory_order_acquire));
return pRet;
}
/// Get native guarded pointer stored
guarded_pointer get_native( size_t nIndex ) const
{
- return base_class::operator[](nIndex).get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed);
+ return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed);
}
/// Capacity of the guard array
while ( true ) {
marked_ptr pNext = aGuards.protect( 0, m_pNext );
- if ( pNext.ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( pNext.ptr() && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
- m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
continue;
}
else {
virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
{
if ( bConcurrent ) {
- marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
- do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ marked_ptr pNext = m_pNext.load(atomics::memory_order_relaxed);
+ do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
}
else {
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
}
}
};
//@cond
static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
- CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
- CDS_ATOMIC::atomic<size_t> m_nCapacity ; ///< lock array capacity
+ atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
+ atomics::atomic<size_t> m_nCapacity ; ///< lock array capacity
lock_array_ptr m_arrLocks[ c_nArity ] ; ///< Lock array. The capacity of array is specified in constructor.
spinlock_type m_access ; ///< access to m_arrLocks
statistics_type m_Stat ; ///< internal statistics
// wait while resizing
while ( true ) {
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
break;
bkoff();
parrLock[i]->lock();
}
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks[0] == pLockArr[0] ) {
m_Stat.onCellLock();
return;
// It is assumed that the current thread already has a lock
// and requires a second lock for another hash
- size_t const nMask = m_nCapacity.load(CDS_ATOMIC::memory_order_acquire) - 1;
+ size_t const nMask = m_nCapacity.load(atomics::memory_order_acquire) - 1;
size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask);
if ( nCell == lock_array_type::c_nUnspecifiedCell ) {
m_Stat.onSecondCellLockFailed();
back_off bkoff;
while ( true ) {
owner_t ownNull = 0;
- if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) {
m_arrLocks[0]->lock_all();
m_Stat.onFullLock();
void release_all()
{
m_arrLocks[0]->unlock_all();
- m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+ m_Owner.store( 0, atomics::memory_order_release );
}
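The owner word packs the locking thread's id together with a busy flag in bit 0; a small hedged illustration of the encoding behind the CAS above:

    owner_t me  = 42;                    // example thread id
    owner_t own = (me << 1) | 1;         // id in upper bits, busy flag in bit 0
    assert( own & 1 );                           // resize/full lock in progress
    assert( (own >> 1) == (me & c_nOwnerMask) ); // the ownership test used above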
void acquire_resize( lock_array_ptr * pOldLocks )
// global lock
owner_t ownNull = 0;
- if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) {
if ( pOldLocks[0] != m_arrLocks[0] ) {
- m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+ m_Owner.store( 0, atomics::memory_order_release );
m_Stat.onResizeLockArrayChanged();
}
else {
void release_resize( lock_array_ptr * pOldLocks )
{
- m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+ m_Owner.store( 0, atomics::memory_order_release );
pOldLocks[0]->unlock_all();
}
//@endcond
for ( unsigned int i = 0; i < c_nArity; ++i )
m_arrLocks[i] = pNew[i];
}
- m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_release );
+ m_nCapacity.store( nCapacity, atomics::memory_order_release );
m_Stat.onResize();
}
*/
size_t lock_count() const
{
- return m_nCapacity.load(CDS_ATOMIC::memory_order_relaxed);
+ return m_nCapacity.load(atomics::memory_order_relaxed);
}
/// Returns the arity of \p refinable mutex policy
{
assert( p != nullptr );
- p->m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
+ p->m_pNext.store( nullptr, atomics::memory_order_release );
allocator_type().Delete( p );
}
};
typedef typename update_desc_type::update_ptr update_ptr ; ///< Marked pointer to update descriptor
key_type m_Key ; ///< Regular key
- CDS_ATOMIC::atomic<base_class *> m_pLeft ; ///< Left subtree
- CDS_ATOMIC::atomic<base_class *> m_pRight ; ///< Right subtree
- CDS_ATOMIC::atomic<update_ptr> m_pUpdate ; ///< Update descriptor
+ atomics::atomic<base_class *> m_pLeft ; ///< Left subtree
+ atomics::atomic<base_class *> m_pRight ; ///< Right subtree
+ atomics::atomic<update_ptr> m_pUpdate ; ///< Update descriptor
//@cond
uintptr_t m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4
//@endcond
bool check_consistency( internal_node const * pRoot ) const
{
- tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed );
- tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed );
+ tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed );
+ tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed );
assert( pLeft );
assert( pRight );
return p;
}
- update_ptr search_protect_update( search_result& res, CDS_ATOMIC::atomic<update_ptr> const& src ) const
+ update_ptr search_protect_update( search_result& res, atomics::atomic<update_ptr> const& src ) const
{
update_ptr ret;
update_ptr upd( src.load( memory_model::memory_order_relaxed ) );
tree_node * pLeaf = static_cast<tree_node *>( pOp->iInfo.pLeaf );
if ( pOp->iInfo.bRightLeaf ) {
CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_relaxed, atomics::memory_order_relaxed ));
}
else {
CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_relaxed, atomics::memory_order_relaxed ));
}
// Unflag parent
update_ptr cur( pOp, update_desc::IFlag );
CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
}
bool check_delete_precondition( search_result& res ) const
update_ptr pUpdate( pOp->dInfo.pUpdateParent );
update_ptr pMark( pOp, update_desc::Mark );
if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // *
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
help_marked( pOp );
// Undo grandparent dInfo
update_ptr pDel( pOp, update_desc::DFlag );
if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
retire_update_desc( pOp );
}
}
}
- tree_node * protect_sibling( typename gc::Guard& guard, CDS_ATOMIC::atomic<tree_node *>& sibling )
+ tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic<tree_node *>& sibling )
{
typename gc::Guard guardLeaf;
if ( pOp->dInfo.bRightParent ) {
CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
}
else {
CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite,
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
}
update_ptr upd( pOp, update_desc::DFlag );
CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ memory_model::memory_order_release, atomics::memory_order_relaxed ));
}
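A hedged summary of the update-descriptor protocol these helpers implement (after Ellen et al.'s non-blocking binary search tree):
// IFlag - the parent's m_pUpdate announces a pending insert (finished by help_insert)
// DFlag - the grandparent's m_pUpdate announces a pending delete
// Mark  - the parent is marked for splicing out (finished by help_marked)
// A thread that reads a flagged descriptor helps complete that pending
// operation before retrying its own, which keeps the tree lock-free.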
bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res )
update_ptr updCur( res.updParent.ptr() );
if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
// do insert
help_insert( pOp );
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp )) {
// res.pLeaf is not deleted yet since it is guarded
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp ))
break;
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp ))
break;
bool check_consistency( internal_node const * pRoot ) const
{
- tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed );
- tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed );
+ tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed );
+ tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed );
assert( pLeft );
assert( pRight );
tree_node * pLeaf = static_cast<tree_node *>( pOp->iInfo.pLeaf );
if ( pOp->iInfo.bRightLeaf ) {
pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
else {
pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast<tree_node *>( pOp->iInfo.pNew ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
update_ptr cur( pOp, update_desc::IFlag );
pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
bool check_delete_precondition( search_result& res )
update_ptr pUpdate( pOp->dInfo.pUpdateParent );
update_ptr pMark( pOp, update_desc::Mark );
if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark,
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
help_marked( pOp );
retire_node( pOp->dInfo.pParent, rl );
// Undo grandparent dInfo
update_ptr pDel( pOp, update_desc::DFlag );
if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
retire_update_desc( pOp, rl, false );
}
pOp->dInfo.bRightLeaf
? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire )
: pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
else {
pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p,
pOp->dInfo.bRightLeaf
? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire )
: pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
update_ptr upd( pOp, update_desc::DFlag );
pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ memory_model::memory_order_release, atomics::memory_order_relaxed );
}
template <typename KeyValue, typename Compare>
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp, updRetire )) {
// res.pLeaf is not deleted yet since RCU is blocked
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp, updRetire )) {
ptr = node_traits::to_value_ptr( res.pLeaf );
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp, updRetire )) {
result = node_traits::to_value_ptr( res.pLeaf );
update_ptr updGP( res.updGrandParent.ptr() );
if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
if ( help_delete( pOp, updRetire )) {
result = node_traits::to_value_ptr( res.pLeaf );
update_ptr updCur( res.updParent.ptr() );
if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
// do insert
help_insert( pOp );
/// Checks if node is marked
bool is_marked() const
{
- return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+ return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
}
/// Default ctor
*/
static void is_empty( node_type const * pNode )
{
- assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
}
};
/// Checks if node is marked
bool is_marked() const
{
- return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+ return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
}
node()
while ( true ) {
marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
node * pNext = pNextMarked.ptr();
- if ( pNext != nullptr && pNext->m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ) ) {
+ if ( pNext != nullptr && pNext->m_bDeleted.load( atomics::memory_order_acquire ) ) {
marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
- m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.compare_exchange_weak( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
continue;
}
else {
virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
{
if ( bConcurrent ) {
- marked_ptr pNext( m_pNext.load(CDS_ATOMIC::memory_order_relaxed));
- do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ marked_ptr pNext( m_pNext.load(atomics::memory_order_relaxed));
+ do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
}
else {
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
}
}
};
typedef Lock lock_type ; ///< Lock type
typedef Tag tag ; ///< tag
- CDS_ATOMIC::atomic<node *> m_pNext ; ///< pointer to the next node in the list
+ atomics::atomic<node *> m_pNext ; ///< pointer to the next node in the list
mutable lock_type m_Lock ; ///< Node lock
node()
typedef Tag tag ; ///< tag
typedef cds::details::marked_ptr<node, 1> marked_ptr ; ///< marked pointer
- typedef CDS_ATOMIC::atomic<marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC
+ typedef atomics::atomic<marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC
atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list
mutable lock_type m_Lock ; ///< Node lock
/// Checks if node is marked
bool is_marked() const
{
- return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0;
+ return m_pNext.load(atomics::memory_order_relaxed).bits() != 0;
}
/// Default ctor
/// Clears internal fields
void clear()
{
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+ m_pNext.store( marked_ptr(), atomics::memory_order_release );
}
};
} // namespace lazy_list
//@cond
node()
{
- m_Links.store( anchor(0,0), CDS_ATOMIC::memory_order_release );
+ m_Links.store( anchor(0,0), atomics::memory_order_release );
}
explicit node( anchor const& a )
: m_Links()
, m_nIndex(0)
{
- m_Links.store( a, CDS_ATOMIC::memory_order_release );
+ m_Links.store( a, atomics::memory_order_release );
}
//@endcond
};
static void is_empty( const node_type * pNode )
{
# ifdef _DEBUG
- anchor a = pNode->m_Links.load(CDS_ATOMIC::memory_order_relaxed);
+ anchor a = pNode->m_Links.load(atomics::memory_order_relaxed);
assert( a.idxLeft == 0 && a.idxRight == 0 );
# endif
}
# endif
mapper_type m_set;
- CDS_ATOMIC::atomic<unsigned int> m_nLastIndex;
+ atomics::atomic<unsigned int> m_nLastIndex;
public:
:m_Anchor()
,m_Mapper( 4096, 4 )
{
- m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release );
+ m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release );
// GC and node_type::gc must be the same
static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
:m_Anchor()
,m_Mapper( nMaxItemCount, nLoadFactor )
{
- m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release );
+ m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release );
// GC and node_type::gc must be the same
static_assert(( std::is_same<gc, typename node_type::gc>::value ), "GC and node_type::gc must be the same");
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
}
};
while ( true ) {
marked_ptr pNextMarked( aGuards.protect( 0, m_pNext ));
node * pNext = pNextMarked.ptr();
- if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
marked_ptr p = aGuards.protect( 1, pNext->m_pNext );
- m_pNext.compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
continue;
}
else {
virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
{
if ( bConcurrent ) {
- marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_acquire);
- do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ marked_ptr pNext = m_pNext.load(atomics::memory_order_acquire);
+ do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
}
else {
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
}
}
};
marked_node_ptr cur(pos.pCur);
pNode->m_pNext.store( cur, memory_model::memory_order_relaxed );
- return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
}
bool unlink_node( position& pos )
// Mark the node (logical deleting)
marked_node_ptr next(pos.pNext, 0);
- if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
// physical deletion may be performed by the search function if it detects that a node is logically deleted (marked)
// the CAS may succeed here or in another thread that is searching for something
marked_node_ptr cur(pos.pCur);
- if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
retire_node( pos.pCur );
return true;
}
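A hedged sketch of the two-phase deletion performed by the two CASes above (Michael's lock-free ordered list):
// Phase 1, logical:  set mark bit 0 in pCur->m_pNext
//     [pPrev] -> [pCur | mark] -> [pNext]
// Phase 2, physical: swing pPrev past the marked node
//     [pPrev] -> [pNext]        (pCur is handed to the GC via retire_node)
// Any traversing thread that observes the mark may perform phase 2 on our behalf.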
if ( pNext.bits() == 1 ) {
// pCur is marked, i.e. logically deleted. Help the erase/unlink function to unlink the pCur node
marked_node_ptr cur( pCur.ptr());
- if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
retire_node( pCur.ptr() );
}
else {
typedef gc::nogc gc ; ///< Garbage collector
typedef Tag tag ; ///< tag
- typedef CDS_ATOMIC::atomic< node * > atomic_ptr ; ///< atomic marked pointer
+ typedef atomics::atomic< node * > atomic_ptr ; ///< atomic marked pointer
atomic_ptr m_pNext ; ///< pointer to the next node in the container
link_checker::is_empty( pNode );
pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed );
- return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
//@endcond
marked_node_ptr p( pos.pCur );
pNode->m_pNext.store( p, memory_model::memory_order_relaxed );
- return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed );
}
bool unlink_node( position& pos )
{
// Mark the node (logical deleting)
marked_node_ptr next(pos.pNext, 0);
- if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
marked_node_ptr cur(pos.pCur);
- if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed ))
return true;
next |= 1;
- CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, atomics::memory_order_relaxed ));
}
return false;
}
if ( pNext == nullptr )
return false ; // queue is empty
- if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire);
if ( h == t )
- base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
break;
}
node_type * t = m_pTail.load(memory_model::memory_order_acquire);
if ( h == t ) {
// We need to help the pending enqueue operation
- m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
m_Stat.onBadTail();
continue;
}
- if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ))
break;
m_Stat.onDequeueRace();
node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire);
if ( pNext != nullptr ) {
// Tail is misplaced, advance it
- m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed );
m_Stat.onBadTail();
continue;
}
node_type * tmp = nullptr;
- if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ))
break;
m_Stat.onEnqueueRace();
++m_ItemCounter;
m_Stat.onEnqueue();
- if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ))
+ if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ))
m_Stat.onAdvanceTailFailed();
return true;
}
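For reference, a self-contained sketch of the Michael-Scott enqueue with the same tail-helping step, written against plain std::atomic (illustrative only; it omits the hazard-pointer protection the real container needs):

    #include <atomic>

    struct msq_node {
        std::atomic<msq_node *> m_pNext { nullptr };
    };

    void enqueue( std::atomic<msq_node *>& tail, msq_node * pNew )
    {
        for (;;) {
            msq_node * t    = tail.load( std::memory_order_acquire );
            msq_node * next = t->m_pNext.load( std::memory_order_acquire );
            if ( next ) {
                // Tail lags behind: help advance it, then retry
                tail.compare_exchange_weak( t, next,
                    std::memory_order_release, std::memory_order_relaxed );
                continue;
            }
            msq_node * expected = nullptr;
            if ( t->m_pNext.compare_exchange_strong( expected, pNew,
                    std::memory_order_release, std::memory_order_relaxed )) {
                // Linked; swinging the tail may fail if another thread helped
                tail.compare_exchange_strong( t, pNew,
                    std::memory_order_acq_rel, std::memory_order_relaxed );
                return;
            }
        }
    }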
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
- assert( pNode->m_pPrev.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pPrev.load( atomics::memory_order_relaxed ) == nullptr );
}
};
fix_list( pTail, pHead );
continue;
}
- if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
// dequeue success
break;
}
node_type * pTail = guards.protect( 0, m_pTail, node_to_value() ) ; // Read the tail
while( true ) {
pNew->m_pNext.store( pTail, memory_model::memory_order_release );
- if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { // Try to CAS the tail
+ if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { // Try to CAS the tail
pTail->m_pPrev.store( pNew, memory_model::memory_order_release ) ; // Success, write prev
++m_ItemCounter;
m_Stat.onEnqueue();
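A hedged note on the optimistic-queue fragment above (after Ladan-Mozes and Shavit): enqueue CASes m_pTail first and only then stores the old tail's m_pPrev link, so the prev chain may be momentarily broken; the dequeue path detects this and calls fix_list() to rebuild the prev links from the always-consistent next pointers.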
// Segment
struct segment: public boost::intrusive::slist_base_hook<>
{
- CDS_ATOMIC::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor
+ atomics::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor
size_t version; // version tag (ABA prevention tag)
// the cell array is placed here, in one contiguous memory block
// Initializes the segment
segment( size_t nCellCount )
// MSVC warning C4355: 'this': used in base member initializer list
- : cells( reinterpret_cast< CDS_ATOMIC::atomic< cell > * >( this + 1 ))
+ : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 ))
, version( 0 )
{
init( nCellCount );
void init( size_t nCellCount )
{
- CDS_ATOMIC::atomic< cell > * pLastCell = cells + nCellCount;
- for ( CDS_ATOMIC::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
- pCell->store( cell(), CDS_ATOMIC::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic< cell > * pLastCell = cells + nCellCount;
+ for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell )
+ pCell->store( cell(), atomics::memory_order_relaxed );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
private:
segment(); //=delete
};
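A hedged sketch of the placement trick behind cells( reinterpret_cast<...>( this + 1 ) ): the header and the cell array share one allocation, so the first cell starts right after the segment header:
//   allocation size = sizeof(segment) + nCellCount * sizeof(atomics::atomic<cell>)
//   'this + 1' == address of cells[0]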
- typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic<segment *>, options::alignment >::type aligned_segment_ptr;
+ typedef typename opt::details::alignment_setter< atomics::atomic<segment *>, options::alignment >::type aligned_segment_ptr;
//@endcond
protected:
bool populated( segment const& s ) const
{
// The lock should be held
- CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
+ for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
if ( !pCell->load( memory_model::memory_order_relaxed ).all() )
return false;
}
bool exhausted( segment const& s ) const
{
// The lock should be held
- CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor();
- for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
+ atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor();
+ for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) {
if ( !pCell->load( memory_model::memory_order_relaxed ).bits() )
return false;
}
// Empty cell found, try to enqueue here
cell nullCell;
if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
// Ok to push item
m_Stat.onPush();
if ( !item.bits() ) {
// Try to mark the cell as deleted
if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1,
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
--m_ItemCounter;
m_Stat.onPop();
while ( true ) {
node * pNext = aGuards.protect( 0, m_pNext );
- if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
node * p = aGuards.protect( 1, pNext->m_pNext );
- m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
continue;
}
else {
virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent )
{
if ( bConcurrent ) {
- node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed);
- do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ node * pNext = m_pNext.load(atomics::memory_order_relaxed);
+ do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
}
else {
- m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( nullptr, atomics::memory_order_relaxed );
}
}
};
*/
static void is_empty( const node_type * pNode )
{
- assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr );
+ assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr );
}
};
void clear()
{
assert( m_arrNext == nullptr );
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+ m_pNext.store( marked_ptr(), atomics::memory_order_release );
}
//@cond
*/
class xorshift {
//@cond
- CDS_ATOMIC::atomic<unsigned int> m_nSeed;
+ atomics::atomic<unsigned int> m_nSeed;
//@endcond
public:
/// The upper bound of the generator's return value. The generator produces a random number in the range <tt>[0..c_nUpperBound)</tt>
/// Initializes the generator instance
xorshift()
{
- m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed );
+ m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed );
}
/// Main generator function
return level;
}
*/
- unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed );
+ unsigned int x = m_nSeed.load( atomics::memory_order_relaxed );
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
- m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed );
+ m_nSeed.store( x, atomics::memory_order_relaxed );
unsigned int nLevel = ((x & 0x00000001) != 0) ? 0 : cds::bitop::LSB( (~(x >> 1)) & 0x7FFFFFFF );
assert( nLevel < c_nUpperBound );
return nLevel;
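For reference, a self-contained sketch of the Marsaglia xorshift32 step used above (single-threaded here; the container keeps the seed in an atomic):

    unsigned int xorshift32( unsigned int& x )
    {
        x ^= x << 13;
        x ^= x >> 17;
        x ^= x << 5;
        return x;
    }
    // Each output bit is close to uniform, so deriving the node height from
    // the low bits gives the geometric distribution a skip list needs:
    // P(level == k) is about 2^-(k+1).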
class turbo_pascal
{
//@cond
- CDS_ATOMIC::atomic<unsigned int> m_nSeed;
+ atomics::atomic<unsigned int> m_nSeed;
//@endcond
public:
/// The upper bound of the generator's return value. The generator produces a random number in the range <tt>[0..c_nUpperBound)</tt>
/// Initializes the generator instance
turbo_pascal()
{
- m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed );
+ m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed );
}
/// Main generator function
upper 16 bits), so we traverse from the highest bit down (i.e., test the
sign bit), and thus hardly ever use the lower bits.
*/
- unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ) * 134775813 + 1;
- m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed );
+ unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ) * 134775813 + 1;
+ m_nSeed.store( x, atomics::memory_order_relaxed );
unsigned int nLevel = ( x & 0x80000000 ) ? (31 - cds::bitop::MSBnz( (x & 0x7FFFFFFF) | 1 )) : 0;
assert( nLevel < c_nUpperBound );
return nLevel;
head_node( unsigned int nHeight )
{
for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
- m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed );
node_type::make_tower( nHeight, m_Tower );
}
~node()
{
release_tower();
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_pNext.store( marked_ptr(), atomics::memory_order_relaxed );
}
/// Constructs a node of height \p nHeight
m_arrNext = nullptr;
m_nHeight = 1;
for ( unsigned int i = 0; i < nHeight; ++i )
- pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+ pTower[i].store( marked_ptr(), atomics::memory_order_release );
}
return pTower;
}
while ( true ) {
marked_ptr pNextMarked( aGuards.protect( 0, next(i) ));
node * pNext = pNextMarked.ptr();
- if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) {
marked_ptr p = aGuards.protect( 1, pNext->next(i) );
- next(i).compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ next(i).compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed );
continue;
}
else {
unsigned int const nHeight = height();
if ( bConcurrent ) {
for (unsigned int i = 0; i < nHeight; ++i ) {
- marked_ptr pNext = next(i).load(CDS_ATOMIC::memory_order_relaxed);
- while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ marked_ptr pNext = next(i).load(atomics::memory_order_relaxed);
+ while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) );
}
}
else {
for (unsigned int i = 0; i < nHeight; ++i )
- next(i).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ next(i).store( marked_ptr(), atomics::memory_order_relaxed );
}
}
};
: m_pHead( new head_tower() )
{
for ( size_t i = 0; i < sizeof(m_pHead->m_Tower) / sizeof(m_pHead->m_Tower[0]); ++i )
- m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed );
m_pHead->make_tower( nHeight, m_pHead->m_Tower );
}
back_off bkoff;
for (;;) {
- if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// The current node is marked as deleted, so its next pointer can point to anything.
// In this case we interrupt the iteration and return the end() iterator.
*this = iterator();
bkoff();
continue;
}
- else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+ else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
node_type * pp = p.ptr();
// A logically deleted node is marked starting from its highest level
- if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
m_pNode = pp;
break;
}
item_counter m_ItemCounter ; ///< item counter
random_level_generator m_RandomLevelGen ; ///< random level generator instance
- CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
+ atomics::atomic<unsigned int> m_nHeight ; ///< estimated high level
mutable stat m_Stat ; ///< internal statistics
protected:
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 )
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 )
gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node );
{
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_release );
- if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+ if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
return false;
}
cds::unref( f )( val );
marked_node_ptr p;
while ( true ) {
marked_node_ptr q( pos.pSucc[ nLevel ]);
- if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
// pNode has been marked as removed while we were inserting it
// Stop inserting
assert( p.bits() );
return true;
}
p = q;
- if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
break;
// Renew insert position
while ( true ) {
pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
if ( pSucc.bits() || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1,
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
break;
}
pSucc = gSucc.protect( pDel->next(0), gc_protect );
marked_node_ptr p( pSucc.ptr() );
if ( pDel->next(0).compare_exchange_strong( p, marked_node_ptr(p.ptr(), 1),
- memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
cds::unref(f)( *node_traits::to_value_ptr( pDel ));
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
pSucc = gSucc.protect( pDel->next(nLevel), gc_protect );
if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( p, marked_node_ptr(pSucc.ptr()),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+ memory_model::memory_order_release, atomics::memory_order_relaxed) )
{
// Make slow erase
find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false );
{
unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
if ( nCur < nHeight )
- m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
//@endcond
gc::check_available_guards( c_nHazardPtrCount );
// Barrier for head node
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
/// Clears and destructs the skip-list
typedef cds::gc::nogc gc ; ///< Garbage collector
typedef Tag tag ; ///< tag
- typedef CDS_ATOMIC::atomic<node * > atomic_ptr;
+ typedef atomics::atomic<node * > atomic_ptr;
typedef atomic_ptr tower_item_type;
protected:
void clear()
{
assert( m_arrNext == nullptr );
- m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release );
+ m_pNext.store( nullptr, atomics::memory_order_release );
}
bool is_cleared() const
{
- return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr
+ return m_pNext.load( atomics::memory_order_relaxed ) == nullptr
&& m_arrNext == nullptr
&& m_nHeight <= 1
;
public: // for internal use only!!!
iterator( node_type& refHead )
- : m_pNode( refHead[0].load( CDS_ATOMIC::memory_order_relaxed ) )
+ : m_pNode( refHead[0].load( atomics::memory_order_relaxed ) )
{}
static iterator from_node( node_type * pNode )
iterator& operator ++()
{
if ( m_pNode )
- m_pNode = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
+ m_pNode = m_pNode->next(0).load( atomics::memory_order_relaxed );
return *this;
}
head_node( unsigned int nHeight )
{
for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
- m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ m_Tower[i].store( nullptr, atomics::memory_order_relaxed );
node_type::make_tower( nHeight, m_Tower );
}
void clear()
{
for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i )
- m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed );
- node_type::m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ m_Tower[i].store( nullptr, atomics::memory_order_relaxed );
+ node_type::m_pNext.store( nullptr, atomics::memory_order_relaxed );
}
};
//@endcond
item_counter m_ItemCounter ; ///< item counter
random_level_generator m_RandomLevelGen ; ///< random level generator instance
- CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
+ atomics::atomic<unsigned int> m_nHeight ; ///< estimated high level
mutable stat m_Stat ; ///< internal statistics
protected:
void increase_height( unsigned int nHeight )
{
unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
- while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ) );
+ while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) );
}
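This is the usual monotonic-max CAS idiom: on failure compare_exchange_weak refreshes nCur with the winner's value, so the loop retries only while the observed height is still below nHeight; m_nHeight therefore only ever grows, and concurrent callers converge on the maximum requested height.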
//@endcond
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
// Barrier for head node
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
/// Clears and destructs the skip-list
// bit 0 - the item is logically deleted
// bit 1 - the item is extracted (only for level 0)
typedef cds::details::marked_ptr<node, 3> marked_ptr ; ///< marked pointer
- typedef CDS_ATOMIC::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer
+ typedef atomics::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer
typedef atomic_marked_ptr tower_item_type;
protected:
void clear_tower()
{
for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel )
- next(nLevel).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed );
+ next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed );
}
/// Access to element of next pointer array
void clear()
{
assert( m_arrNext == nullptr );
- m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release );
+ m_pNext.store( marked_ptr(), atomics::memory_order_release );
m_pDelChain = nullptr;
}
back_off bkoff;
for (;;) {
- if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
// The current node is marked as deleted, so its next pointer can point to anything.
// In this case we interrupt the iteration and return the end() iterator.
*this = iterator();
return;
}
- marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed );
+ marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed );
node_type * pp = p.ptr();
if ( p.bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
}
- else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) {
+ else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) {
// p is marked as deleted. Spin waiting for physical removal
bkoff();
continue;
back_off bkoff;
for (;;) {
- marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed );
+ marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed );
if ( !p.ptr() ) {
// empty skip-list
break;
node_type * pp = p.ptr();
// A logically deleted node is marked starting from its highest level
- if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) {
+ if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) {
m_pNode = pp;
break;
}
item_counter m_ItemCounter ; ///< item counter
random_level_generator m_RandomLevelGen ; ///< random level generator instance
- CDS_ATOMIC::atomic<unsigned int> m_nHeight ; ///< estimated high level
- CDS_ATOMIC::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
+ atomics::atomic<unsigned int> m_nHeight ; ///< estimated high level
+ atomics::atomic<node_type *> m_pDeferredDelChain ; ///< Deferred deleted node chain
mutable stat m_Stat ; ///< internal statistics
protected:
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
// pCur is marked, i.e. logically deleted.
marked_node_ptr p( pCur.ptr() );
if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ memory_model::memory_order_release, atomics::memory_order_relaxed ))
{
if ( nLevel == 0 ) {
# ifdef _DEBUG
{
marked_node_ptr p( pos.pSucc[0] );
pNode->next( 0 ).store( p, memory_model::memory_order_release );
- if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) {
return false;
}
# ifdef _DEBUG
marked_node_ptr p;
while ( true ) {
marked_node_ptr q( pos.pSucc[ nLevel ]);
- if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) {
// pNode has been marked as removed while we were inserting it
// Stop inserting
assert( p.bits() );
return true;
}
p = q;
- if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) )
break;
// Renew insert position
pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed );
while ( true ) {
if ( pSucc.bits()
- || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
break;
}
return false;
int const nMask = bExtract ? 3 : 1;
- if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))
{
cds::unref(f)( *node_traits::to_value_ptr( pDel ));
for ( int nLevel = static_cast<int>( pDel->height() - 1 ); nLevel >= 0; --nLevel ) {
if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc,
marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ),
- memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) )
+ memory_model::memory_order_release, atomics::memory_order_relaxed) )
{
// Do slow erase
find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false );
{
unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed );
if ( nCur < nHeight )
- m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed );
}
class deferred_list_iterator
node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed );
do {
pTail->m_pDelChain = pDeferList;
- } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ));
pos.pDelChain = nullptr;
}
static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" );
// Barrier for head node
- CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release );
+ atomics::atomic_thread_fence( memory_model::memory_order_release );
}
/// Clears and destructs the skip-list
protected:
ordered_list_wrapper m_List ; ///< Ordered list containing split-list items
bucket_table m_Buckets ; ///< bucket table
- CDS_ATOMIC::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
+ atomics::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
item_counter m_ItemCounter ; ///< Item counter
hash m_HashFunctor ; ///< Hash functor
size_t bucket_no( size_t nHash ) const
{
- return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+ return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
}
static size_t parent_bucket( size_t nBucket )
void inc_item_count()
{
- size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+ size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
{
- m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+ m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
}
}
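A small worked example of the bucket arithmetic above (values are illustrative):

    size_t const nLog2   = 4;                            // m_nBucketCountLog2
    size_t const nMask   = ( size_t(1) << nLog2 ) - 1;   // 16 buckets -> mask 0x0F
    size_t const nHash   = 0x2A7;
    size_t const nBucket = nHash & nMask;                // == 7, as in bucket_no()
    // inc_item_count() doubles the bucket count (log2: 4 -> 5) once
    // ( ++m_ItemCounter >> 4 ) exceeds load_factor(), provided 16 < capacity().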
public:
typedef GC gc ; ///< Garbage collector
typedef Node node_type ; ///< Bucket node type
- typedef CDS_ATOMIC::atomic<node_type *> table_entry ; ///< Table entry type
+ typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
/// Bucket table allocator
typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator;
public:
typedef GC gc ; ///< Garbage collector
typedef Node node_type ; ///< Bucket node type
- typedef CDS_ATOMIC::atomic<node_type *> table_entry ; ///< Table entry type
+ typedef atomics::atomic<node_type *> table_entry ; ///< Table entry type
/// Memory model for atomic operations
typedef typename options::memory_model memory_model;
protected:
- typedef CDS_ATOMIC::atomic<table_entry *> segment_type ; ///< Bucket table segment type
+ typedef atomics::atomic<table_entry *> segment_type ; ///< Bucket table segment type
public:
/// Bucket table allocator
if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) {
table_entry * pNewSegment = allocate_segment();
table_entry * pNull = nullptr;
- if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
destroy_segment( pNewSegment );
}
}
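The segment initialization above is the classic allocate-then-install idiom: every racing thread may allocate a buffer, but only the CAS winner's buffer is kept and each loser destroys its own. A sketch under the assumption of two helper functions matching the hunk above:

    #include <atomic>

    struct table_entry;
    table_entry * allocate_segment();               // assumed helpers, as in the hunk above
    void          destroy_segment( table_entry * );

    table_entry * get_or_init( std::atomic<table_entry *>& segment )
    {
        table_entry * p = segment.load( std::memory_order_acquire );
        if ( !p ) {
            table_entry * pNew  = allocate_segment();
            table_entry * pNull = nullptr;
            if ( segment.compare_exchange_strong( pNull, pNew,
                     std::memory_order_release, std::memory_order_relaxed ))
                return pNew;
            destroy_segment( pNew );   // lost the race; pNull now holds the winner's pointer
            return pNull;
        }
        return p;
    }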
protected:
ordered_list_wrapper m_List ; ///< Ordered list containing split-list items
bucket_table m_Buckets ; ///< bucket table
- CDS_ATOMIC::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
+ atomics::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
item_counter m_ItemCounter ; ///< Item counter
hash m_HashFunctor ; ///< Hash functor
size_t bucket_no( size_t nHash ) const
{
- return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+ return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
}
static size_t parent_bucket( size_t nBucket )
void inc_item_count()
{
- size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+ size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
{
- m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+ m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
}
}
protected:
ordered_list_wrapper m_List ; ///< Ordered list containing split-list items
bucket_table m_Buckets ; ///< bucket table
- CDS_ATOMIC::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
+ atomics::atomic<size_t> m_nBucketCountLog2 ; ///< log2( current bucket count )
item_counter m_ItemCounter ; ///< Item counter
hash m_HashFunctor ; ///< Hash functor
size_t bucket_no( size_t nHash ) const
{
- return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 );
+ return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 );
}
static size_t parent_bucket( size_t nBucket )
void inc_item_count()
{
- size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed);
+ size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed);
if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() )
{
- m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed );
+ m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed );
}
}
static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1;
lock_array_ptr m_arrLocks ; ///< Lock array. The array capacity is specified in the constructor.

- CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
- CDS_ATOMIC::atomic<size_t> m_nCapacity ; ///< Lock array capacity
+ atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag)
+ atomics::atomic<size_t> m_nCapacity ; ///< Lock array capacity
spinlock_type m_access ; ///< access to m_arrLocks
//@endcond
lock_array_ptr create_lock_array( size_t nCapacity )
{
- m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_relaxed );
+ m_nCapacity.store( nCapacity, atomics::memory_order_relaxed );
return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() );
}
while ( true ) {
// wait while resizing
while ( true ) {
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
break;
bkoff();
lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1));
lock.lock();
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
return lock;
lock.unlock();
while ( true ) {
// wait while resizing
while ( true ) {
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) )
break;
bkoff();
pLocks->lock_all();
- who = m_Owner.load( CDS_ATOMIC::memory_order_acquire );
+ who = m_Owner.load( atomics::memory_order_acquire );
if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks )
return pLocks;
back_off bkoff;
for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) {
owner_t ownNull = 0;
- if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) {
lock_array_ptr pOldLocks = m_arrLocks;
size_t const nLockCount = pOldLocks->size();
for ( size_t i = 0; i < nLockCount; ++i ) {
void release_resize()
{
- m_Owner.store( 0, CDS_ATOMIC::memory_order_release );
+ m_Owner.store( 0, atomics::memory_order_release );
}
//@endcond
public:
*/
size_t lock_count() const
{
- return m_nCapacity.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_nCapacity.load( atomics::memory_order_relaxed );
}
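The resize protocol packs two facts into one atomic word: bit 0 says "a resize is in progress" and the upper bits identify the resizer, which is why readers test (who & 1) and compare (who >> 1) against their own id. A standalone sketch of just that encoding (a simplification of the striped-lock code above):

    #include <atomic>
    #include <cstdint>

    typedef std::uintptr_t owner_t;
    std::atomic<owner_t> m_Owner{ 0 };   // 0 means "no resize in progress"

    bool try_acquire_resize( owner_t me )
    {
        owner_t ownNull = 0;
        // acquire on success pairs with the release in release_resize()
        return m_Owner.compare_exchange_strong( ownNull, ( me << 1 ) | 1,
                   std::memory_order_acquire, std::memory_order_relaxed );
    }

    bool busy_by_other( owner_t me )
    {
        owner_t who = m_Owner.load( std::memory_order_acquire );
        return ( who & 1 ) && ( who >> 1 ) != me;   // resizing, and not by us
    }

    void release_resize()
    {
        m_Owner.store( 0, std::memory_order_release );
    }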
/// Resize for new capacity
{
operation_id idOp; ///< Op id
T * pVal; ///< for push: pointer to argument; for pop: accepts a return value
- CDS_ATOMIC::atomic<unsigned int> nStatus; ///< Internal elimination status
+ atomics::atomic<unsigned int> nStatus; ///< Internal elimination status
operation()
: pVal( nullptr )
struct bkoff_predicate {
operation_desc * pOp;
bkoff_predicate( operation_desc * p ): pOp(p) {}
- bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; }
+ bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; }
};
# endif
bool backoff( operation_desc& op, Stat& stat )
{
elimination_backoff_type bkoff;
- op.nStatus.store( op_busy, CDS_ATOMIC::memory_order_relaxed );
+ op.nStatus.store( op_busy, atomics::memory_order_relaxed );
elimination_rec * myRec = cds::algo::elimination::init_record( op );
slot.pRec = nullptr;
slot.lock.unlock();
- himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release );
+ himOp->nStatus.store( op_collided, atomics::memory_order_release );
cds::algo::elimination::clear_record();
stat.onActiveCollision( op.idOp );
return true;
}
- himOp->nStatus.store( op_free, CDS_ATOMIC::memory_order_release );
+ himOp->nStatus.store( op_free, atomics::memory_order_release );
}
slot.pRec = myRec;
slot.lock.unlock();
// Wait for colliding operation
# if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10)
// MSVC++ 2010 compiler error C2065: 'op_busy' : undeclared identifier
- bkoff( [&op]() -> bool { return op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } );
+ bkoff( [&op]() -> bool { return op.nStatus.load( atomics::memory_order_acquire ) != op_busy; } );
# else
// Local structs are not supported by old compilers (for example, GCC 4.3)
//struct bkoff_predicate {
// operation_desc * pOp;
// bkoff_predicate( operation_desc * p ): pOp(p) {}
- // bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; }
+ // bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; }
//};
bkoff( bkoff_predicate(&op) );
# endif
slot.pRec = nullptr;
}
- bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided;
+ bool bCollided = op.nStatus.load( atomics::memory_order_acquire ) == op_collided;
if ( !bCollided )
stat.onEliminationFailed();
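The elimination handshake above reduces to a small state machine on nStatus: the passive side publishes op_busy and spins (with back-off) until the word changes, while the active side stores op_collided with release, observed by the waiter's acquire load. A minimal sketch of the handshake alone, with the elimination-array plumbing elided:

    #include <atomic>

    enum op_status : unsigned int { op_free, op_busy, op_collided };

    struct operation_desc {
        std::atomic<unsigned int> nStatus{ op_free };
    };

    // Passive side: announce the operation, wait for a partner, report the result
    bool wait_for_collision( operation_desc& op )
    {
        op.nStatus.store( op_busy, std::memory_order_relaxed );
        // ... publish op in the elimination array here ...
        while ( op.nStatus.load( std::memory_order_acquire ) == op_busy )
            ;   // the real code runs a back-off predicate instead of a bare spin
        return op.nStatus.load( std::memory_order_acquire ) == op_collided;
    }

    // Active side: wake the waiter, either with a collision or a retry signal
    void finish_collision( operation_desc& him, bool bMatched )
    {
        him.nStatus.store( bMatched ? op_collided : op_free, std::memory_order_release );
    }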
node_type * t = m_Top.load(memory_model::memory_order_relaxed);
while ( true ) {
pNew->m_pNext.store( t, memory_model::memory_order_relaxed );
- if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { // #1 sync-with #2
+ if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // #1 sync-with #2
++m_ItemCounter;
m_stat.onPush();
return true;
return nullptr; // stack is empty
node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed);
- if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { // #2
+ if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // #2
clear_links( t );
--m_ItemCounter;
m_stat.onPop();
pTop = m_Top.load( memory_model::memory_order_relaxed );
if ( pTop == nullptr )
return;
- if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) { // sync-with #1 and #2
+ if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) ) { // sync-with #1 and #2
m_ItemCounter.reset();
break;
}
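The m_Top hunks above are the Treiber stack: push installs the new node with a release CAS (#1), pop removes the top with an acquire CAS (#2), and clear exchanges the whole list away at once. A self-contained sketch; note the naive delete in pop() is safe in the library only because the container runs under a GC scheme (Hazard Pointer and friends), as the comment warns:

    #include <atomic>

    template <typename T>
    class treiber_stack {
        struct node {
            T      value;
            node * next;
        };
        std::atomic<node *> m_Top{ nullptr };
    public:
        void push( T v )
        {
            node * p = new node{ v, m_Top.load( std::memory_order_relaxed ) };
            // #1: release publishes p->value and p->next to the popping thread
            while ( !m_Top.compare_exchange_weak( p->next, p,
                        std::memory_order_release, std::memory_order_relaxed ))
                ;   // a failed CAS reloaded p->next with the current top
        }
        bool pop( T& dest )
        {
            node * t = m_Top.load( std::memory_order_acquire );
            while ( t ) {
                // #2: acquire pairs with the release in push()
                if ( m_Top.compare_exchange_weak( t, t->next,
                         std::memory_order_acquire, std::memory_order_relaxed )) {
                    dest = t->value;
                    delete t;   // safe only under a GC; plain delete risks ABA/use-after-free
                    return true;
                }
            }
            return false;   // stack is empty
        }
    };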
protected:
//@cond
- typedef typename options::buffer::template rebind< CDS_ATOMIC::atomic<value_type *> >::other buffer;
+ typedef typename options::buffer::template rebind< atomics::atomic<value_type *> >::other buffer;
typedef typename opt::details::alignment_setter< buffer, options::alignment >::type aligned_buffer;
typedef size_t index_type;
- typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic<index_type>, options::alignment >::type aligned_index;
+ typedef typename opt::details::alignment_setter< atomics::atomic<index_type>, options::alignment >::type aligned_index;
//@endcond
protected:
}
// help the dequeue to update head
- m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, atomics::memory_order_relaxed );
continue;
}
continue;
// get actual tail and try to enqueue new node
- if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+ if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) {
if ( temp % 2 == 0 )
- m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, atomics::memory_order_relaxed );
++m_ItemCounter;
return true;
}
// check whether the queue is empty
if ( temp == m_nTail.load(memory_model::memory_order_acquire) ) {
// help the enqueue to update tail
- m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, atomics::memory_order_relaxed );
continue;
}
continue;
// Get the actual head, null means empty
- if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, atomics::memory_order_relaxed )) {
if ( temp % 2 == 0 )
- m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, atomics::memory_order_relaxed );
--m_ItemCounter;
return reinterpret_cast<value_type *>(reinterpret_cast<intptr_t>( tt ) & ~intptr_t(1));
}
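Both hunks rely on the "helping" idiom of a cyclic-buffer queue: an operation that observes a lagging m_nHead/m_nTail index advances it on behalf of the stalled thread and then simply retries, so no single thread can block progress. Reduced to its core (a sketch; nIndex, nObserved and nNewValue are illustrative names):

    #include <atomic>
    #include <cstddef>

    // "Helping": if the shared index lags behind what this thread has already
    // observed, advance it for the stalled thread. A CAS failure is fine -
    // it means another thread helped first.
    inline void help_advance( std::atomic<size_t>& nIndex, size_t nObserved, size_t nNewValue )
    {
        nIndex.compare_exchange_strong( nObserved, nNewValue,
            std::memory_order_release, std::memory_order_relaxed );
    }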
public:
typedef Backoff backoff_strategy ; ///< back-off strategy type
private:
- CDS_ATOMIC::atomic<bool> m_spin ; ///< Spin
+ atomics::atomic<bool> m_spin ; ///< Spin
# ifdef CDS_DEBUG
typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode)
# endif
:m_dbgOwnerId( OS::c_NullThreadId )
# endif
{
- m_spin.store( false, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( false, atomics::memory_order_relaxed );
}
/// Construct spin-lock in specified state
:m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId )
# endif
{
- m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( bLocked, atomics::memory_order_relaxed );
}
/// Dummy copy constructor
/// Destructor. In debug builds it checks that the spin-lock is free
~Spinlock()
{
- assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( !m_spin.load( atomics::memory_order_relaxed ) );
}
/// Check if the spin is locked
bool is_locked() const CDS_NOEXCEPT
{
- return m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_spin.load( atomics::memory_order_relaxed );
}
/// Try to lock the object
bool tryLock() CDS_NOEXCEPT
{
bool bCurrent = false;
- m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed );
CDS_DEBUG_DO(
if ( !bCurrent ) {
// TATAS algorithm
while ( !tryLock() ) {
- while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) {
+ while ( m_spin.load( atomics::memory_order_relaxed ) ) {
backoff();
}
}
/// Unlock the spin-lock. Debug version: deadlock may be detected
void unlock() CDS_NOEXCEPT
{
- assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) );
+ assert( m_spin.load( atomics::memory_order_relaxed ) );
assert( m_dbgOwnerId == OS::getCurrentThreadId() );
CDS_DEBUG_DO( m_dbgOwnerId = OS::c_NullThreadId; )
- m_spin.store( false, CDS_ATOMIC::memory_order_release );
+ m_spin.store( false, atomics::memory_order_release );
}
};
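The Spinlock above is a TATAS (test-and-test-and-set) lock: tryLock performs the expensive CAS, while the lock loop spins on a cheap relaxed load until the lock looks free, which avoids cache-line ping-pong between waiters. Stripped of the debug-owner machinery:

    #include <atomic>
    #include <thread>

    class tatas_spinlock {
        std::atomic<bool> m_spin{ false };
    public:
        bool try_lock() noexcept
        {
            bool bCurrent = false;
            return m_spin.compare_exchange_strong( bCurrent, true,
                       std::memory_order_acquire, std::memory_order_relaxed );
        }
        void lock() noexcept
        {
            while ( !try_lock() ) {
                // Test before test-and-set: spin on a read-only load, then
                // retry the CAS only once the lock looks free
                while ( m_spin.load( std::memory_order_relaxed ))
                    std::this_thread::yield();   // stand-in for the back-off strategy
            }
        }
        void unlock() noexcept
        {
            m_spin.store( false, std::memory_order_release );
        }
    };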
typedef Backoff backoff_strategy ; ///< The backoff type
private:
- CDS_ATOMIC::atomic<integral_type> m_spin ; ///< spin-lock atomic
+ atomics::atomic<integral_type> m_spin ; ///< spin-lock atomic
thread_id m_OwnerId ; ///< Owner thread id. If the spin-lock is not locked it usually equals OS::c_NullThreadId
private:
bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT
{
if ( isOwned( tid )) {
- m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.fetch_add( 1, atomics::memory_order_relaxed );
return true;
}
return false;
bool tryAcquireLock() CDS_NOEXCEPT
{
integral_type nCurrent = 0;
- return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed );
+ return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed );
}
bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() ))
// TATAS algorithm
backoff_strategy bkoff;
while ( !tryAcquireLock() ) {
- while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) )
+ while ( m_spin.load( atomics::memory_order_relaxed ) )
bkoff();
}
}
*/
bool is_locked() const CDS_NOEXCEPT
{
- return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
+ return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() ));
}
/// Try to lock the spin-lock (synonym for \ref try_lock)
bool unlock() CDS_NOEXCEPT
{
if ( isOwned( OS::getCurrentThreadId() ) ) {
- integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed );
+ integral_type n = m_spin.load( atomics::memory_order_relaxed );
if ( n > 1 )
- m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed );
+ m_spin.store( n - 1, atomics::memory_order_relaxed );
else {
free();
- m_spin.store( 0, CDS_ATOMIC::memory_order_release );
+ m_spin.store( 0, atomics::memory_order_release );
}
return true;
}
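This variant is a recursive (reentrant) spin-lock: m_spin doubles as the nesting counter, so a re-entering owner only does a relaxed increment, and only the outermost unlock publishes with release. A compact sketch; unlike the hunk above, the owner id here is kept atomic so the ownership test is race-free in isolation:

    #include <atomic>

    typedef unsigned long thread_id;

    class reentrant_spinlock {
        std::atomic<unsigned int> m_spin{ 0 };      // 0 = free, otherwise nesting depth
        std::atomic<thread_id>    m_OwnerId{ 0 };   // meaningful only while m_spin != 0
    public:
        bool try_lock( thread_id me ) noexcept
        {
            if ( m_spin.load( std::memory_order_relaxed ) != 0
              && m_OwnerId.load( std::memory_order_relaxed ) == me ) {
                m_spin.fetch_add( 1, std::memory_order_relaxed );   // re-entry by the owner
                return true;
            }
            unsigned int nFree = 0;
            if ( m_spin.compare_exchange_strong( nFree, 1,
                     std::memory_order_acquire, std::memory_order_relaxed )) {
                m_OwnerId.store( me, std::memory_order_relaxed );
                return true;
            }
            return false;
        }
        void unlock( thread_id me ) noexcept
        {
            if ( m_OwnerId.load( std::memory_order_relaxed ) != me )
                return;                                 // not the owner; real code asserts
            unsigned int n = m_spin.load( std::memory_order_relaxed );
            if ( n > 1 )
                m_spin.store( n - 1, std::memory_order_relaxed );   // still held by us
            else {
                m_OwnerId.store( 0, std::memory_order_relaxed );
                m_spin.store( 0, std::memory_order_release );       // final, visible release
            }
        }
    };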
: public options::free_list::item_hook
, public options::partial_list::item_hook
{
- CDS_ATOMIC::atomic<anchor_tag> anchor ; ///< anchor, see \ref anchor_tag
+ atomics::atomic<anchor_tag> anchor ; ///< anchor, see \ref anchor_tag
byte * pSB ; ///< ptr to superblock
processor_heap_base * pProcHeap ; ///< pointer to owner processor heap
unsigned int nBlockSize ; ///< block size in bytes
/// Processor heap
struct processor_heap_base
{
- CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic<active_tag> active; ///< pointer to the descriptor of active superblock owned by processor heap
+ CDS_DATA_ALIGNMENT(8) atomics::atomic<active_tag> active; ///< pointer to the descriptor of active superblock owned by processor heap
processor_desc * pProcDesc ; ///< pointer to parent processor descriptor
const size_class * pSizeClass ; ///< pointer to size class
- CDS_ATOMIC::atomic<superblock_desc *> pPartial ; ///< pointer to partial filled superblock (may be \p nullptr)
+ atomics::atomic<superblock_desc *> pPartial ; ///< pointer to partial filled superblock (may be \p nullptr)
partial_list partialList ; ///< list of partial filled superblocks owned by the processor heap
unsigned int nPageIdx ; ///< page size-class index, \ref c_nPageSelfAllocation - "small page"
/// Get partial superblock owned by the processor heap
superblock_desc * get_partial()
{
- superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire);
+ superblock_desc * pDesc = pPartial.load(atomics::memory_order_acquire);
do {
if ( !pDesc ) {
pDesc = partialList.pop();
break;
}
- } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) );
//assert( pDesc == nullptr || free_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_free_list_hook *>(pDesc) ));
//assert( pDesc == nullptr || partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
//assert( partial_desc_list<superblock_desc>::node_algorithms::inited( static_cast<sb_partial_list_hook *>(pDesc) ) );
superblock_desc * pCur = nullptr;
- if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) )
+ if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) )
partialList.push( pDesc );
}
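The partial-superblock hunks implement a one-slot cache in front of the partial list: get_partial claims the slot by CASing it to nullptr (falling back to the list when empty), and the return path first tries the empty slot and pushes to the list only if the slot is occupied. A sketch, with the list operations as assumed helpers:

    #include <atomic>

    struct superblock_desc;
    superblock_desc * partial_list_pop();                  // assumed helpers
    void              partial_list_push( superblock_desc * );

    std::atomic<superblock_desc *> pPartial{ nullptr };

    superblock_desc * get_partial()
    {
        superblock_desc * pDesc = pPartial.load( std::memory_order_acquire );
        do {
            if ( !pDesc )
                return partial_list_pop();                 // slot empty: use the list
        } while ( !pPartial.compare_exchange_weak( pDesc, nullptr,
                      std::memory_order_release, std::memory_order_relaxed ));
        return pDesc;
    }

    void put_partial( superblock_desc * pDesc )
    {
        superblock_desc * pCur = nullptr;
        if ( !pPartial.compare_exchange_strong( pCur, pDesc,
                 std::memory_order_acq_rel, std::memory_order_relaxed ))
            partial_list_push( pDesc );                    // slot occupied
    }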
system_heap m_LargeHeap ; ///< Heap for large block
aligned_heap m_AlignedHeap ; ///< Internal aligned heap
sizeclass_selector m_SizeClassSelector ; ///< Size-class selector
- CDS_ATOMIC::atomic<processor_desc *> * m_arrProcDesc ; ///< array of pointers to the processor descriptors
+ atomics::atomic<processor_desc *> * m_arrProcDesc ; ///< array of pointers to the processor descriptors
unsigned int m_nProcessorCount ; ///< Processor count
bound_checker m_BoundChecker ; ///< Bound checker
// Reserve block
while ( true ) {
++nCollision;
- oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire);
+ oldActive = pProcHeap->active.load(atomics::memory_order_acquire);
if ( !oldActive.ptr() )
return nullptr;
unsigned int nCredits = oldActive.credits();
newActive = oldActive;
newActive.credits( nCredits - 1 );
}
- if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed ))
break;
}
nCollision = -1;
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize;
newAnchor.count -= nMoreCredits;
}
}
- } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
if ( nCollision )
pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision );
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
if ( oldAnchor.state == SBSTATE_EMPTY ) {
free_superblock( pDesc );
goto retry;
newAnchor.count -= nMoreCredits + 1;
newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL;
newAnchor.tag += 1;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
if ( nCollision )
pProcHeap->stat.incPartialDescCASFailureCount( nCollision );
do {
++nCollision;
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
assert( oldAnchor.avail < pDesc->nCapacity );
pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize;
newAnchor.avail = reinterpret_cast<free_block_header *>( pAddr )->nNextFree;
++newAnchor.tag;
- } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) );
+ } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) );
if ( nCollision )
pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision );
assert( pDesc != nullptr );
pDesc->pSB = new_superblock_buffer( pProcHeap );
- anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed);
+ anchor_tag anchor = pDesc->anchor.load(atomics::memory_order_relaxed);
anchor.tag += 1;
// Make single-linked list of free blocks in superblock
anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1);
anchor.state = SBSTATE_ACTIVE;
- pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed);
+ pDesc->anchor.store(anchor, atomics::memory_order_relaxed);
active_tag curActive;
- if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) {
+ if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) {
pProcHeap->stat.incAllocFromNew();
//reinterpret_cast<block_header *>( pDesc->pSB )->set( pDesc, 0 );
return reinterpret_cast<block_header *>( pDesc->pSB );
if ( nProcessorId >= m_nProcessorCount )
nProcessorId = 0;
- processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed );
+ processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( atomics::memory_order_relaxed );
while ( !pDesc ) {
processor_desc * pNewDesc = new_processor_desc( nProcessorId );
- if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) {
+ if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) {
pDesc = pNewDesc;
break;
}
active_tag newActive;
newActive.set( pDesc, nCredits - 1 );
- if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
return;
// Someone installed another active superblock.
anchor_tag newAnchor;
do {
- newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
newAnchor.count += nCredits;
newAnchor.state = SBSTATE_PARTIAL;
- } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ));
pDesc->pProcHeap->add_partial( pDesc );
}
m_AlignedHeap.free( pDesc );
}
- superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+ superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
if ( pPartial ) {
free( pPartial->pSB );
m_AlignedHeap.free( pPartial );
}
- pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+ pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
if ( pDesc ) {
free( pDesc->pSB );
m_AlignedHeap.free( pDesc );
m_AlignedHeap.free( pDesc );
}
- superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed);
+ superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed);
if ( pPartial ) {
pageHeap.free( pPartial->pSB );
m_AlignedHeap.free( pPartial );
}
- pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr();
+ pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr();
if ( pDesc ) {
pageHeap.free( pDesc->pSB );
m_AlignedHeap.free( pDesc );
pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc;
assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 );
- anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+ anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
anchor.tag = 0;
- pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+ pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
pProcHeap->stat.incDescAllocCount();
}
assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock );
pDesc->pProcHeap = pProcHeap;
- anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed );
+ anchor = pDesc->anchor.load( atomics::memory_order_relaxed );
anchor.avail = 1;
- pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed );
+ pDesc->anchor.store( anchor, atomics::memory_order_relaxed );
return pDesc;
}
m_nProcessorCount = m_Topology.processor_count();
m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment ))
- CDS_ATOMIC::atomic<processor_desc *>[ m_nProcessorCount ];
+ atomics::atomic<processor_desc *>[ m_nProcessorCount ];
memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount ) ; // ?? memset for atomic<>
}
~Heap()
{
for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) {
- processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed);
+ processor_desc * pDesc = m_arrProcDesc[i].load(atomics::memory_order_relaxed);
if ( pDesc )
free_processor_desc( pDesc );
}
pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize );
- oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire);
+ oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire);
do {
newAnchor = oldAnchor;
reinterpret_cast<free_block_header *>( pBlock )->nNextFree = oldAnchor.avail;
}
else
newAnchor.count += 1;
- } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) );
+ } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) );
pProcHeap->stat.incFreeCount();
{
size_t nProcHeapCount = m_SizeClassSelector.size();
for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) {
- processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed);
+ processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(atomics::memory_order_relaxed);
if ( pProcDesc ) {
for ( unsigned int i = 0; i < nProcHeapCount; ++i ) {
processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i;
struct os_allocated_atomic
{
///@cond
- CDS_ATOMIC::atomic<size_t> nAllocCount ; ///< Event count of large block allocation from %OS
- CDS_ATOMIC::atomic<size_t> nFreeCount ; ///< Event count of large block deallocation to %OS
- CDS_ATOMIC::atomic<unsigned long long> nBytesAllocated ; ///< Total size of allocated large blocks, in bytes
- CDS_ATOMIC::atomic<unsigned long long> nBytesDeallocated ; ///< Total size of deallocated large blocks, in bytes
+ atomics::atomic<size_t> nAllocCount ; ///< Event count of large block allocation from %OS
+ atomics::atomic<size_t> nFreeCount ; ///< Event count of large block deallocation to %OS
+ atomics::atomic<unsigned long long> nBytesAllocated ; ///< Total size of allocated large blocks, in bytes
+ atomics::atomic<unsigned long long> nBytesDeallocated ; ///< Total size of deallocated large blocks, in bytes
os_allocated_atomic()
: nAllocCount(0)
/// Adds \p nSize to nBytesAllocated counter
void incBytesAllocated( size_t nSize )
{
- nAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed);
- nBytesAllocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed );
+ nAllocCount.fetch_add( 1, atomics::memory_order_relaxed);
+ nBytesAllocated.fetch_add( nSize, atomics::memory_order_relaxed );
}
/// Adds \p nSize to nBytesDeallocated counter
void incBytesDeallocated( size_t nSize )
{
- nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
- nBytesDeallocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed );
+ nFreeCount.fetch_add( 1, atomics::memory_order_relaxed );
+ nBytesDeallocated.fetch_add( nSize, atomics::memory_order_relaxed );
}
/// Returns count of \p alloc and \p alloc_aligned function calls (for large blocks allocated directly from the %OS)
size_t allocCount() const
{
- return nAllocCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nAllocCount.load(atomics::memory_order_relaxed);
}
/// Returns count of \p free and \p free_aligned function calls (for large blocks allocated directly from the %OS)
size_t freeCount() const
{
- return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nFreeCount.load(atomics::memory_order_relaxed);
}
/// Returns current value of nBytesAllocated counter
atomic64u_t allocatedBytes() const
{
- return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBytesAllocated.load(atomics::memory_order_relaxed);
}
/// Returns current value of nBytesDeallocated counter
atomic64u_t deallocatedBytes() const
{
- return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBytesDeallocated.load(atomics::memory_order_relaxed);
}
};
class procheap_atomic_stat
{
//@cond
- CDS_ATOMIC::atomic<size_t> nAllocFromActive ; ///< Event count of allocation from active superblock
- CDS_ATOMIC::atomic<size_t> nAllocFromPartial ; ///< Event count of allocation from partial superblock
- CDS_ATOMIC::atomic<size_t> nAllocFromNew ; ///< Event count of allocation from new superblock
- CDS_ATOMIC::atomic<size_t> nFreeCount ; ///< \ref free function call count
- CDS_ATOMIC::atomic<size_t> nBlockCount ; ///< Count of superblock allocated
- CDS_ATOMIC::atomic<size_t> nBlockDeallocCount ; ///< Count of superblock deallocated
- CDS_ATOMIC::atomic<size_t> nDescAllocCount ; ///< Count of superblock descriptors
- CDS_ATOMIC::atomic<size_t> nDescFull ; ///< Count of full superblock
- CDS_ATOMIC::atomic<unsigned long long> nBytesAllocated ; ///< Count of allocated bytes
- CDS_ATOMIC::atomic<unsigned long long> nBytesDeallocated ; ///< Count of deallocated bytes
-
- CDS_ATOMIC::atomic<size_t> nActiveDescCASFailureCount ; ///< CAS failure counter for active block of \p alloc_from_active Heap function
- CDS_ATOMIC::atomic<size_t> nActiveAnchorCASFailureCount; ///< CAS failure counter for active block of \p alloc_from_active Heap function
- CDS_ATOMIC::atomic<size_t> nPartialDescCASFailureCount ; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
- CDS_ATOMIC::atomic<size_t> nPartialAnchorCASFailureCount; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
+ atomics::atomic<size_t> nAllocFromActive ; ///< Event count of allocation from active superblock
+ atomics::atomic<size_t> nAllocFromPartial ; ///< Event count of allocation from partial superblock
+ atomics::atomic<size_t> nAllocFromNew ; ///< Event count of allocation from new superblock
+ atomics::atomic<size_t> nFreeCount ; ///< \ref free function call count
+ atomics::atomic<size_t> nBlockCount ; ///< Count of superblock allocated
+ atomics::atomic<size_t> nBlockDeallocCount ; ///< Count of superblock deallocated
+ atomics::atomic<size_t> nDescAllocCount ; ///< Count of superblock descriptors
+ atomics::atomic<size_t> nDescFull ; ///< Count of full superblock
+ atomics::atomic<unsigned long long> nBytesAllocated ; ///< Count of allocated bytes
+ atomics::atomic<unsigned long long> nBytesDeallocated ; ///< Count of deallocated bytes
+
+ atomics::atomic<size_t> nActiveDescCASFailureCount ; ///< CAS failure counter for active block of \p alloc_from_active Heap function
+ atomics::atomic<size_t> nActiveAnchorCASFailureCount; ///< CAS failure counter for active block of \p alloc_from_active Heap function
+ atomics::atomic<size_t> nPartialDescCASFailureCount ; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
+ atomics::atomic<size_t> nPartialAnchorCASFailureCount; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function
//@endcond
/// Increment event counter of allocation from active superblock
void incAllocFromActive()
{
- nAllocFromActive.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromActive.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment event counter of allocation from active superblock by \p n
void incAllocFromActive( size_t n )
{
- nAllocFromActive.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromActive.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment event counter of allocation from partial superblock
void incAllocFromPartial()
{
- nAllocFromPartial.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromPartial.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment event counter of allocation from partial superblock by \p n
void incAllocFromPartial( size_t n )
{
- nAllocFromPartial.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromPartial.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment event count of allocation from new superblock
void incAllocFromNew()
{
- nAllocFromNew.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromNew.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment event count of allocation from new superblock by \p n
void incAllocFromNew( size_t n )
{
- nAllocFromNew.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nAllocFromNew.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment event counter of \p free calls
void incFreeCount()
{
- nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nFreeCount.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment event counter of \p free calls by \p n
void incFreeCount( size_t n )
{
- nFreeCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nFreeCount.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment counter of superblocks allocated
void incBlockAllocated()
{
- nBlockCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nBlockCount.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment counter of superblocks allocated by \p n
void incBlockAllocated( size_t n )
{
- nBlockCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nBlockCount.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment counter of superblocks deallocated
void incBlockDeallocated()
{
- nBlockDeallocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nBlockDeallocCount.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment counter of superblocks deallocated by \p n
void incBlockDeallocated( size_t n )
{
- nBlockDeallocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nBlockDeallocCount.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment counter of superblock descriptors allocated
void incDescAllocCount()
{
- nDescAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nDescAllocCount.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment counter of superblock descriptors allocated by \p n
void incDescAllocCount( size_t n )
{
- nDescAllocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nDescAllocCount.fetch_add( n, atomics::memory_order_relaxed );
}
/// Increment counter of full superblock descriptor
void incDescFull()
{
- nDescFull.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nDescFull.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Increment counter of full superblock descriptor by \p n
void incDescFull( size_t n )
{
- nDescFull.fetch_add( n, CDS_ATOMIC::memory_order_relaxed );
+ nDescFull.fetch_add( n, atomics::memory_order_relaxed );
}
/// Decrement counter of full superblock descriptor
void decDescFull()
{
- nDescFull.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed );
+ nDescFull.fetch_sub( 1, atomics::memory_order_relaxed );
}
/// Decrement counter of full superblock descriptor by \p n
void decDescFull(size_t n)
{
- nDescFull.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed );
+ nDescFull.fetch_sub( n, atomics::memory_order_relaxed );
}
/// Add \p nBytes to allocated bytes counter
void incAllocatedBytes( size_t nBytes )
{
- nBytesAllocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed );
+ nBytesAllocated.fetch_add( nBytes, atomics::memory_order_relaxed );
}
/// Add \p nBytes to deallocated bytes counter
void incDeallocatedBytes( size_t nBytes )
{
- nBytesDeallocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed);
+ nBytesDeallocated.fetch_add( nBytes, atomics::memory_order_relaxed);
}
/// Add \p nCount to CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function
void incActiveDescCASFailureCount( int nCount )
{
- nActiveDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+ nActiveDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
}
/// Add \p nCount to CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function
void incActiveAnchorCASFailureCount( int nCount )
{
- nActiveAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+ nActiveAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
}
/// Add \p nCount to CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function
void incPartialDescCASFailureCount( int nCount )
{
- nPartialDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+ nPartialDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
}
/// Add \p nCount to CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function
void incPartialAnchorCASFailureCount( int nCount )
{
- nPartialAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed );
+ nPartialAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed );
}
// -----------------------------------------------------------------
/// Read event counter of allocation from active superblock
size_t allocFromActive() const
{
- return nAllocFromActive.load(CDS_ATOMIC::memory_order_relaxed);
+ return nAllocFromActive.load(atomics::memory_order_relaxed);
}
/// Read event counter of allocation from partial superblock
size_t allocFromPartial() const
{
- return nAllocFromPartial.load(CDS_ATOMIC::memory_order_relaxed);
+ return nAllocFromPartial.load(atomics::memory_order_relaxed);
}
/// Read event count of allocation from new superblock
size_t allocFromNew() const
{
- return nAllocFromNew.load(CDS_ATOMIC::memory_order_relaxed);
+ return nAllocFromNew.load(atomics::memory_order_relaxed);
}
/// Read event counter of \p free calls
size_t freeCount() const
{
- return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nFreeCount.load(atomics::memory_order_relaxed);
}
/// Read counter of superblocks allocated
size_t blockAllocated() const
{
- return nBlockCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBlockCount.load(atomics::memory_order_relaxed);
}
/// Read counter of superblocks deallocated
size_t blockDeallocated() const
{
- return nBlockDeallocCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBlockDeallocCount.load(atomics::memory_order_relaxed);
}
/// Read counter of superblock descriptors allocated
size_t descAllocCount() const
{
- return nDescAllocCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nDescAllocCount.load(atomics::memory_order_relaxed);
}
/// Read counter of full superblock descriptor
size_t descFull() const
{
- return nDescFull.load(CDS_ATOMIC::memory_order_relaxed);
+ return nDescFull.load(atomics::memory_order_relaxed);
}
/// Get counter of allocated bytes
*/
atomic64u_t allocatedBytes() const
{
- return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBytesAllocated.load(atomics::memory_order_relaxed);
}
/// Get counter of deallocated bytes
*/
atomic64u_t deallocatedBytes() const
{
- return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed);
+ return nBytesDeallocated.load(atomics::memory_order_relaxed);
}
/// Get CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function
size_t activeDescCASFailureCount() const
{
- return nActiveDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nActiveDescCASFailureCount.load(atomics::memory_order_relaxed);
}
/// Get CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function
size_t activeAnchorCASFailureCount() const
{
- return nActiveAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nActiveAnchorCASFailureCount.load(atomics::memory_order_relaxed);
}
/// Get CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function
size_t partialDescCASFailureCount() const
{
- return nPartialDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nPartialDescCASFailureCount.load(atomics::memory_order_relaxed);
}
/// Get CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function
size_t partialAnchorCASFailureCount() const
{
- return nPartialAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed);
+ return nPartialAnchorCASFailureCount.load(atomics::memory_order_relaxed);
}
};
//@cond
// For new C++11 (cds-1.1.0)
- static const CDS_ATOMIC::memory_order memory_order_relaxed = CDS_ATOMIC::memory_order_relaxed;
- static const CDS_ATOMIC::memory_order memory_order_consume = CDS_ATOMIC::memory_order_consume;
- static const CDS_ATOMIC::memory_order memory_order_acquire = CDS_ATOMIC::memory_order_acquire;
- static const CDS_ATOMIC::memory_order memory_order_release = CDS_ATOMIC::memory_order_release;
- static const CDS_ATOMIC::memory_order memory_order_acq_rel = CDS_ATOMIC::memory_order_acq_rel;
- static const CDS_ATOMIC::memory_order memory_order_seq_cst = CDS_ATOMIC::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_relaxed = atomics::memory_order_relaxed;
+ static const atomics::memory_order memory_order_consume = atomics::memory_order_consume;
+ static const atomics::memory_order memory_order_acquire = atomics::memory_order_acquire;
+ static const atomics::memory_order memory_order_release = atomics::memory_order_release;
+ static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_acq_rel;
+ static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst;
//@endcond
};
//@cond
// For new C++11 (cds-1.1.0)
- static const CDS_ATOMIC::memory_order memory_order_relaxed = CDS_ATOMIC::memory_order_seq_cst;
- static const CDS_ATOMIC::memory_order memory_order_consume = CDS_ATOMIC::memory_order_seq_cst;
- static const CDS_ATOMIC::memory_order memory_order_acquire = CDS_ATOMIC::memory_order_seq_cst;
- static const CDS_ATOMIC::memory_order memory_order_release = CDS_ATOMIC::memory_order_seq_cst;
- static const CDS_ATOMIC::memory_order memory_order_acq_rel = CDS_ATOMIC::memory_order_seq_cst;
- static const CDS_ATOMIC::memory_order memory_order_seq_cst = CDS_ATOMIC::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_relaxed = atomics::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_consume = atomics::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_acquire = atomics::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_release = atomics::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_seq_cst;
+ static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst;
//@endcond
};
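These two blocks are the library's memory-model policies: the first forwards each order unchanged, the second pins everything to seq_cst, which is handy when hunting ordering bugs. A minimal sketch of how a container consumes such a policy (the struct names mirror the library's opt::v policies, but the snippet itself is illustrative):

    #include <atomic>

    struct relaxed_ordering {        // pass each order through unchanged
        static constexpr std::memory_order memory_order_acquire = std::memory_order_acquire;
        static constexpr std::memory_order memory_order_release = std::memory_order_release;
    };

    struct sequential_consistent {   // force every access to seq_cst
        static constexpr std::memory_order memory_order_acquire = std::memory_order_seq_cst;
        static constexpr std::memory_order memory_order_release = std::memory_order_seq_cst;
    };

    template <class memory_model>
    int consume_flag( std::atomic<int>& flag )
    {
        // Container code never hard-codes an order; swapping the policy
        // changes every atomic access in one place
        return flag.load( memory_model::memory_order_acquire );
    }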
} // namespace v
template <typename T>
class ref_counter
{
- CDS_ATOMIC::atomic<T> m_nRefCount ; ///< The reference counter
+ atomics::atomic<T> m_nRefCount ; ///< The reference counter
public:
typedef T ref_counter_type ; ///< The reference counter type
/// Get current value of reference counter.
T value() const CDS_NOEXCEPT
{
- return m_nRefCount.load( CDS_ATOMIC::memory_order_relaxed );
+ return m_nRefCount.load( atomics::memory_order_relaxed );
}
/// Current value of reference counter
/// Atomic increment
void inc() CDS_NOEXCEPT
{
- m_nRefCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ m_nRefCount.fetch_add( 1, atomics::memory_order_relaxed );
}
/// Atomic decrement. Returns \p true if the reference counter becomes zero, otherwise \p false
bool dec() CDS_NOEXCEPT
{
- if ( m_nRefCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) {
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ if ( m_nRefCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) {
+ atomics::atomic_thread_fence( atomics::memory_order_release );
return true;
}
return false;
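dec() lets increments stay relaxed and synchronizes only on the final decrement, since holding a reference already proves the object is alive. A widely used equivalent formulation (the Boost.SmartPtr idiom: release on the fetch_sub, acquire fence before destruction) in standalone form; the hunk above places its fence slightly differently:

    #include <atomic>

    class ref_counter {
        std::atomic<long> m_nRefCount{ 1 };
    public:
        void inc() noexcept
        {
            // No ordering needed: the caller already holds a reference
            m_nRefCount.fetch_add( 1, std::memory_order_relaxed );
        }
        bool dec() noexcept   // true when the caller dropped the last reference
        {
            // Release publishes this thread's writes to the destroyer; the
            // acquire fence makes all threads' writes visible before destruction
            if ( m_nRefCount.fetch_sub( 1, std::memory_order_release ) == 1 ) {
                std::atomic_thread_fence( std::memory_order_acquire );
                return true;
            }
            return false;
        }
    };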
cds::algo::elimination::record m_EliminationRec;
//@cond
- static CDS_EXPORT_API CDS_ATOMIC::atomic<size_t> s_nLastUsedProcNo;
+ static CDS_EXPORT_API atomics::atomic<size_t> s_nLastUsedProcNo;
static CDS_EXPORT_API size_t s_nProcCount;
//@endcond
, m_pSHBRCU( nullptr )
, m_pSHTRCU( nullptr )
#endif
- , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) % s_nProcCount )
+ , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, atomics::memory_order_relaxed) % s_nProcCount )
, m_nAttachCount(0)
{
if (cds::gc::HP::isUsed() )
{
public:
# ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT
- template <typename MarkedPtr> using atomic_marked_ptr = CDS_ATOMIC::atomic<MarkedPtr>;
+ template <typename MarkedPtr> using atomic_marked_ptr = atomics::atomic<MarkedPtr>;
# else
template <typename MarkedPtr>
- class atomic_marked_ptr: public CDS_ATOMIC::atomic<MarkedPtr>
+ class atomic_marked_ptr: public atomics::atomic<MarkedPtr>
{
- typedef CDS_ATOMIC::atomic<MarkedPtr> base_class;
+ typedef atomics::atomic<MarkedPtr> base_class;
public:
# ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT
atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default;
template <typename ThreadData>
struct thread_list_record {
ThreadData * m_pNext ; ///< Next item in thread list
- CDS_ATOMIC::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
+ atomics::atomic<OS::ThreadId> m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned)
thread_list_record()
: m_pNext( nullptr )
typedef cds::details::Allocator< thread_record, Alloc > allocator_type;
private:
- CDS_ATOMIC::atomic<thread_record *> m_pHead;
+ atomics::atomic<thread_record *> m_pHead;
public:
thread_list()
cds::OS::ThreadId const curThreadId = cds::OS::getCurrentThreadId();
// First try to reuse a retired (non-active) HP record
- for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
+ for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
continue;
return pRec;
}
// No records available for reuse
// Allocate and push a new record
pRec = allocator_type().New();
- pRec->m_list.m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
+ pRec->m_list.m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
- thread_record * pOldHead = m_pHead.load( CDS_ATOMIC::memory_order_acquire );
+ thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire );
do {
pRec->m_list.m_pNext = pOldHead;
- } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed ));
return pRec;
}
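The thread-list hunks show the record-reuse protocol: a thread first tries to reclaim a retired record by CASing its owner id from the null thread id to its own, and only if none is free does it allocate a record and push it at the list head. Records are never unlinked, so traversal needs no GC. A self-contained sketch of the same protocol:

    #include <atomic>

    typedef unsigned long thread_id;
    const thread_id nullThreadId = 0;

    struct thread_record {
        std::atomic<thread_id> m_idOwner{ nullThreadId };
        thread_record *        m_pNext = nullptr;    // immutable once published
    };

    std::atomic<thread_record *> m_pHead{ nullptr };

    thread_record * attach_thread( thread_id me )
    {
        // 1. Try to reclaim a retired record: claim it by CASing the owner id
        for ( thread_record * p = m_pHead.load( std::memory_order_acquire ); p; p = p->m_pNext ) {
            thread_id thId = nullThreadId;
            if ( p->m_idOwner.compare_exchange_strong( thId, me,
                     std::memory_order_seq_cst, std::memory_order_relaxed ))
                return p;
        }
        // 2. Nothing free: allocate a record and push it at the list head
        thread_record * pRec = new thread_record;
        pRec->m_idOwner.store( me, std::memory_order_relaxed );
        thread_record * pOldHead = m_pHead.load( std::memory_order_acquire );
        do {
            pRec->m_pNext = pOldHead;
        } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec,
                      std::memory_order_release, std::memory_order_relaxed ));
        return pRec;
    }

    void detach_thread( thread_record * pRec )
    {
        // Releasing ownership retires the record for reuse; it stays linked
        pRec->m_idOwner.store( nullThreadId, std::memory_order_release );
    }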
void retire( thread_record * pRec )
{
assert( pRec != nullptr );
- pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+ pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
}
void detach_all()
thread_record * pNext = nullptr;
cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId;
- for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) {
+ for ( thread_record * pRec = m_pHead.load(atomics::memory_order_acquire); pRec; pRec = pNext ) {
pNext = pRec->m_list.m_pNext;
- if ( pRec->m_list.m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+ if ( pRec->m_list.m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
retire( pRec );
}
}
}
- thread_record * head( CDS_ATOMIC::memory_order mo ) const
+ thread_record * head( atomics::memory_order mo ) const
{
return m_pHead.load( mo );
}
CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; )
CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;)
- thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst );
+ thread_record * p = m_pHead.exchange( nullptr, atomics::memory_order_seq_cst );
while ( p ) {
thread_record * pNext = p->m_list.m_pNext;
- assert( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
- || p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
- || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+ assert( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+ || p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+ || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) )
);
al.Delete( p );
thread_record * pRec = get_thread_record();
assert( pRec != nullptr );
- uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+ uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
- pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_relaxed),
- CDS_ATOMIC::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ pRec->m_nAccessControl.store( gp_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_relaxed),
+ atomics::memory_order_relaxed );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
//CDS_COMPILER_RW_BARRIER;
}
else {
- pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_relaxed );
}
}
assert( pRec != nullptr );
//CDS_COMPILER_RW_BARRIER;
- pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
}
template <typename RCUtag>
thread_record * pRec = get_thread_record();
assert( pRec != nullptr );
- return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
+ return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
}
template <typename RCUtag>
inline bool gp_singleton<RCUtag>::check_grace_period( typename gp_singleton<RCUtag>::thread_record * pRec ) const
{
- uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+ uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
return (v & general_purpose_rcu::c_nNestMask)
- && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
+ && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask ));
}
template <typename RCUtag>
inline void gp_singleton<RCUtag>::flip_and_wait( Backoff& bkoff )
{
OS::ThreadId const nullThreadId = OS::c_NullThreadId;
- m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
+ m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, atomics::memory_order_seq_cst );
- for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
+ for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+ while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) {
bkoff();
CDS_COMPILER_RW_BARRIER;
}
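check_grace_period and flip_and_wait implement the general-purpose RCU grace period: the writer XORs the global epoch bit, then for each reader spins while that reader is both inside a critical section (non-zero nest count) and still stamped with the old epoch. The predicate in isolation, as a sketch:

    #include <atomic>
    #include <cstdint>

    const std::uint32_t c_nControlBit = 0x80000000u;        // epoch bit
    const std::uint32_t c_nNestMask   = c_nControlBit - 1;  // nesting-depth bits

    std::atomic<std::uint32_t> m_nGlobalControl{ 0 };

    // True while the reader still blocks the current grace period
    bool reader_blocks( std::atomic<std::uint32_t> const& readerCtl )
    {
        std::uint32_t v = readerCtl.load( std::memory_order_relaxed );
        return ( v & c_nNestMask ) != 0                                    // inside read-side section
            && ((( v ^ m_nGlobalControl.load( std::memory_order_relaxed ))
                 & ~c_nNestMask ) != 0 );                                  // stamped with the old epoch
    }

    void flip_epoch()
    {
        // seq_cst: readers entering after this point pick up the new epoch
        m_nGlobalControl.fetch_xor( c_nControlBit, std::memory_order_seq_cst );
    }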
// which is not as efficient
# define CDS_GPURCU_DECLARE_THREAD_DATA(tag_) \
template <> struct thread_data<tag_> { \
- CDS_ATOMIC::atomic<uint32_t> m_nAccessControl ; \
+ atomics::atomic<uint32_t> m_nAccessControl ; \
thread_list_record< thread_data > m_list ; \
thread_data(): m_nAccessControl(0) {} \
~thread_data() {} \
typedef gp_singleton_instance< rcu_tag > rcu_instance;
protected:
- CDS_ATOMIC::atomic<uint32_t> m_nGlobalControl;
+ atomics::atomic<uint32_t> m_nGlobalControl;
thread_list< rcu_tag > m_ThreadList;
protected:
m_ThreadList.retire( pRec );
}
- uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const
+ uint32_t global_control_word( atomics::memory_order mo ) const
{
return m_nGlobalControl.load( mo );
}
static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast<rcu_singleton *>( rcu_instance::s_pRCU ); } \
static thread_record * attach_thread() { return instance()->attach_thread() ; } \
static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \
- static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
+ static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
}
CDS_GP_RCU_DECLARE_SINGLETON( general_instant_tag );
protected:
//@cond
buffer_type m_Buffer;
- CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
+ atomics::atomic<uint64_t> m_nCurEpoch;
lock_type m_Lock;
size_t const m_nCapacity;
//@endcond
virtual void retire_ptr( retired_ptr& p )
{
if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+ epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
push_buffer( ep );
}
}
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
- uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
epoch_retired_ptr ep( *itFirst, nEpoch );
++itFirst;
/// Wait to finish a grace period and then clear the buffer
void synchronize()
{
- epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+ epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
synchronize( ep );
}
bool synchronize( epoch_retired_ptr& ep )
{
uint64_t nEpoch;
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
cds::lock::scoped_lock<lock_type> sl( m_Lock );
if ( ep.m_p && m_Buffer.push( ep ) )
return false;
- nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
flip_and_wait();
flip_and_wait();
}
clear_buffer( nEpoch );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
return true;
}
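Note the two consecutive flip_and_wait() calls: this is the standard two-phase grace-period argument. A single flip can race with a reader whose snapshot of the phase word is concurrently in flight; flipping and waiting twice guarantees that every reader which entered its critical section before synchronize() began has exited. A hedged usage sketch (the node type and the rcu_instance() call site are illustrative, not the exact libcds API):

    struct node { int value; };

    void erase_and_reclaim( node* pUnlinked )
    {
        // pUnlinked has already been unlinked from the shared structure
        rcu_instance().synchronize(); // full grace period: two flip_and_wait() passes
        delete pUnlinked;             // no reader can still hold a reference
    }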
//@endcond
/// Waits to finish a grace period
void synchronize()
{
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
cds::lock::scoped_lock<lock_type> sl( m_Lock );
flip_and_wait();
flip_and_wait();
}
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
}
//@cond
protected:
//@cond
buffer_type m_Buffer;
- CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
+ atomics::atomic<uint64_t> m_nCurEpoch;
lock_type m_Lock;
size_t const m_nCapacity;
disposer_thread m_DisposerThread;
if ( bDetachAll )
pThis->m_ThreadList.detach_all();
- pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+ pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
delete pThis;
singleton_ptr::s_pRCU = nullptr;
virtual void retire_ptr( retired_ptr& p )
{
if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+ epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
push_buffer( ep );
}
}
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
- uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
epoch_retired_ptr p( *itFirst, nEpoch );
++itFirst;
//@cond
void synchronize( bool bSync )
{
- uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+ uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
cds::lock::scoped_lock<lock_type> sl( m_Lock );
flip_and_wait();
m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync );
}
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
}
void force_dispose()
{
thread_record * pRec = get_thread_record();
assert( pRec != nullptr );
- uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed );
+ uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
pRec->m_nAccessControl.store(
- sh_singleton<RCUtag>::instance()->global_control_word(CDS_ATOMIC::memory_order_acquire),
- CDS_ATOMIC::memory_order_release
+ sh_singleton<RCUtag>::instance()->global_control_word(atomics::memory_order_acquire),
+ atomics::memory_order_release
);
}
else {
- pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+ pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
}
CDS_COMPILER_RW_BARRIER;
}
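Read together, this fragment and the fetch_sub below form the reader-side lock/unlock pair. A condensed sketch, assuming the thread_record layout above and that the global control word already carries a nesting value of 1; the function names are descriptive, not necessarily the exact API:

    void access_lock( thread_record* pRec )
    {
        uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed );
        if ( (tmp & rcu_class::c_nNestMask) == 0 ) {
            // Outermost lock: publish the current global phase with depth 1
            pRec->m_nAccessControl.store(
                sh_singleton<RCUtag>::instance()->global_control_word( atomics::memory_order_acquire ),
                atomics::memory_order_release );
        }
        else {
            // Nested lock: just bump the nesting counter
            pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release );
        }
    }

    void access_unlock( thread_record* pRec )
    {
        pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
    }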
assert( pRec != nullptr);
CDS_COMPILER_RW_BARRIER;
- pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release );
}
template <typename RCUtag>
thread_record * pRec = get_thread_record();
assert( pRec != nullptr);
- return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
+ return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0;
}
{
thread_record * pRec = cds::threading::getRCU<RCUtag>();
if ( pRec ) {
- CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_acquire );
- pRec->m_bNeedMemBar.store( false, CDS_ATOMIC::memory_order_relaxed );
- CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_signal_fence( atomics::memory_order_acquire );
+ pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed );
+ atomics::atomic_signal_fence( atomics::memory_order_release );
}
}
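For context, a hedged sketch of how such a handler is typically installed with POSIX primitives. The signal number is illustrative (in the real code it is configurable), and the real handler clears the per-thread m_bNeedMemBar shown above rather than a plain thread-local flag:

    #include <csignal>
    #include <cstring>

    namespace {
        // Simplified stand-in for the per-thread m_bNeedMemBar flag
        thread_local volatile sig_atomic_t t_bNeedMemBar = 0;

        void membar_handler( int /*signo*/ )
        {
            t_bNeedMemBar = 0; // tell the writer this thread was interrupted
        }
    }

    void install_membar_handler()
    {
        struct sigaction act;
        std::memset( &act, 0, sizeof( act ));
        act.sa_handler = membar_handler;
        sigaction( SIGUSR1, &act, nullptr );
    }

The broadcasting side (below) sets each thread's flag, delivers the signal via raise_signal( tid ) - presumably a thin pthread_kill() wrapper - and spins until the flag is cleared. Interrupting a thread drives it through a kernel entry/exit, which on mainstream architectures has full-barrier effect; that is the point of the signal-based scheme.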
OS::ThreadId const nullThreadId = OS::c_NullThreadId;
// Send "need membar" signal to all RCU threads
- for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+ for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+ OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
if ( tid != nullThreadId ) {
- pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release );
+ pRec->m_bNeedMemBar.store( true, atomics::memory_order_release );
raise_signal( tid );
}
}
// Wait until all RCU threads have processed the signal
- for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire);
+ for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+ OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire);
if ( tid != nullThreadId ) {
bkOff.reset();
- while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId
- && pRec->m_bNeedMemBar.load( CDS_ATOMIC::memory_order_acquire ))
+ while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId
+ && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire ))
{
// Some OS versions can lose signals,
// so we resend the signal
template <typename RCUtag>
bool sh_singleton<RCUtag>::check_grace_period( thread_record * pRec ) const
{
- uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_acquire );
+ uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire );
return (v & signal_handling_rcu::c_nNestMask)
- && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
+ && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask ));
}
template <typename RCUtag>
{
OS::ThreadId const nullThreadId = OS::c_NullThreadId;
- for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
- while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
+ for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) {
+ while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ))
bkOff();
}
}
// that is not so efficient
# define CDS_SHURCU_DECLARE_THREAD_DATA(tag_) \
template <> struct thread_data<tag_> { \
- CDS_ATOMIC::atomic<uint32_t> m_nAccessControl ; \
- CDS_ATOMIC::atomic<bool> m_bNeedMemBar ; \
+ atomics::atomic<uint32_t> m_nAccessControl ; \
+ atomics::atomic<bool> m_bNeedMemBar ; \
thread_list_record< thread_data > m_list ; \
thread_data(): m_nAccessControl(0), m_bNeedMemBar(false) {} \
~thread_data() {} \
typedef sh_singleton_instance< rcu_tag > rcu_instance;
protected:
- CDS_ATOMIC::atomic<uint32_t> m_nGlobalControl;
+ atomics::atomic<uint32_t> m_nGlobalControl;
thread_list< rcu_tag > m_ThreadList;
int const m_nSigNo;
m_ThreadList.retire( pRec );
}
- uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const
+ uint32_t global_control_word( atomics::memory_order mo ) const
{
return m_nGlobalControl.load( mo );
}
void switch_next_epoch()
{
- m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst );
+ m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, atomics::memory_order_seq_cst );
}
bool check_grace_period( thread_record * pRec ) const;
static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast<rcu_singleton *>( rcu_instance::s_pRCU ); } \
static thread_record * attach_thread() { return instance()->attach_thread() ; } \
static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \
- static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
+ static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \
}
CDS_SIGRCU_DECLARE_SINGLETON( signal_buffered_tag );
protected:
//@cond
buffer_type m_Buffer;
- CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
+ atomics::atomic<uint64_t> m_nCurEpoch;
lock_type m_Lock;
size_t const m_nCapacity;
//@endcond
virtual void retire_ptr( retired_ptr& p )
{
if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+ epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed ));
push_buffer( ep );
}
}
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
- uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
epoch_retired_ptr ep( *itFirst, nEpoch );
++itFirst;
/// Wait to finish a grace period and then clear the buffer
void synchronize()
{
- epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ));
+ epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed ));
synchronize( ep );
}
bool synchronize( epoch_retired_ptr& ep )
{
uint64_t nEpoch;
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
cds::lock::scoped_lock<lock_type> sl( m_Lock );
if ( ep.m_p && m_Buffer.push( ep ) && m_Buffer.size() < capacity())
return false;
- nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed );
back_off bkOff;
base_class::force_membar_all_threads( bkOff );
protected:
//@cond
buffer_type m_Buffer;
- CDS_ATOMIC::atomic<uint64_t> m_nCurEpoch;
+ atomics::atomic<uint64_t> m_nCurEpoch;
lock_type m_Lock;
size_t const m_nCapacity;
disposer_thread m_DisposerThread;
if ( bDetachAll )
pThis->m_ThreadList.detach_all();
- pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ));
+ pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire ));
delete pThis;
singleton_ptr::s_pRCU = nullptr;
virtual void retire_ptr( retired_ptr& p )
{
if ( p.m_p ) {
- epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) );
+ epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) );
push_buffer( ep );
}
}
template <typename ForwardIterator>
void batch_retire( ForwardIterator itFirst, ForwardIterator itLast )
{
- uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed );
+ uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed );
while ( itFirst != itLast ) {
epoch_retired_ptr p( *itFirst, nEpoch );
++itFirst;
//@cond
void synchronize( bool bSync )
{
- uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release );
+ uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire );
+ atomics::atomic_thread_fence( atomics::memory_order_acquire );
{
cds::lock::scoped_lock<lock_type> sl( m_Lock );
GarbageCollector::~GarbageCollector()
{
- thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
+ thread_list_node * pNode = m_pListHead.load( atomics::memory_order_relaxed );
while ( pNode ) {
- assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId );
+ assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId );
clearHRCThreadDesc( pNode );
thread_list_node * pNext = pNode->m_pNext;
deleteHRCThreadDesc( pNode );
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
ContainerNode * pItem;
for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) {
- if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) {
+ if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( atomics::memory_order_relaxed )) != nullptr ) {
pNode->m_arrRetired[n].m_funcFree( pItem );
//pItem->destroy();
- pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ pNode->m_arrRetired[n].m_pNode.store( nullptr, atomics::memory_order_relaxed );
}
}
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
thread_list_node * hprec;
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
- for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
- if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) {
+ for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
+ if ( hprec->m_idOwner.load( atomics::memory_order_acquire ) == curThreadId ) {
assert( !hprec->m_bFree );
return hprec;
}
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
// First try to reuse a retired (non-active) HP record
- for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
+ for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) {
cds::OS::ThreadId expectedThreadId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, atomics::memory_order_acq_rel, atomics::memory_order_relaxed ) )
continue;
hprec->m_pOwner = pThreadGC;
hprec->m_bFree = false;
// Allocate and push a new HP record
hprec = newHRCThreadDesc();
assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
- hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
+ hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
hprec->m_pOwner = pThreadGC;
hprec->m_bFree = false;
thread_list_node * pOldHead;
- pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
+ pOldHead = m_pListHead.load( atomics::memory_order_relaxed );
do {
hprec->m_pNext = pOldHead;
- } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() );
return hprec;
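The loop above is the standard lock-free "push onto an intrusive singly linked list" idiom. The same pattern in isolation, as a generic sketch:

    // Read the head, link the new node in front of it, retry until the CAS
    // succeeds. release on success publishes the fully initialized node;
    // relaxed on failure is enough because compare_exchange_weak refreshes
    // pOld with the freshly observed head and the loop simply retries.
    template <typename Node>
    void push_front( atomics::atomic<Node*>& head, Node* pNew )
    {
        Node* pOld = head.load( atomics::memory_order_relaxed );
        do {
            pNew->m_pNext = pOld;
        } while ( !head.compare_exchange_weak( pOld, pNew,
            atomics::memory_order_release, atomics::memory_order_relaxed ));
    }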
if the destruction of thread object is called by the destructor
after thread termination
*/
- assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != cds::OS::c_NullThreadId );
+ assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) != cds::OS::c_NullThreadId );
pNode->m_pOwner = nullptr;
- pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+ pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() );
}
typedef std::vector< ContainerNode * > hazard_ptr_list;
details::thread_descriptor * pRec = pThreadGC->m_pDesc;
- assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+ assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
// Step 1: mark all pRec->m_arrRetired items as "traced"
{
details::retired_vector::const_iterator itEnd = pRec->m_arrRetired.end();
for ( details::retired_vector::const_iterator it = pRec->m_arrRetired.begin() ; it != itEnd; ++it ) {
- ContainerNode * pNode = it->m_pNode.load( CDS_ATOMIC::memory_order_acquire );
+ ContainerNode * pNode = it->m_pNode.load( atomics::memory_order_acquire );
if ( pNode ) {
if ( pNode->m_RC.value() == 0 ) {
- pNode->m_bTrace.store( true, CDS_ATOMIC::memory_order_release );
+ pNode->m_bTrace.store( true, atomics::memory_order_release );
if ( pNode->m_RC.value() != 0 )
- pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release );
+ pNode->m_bTrace.store( false, atomics::memory_order_release );
}
}
}
// Stage 2: Scan HP list and insert non-null values into plist
{
- thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
+ thread_list_node * pNode = m_pListHead.load( atomics::memory_order_acquire );
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
details::retired_node& node = *it;
- ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
+ ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire);
if ( !pNode )
continue;
- if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(CDS_ATOMIC::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
+ if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(atomics::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) {
// pNode can be destroyed safely
- node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
- if ( node.m_nClaim.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) {
+ node.m_bDone.store( true, atomics::memory_order_release );
+ if ( node.m_nClaim.load( atomics::memory_order_acquire ) == 0 ) {
pNode->terminate( pThreadGC, false );
- pNode->clean( CDS_ATOMIC::memory_order_relaxed );
+ pNode->clean( atomics::memory_order_relaxed );
node.m_funcFree( pNode );
arr.pop( nRetired );
}
pNode->terminate( pThreadGC, true );
- //node.m_bDone.store( true, CDS_ATOMIC::memory_order_release );
+ //node.m_bDone.store( true, atomics::memory_order_release );
CDS_HRC_STATISTIC( ++m_Stat.m_ScanClaimGuarded );
}
else {
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
- for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext )
+ for ( thread_list_node * pRec = m_pListHead.load(atomics::memory_order_acquire); pRec; pRec = pRec->m_pNext )
{
// If threadDesc is free then own it
cds::OS::ThreadId expectedThreadId = nullThreadId;
- if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) )
+ if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed) )
{
continue;
}
details::retired_vector::iterator it = src.begin();
for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) {
- if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr )
+ if ( it->m_pNode.load( atomics::memory_order_relaxed ) == nullptr )
continue;
- dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree );
+ dest.push( it->m_pNode.load(atomics::memory_order_relaxed), it->m_funcFree );
src.pop( nRetired );
while ( dest.isFull() ) {
}
pRec->m_bFree = true;
}
- pRec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
+ pRec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
}
}
CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls );
//const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+ thread_list_node * pThread = m_pListHead.load(atomics::memory_order_acquire);
while ( pThread ) {
for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) {
details::retired_node& rRetiredNode = pThread->m_arrRetired[i];
- ContainerNode * pNode = rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire);
- if ( pNode && !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) {
- rRetiredNode.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release );
- if ( !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire)
- && pNode == rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire) )
+ ContainerNode * pNode = rRetiredNode.m_pNode.load(atomics::memory_order_acquire);
+ if ( pNode && !rRetiredNode.m_bDone.load(atomics::memory_order_acquire) ) {
+ rRetiredNode.m_nClaim.fetch_add( 1, atomics::memory_order_release );
+ if ( !rRetiredNode.m_bDone.load(atomics::memory_order_acquire)
+ && pNode == rRetiredNode.m_pNode.load(atomics::memory_order_acquire) )
{
pNode->cleanUp( pThis );
}
- rRetiredNode.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ rRetiredNode.m_nClaim.fetch_sub( 1, atomics::memory_order_release );
}
}
pThread = pThread->m_pNext;
stat.nRetiredPtrInFreeHRCRecs = 0;
// Walk through HRC records
- for ( thread_list_node *hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
+ for ( thread_list_node *hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNext ) {
++stat.nHRCRecAllocated;
size_t nRetiredNodeCount = hprec->m_arrRetired.retiredNodeCount();
if ( hprec->m_bFree ) {
CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; )
CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;)
- hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed );
- m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed );
+ hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed );
+ m_pListHead.store( nullptr, atomics::memory_order_relaxed );
hplist_node * pNext = nullptr;
for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) {
- assert( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId
- || hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId
- || !cds::OS::isThreadAlive( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) )
+ assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId
+ || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId
+ || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) )
);
details::retired_vector& vect = hprec->m_arrRetired;
details::retired_vector::iterator itRetired = vect.begin();
}
vect.clear();
pNext = hprec->m_pNextNode;
- hprec->m_bFree.store( true, CDS_ATOMIC::memory_order_relaxed );
+ hprec->m_bFree.store( true, atomics::memory_order_relaxed );
DeleteHPRec( hprec );
}
}
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
// First try to reuse a retired (non-active) HP record
- for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) {
cds::OS::ThreadId thId = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) )
+ if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) )
continue;
- hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release );
+ hprec->m_bFree.store( false, atomics::memory_order_release );
return hprec;
}
// No HP records available for reuse
// Allocate and push a new HP record
hprec = NewHPRec();
- hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed );
- hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_relaxed );
+ hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed );
+ hprec->m_bFree.store( false, atomics::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ atomics::atomic_thread_fence( atomics::memory_order_release );
- hplist_node * pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_acquire );
+ hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire );
do {
hprec->m_pNextNode = pOldHead;
- } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ));
+ } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed ));
return hprec;
}
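The standalone release fence above publishes the preceding relaxed stores (m_idOwner, m_bFree) to any thread that later acquire-reads the list head. The fence-publication idiom in isolation, with illustrative names:

    #include <cassert>

    atomics::atomic<int>  g_payload( 0 );
    atomics::atomic<bool> g_ready( false );

    void producer()
    {
        g_payload.store( 42, atomics::memory_order_relaxed );
        atomics::atomic_thread_fence( atomics::memory_order_release );
        g_ready.store( true, atomics::memory_order_relaxed );
    }

    void consumer()
    {
        // The release fence sequenced before the store to g_ready synchronizes
        // with this acquire load, so the payload write is visible.
        if ( g_ready.load( atomics::memory_order_acquire ))
            assert( g_payload.load( atomics::memory_order_relaxed ) == 42 );
    }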
pRec->clear();
Scan( pRec );
hplist_node * pNode = static_cast<hplist_node *>( pRec );
- pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release );
+ pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release );
}
void GarbageCollector::detachAllThread()
{
hplist_node * pNext = nullptr;
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) {
pNext = hprec->m_pNextNode;
- if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) {
+ if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) {
RetireHPRec( hprec );
}
}
// Stage 1: Scan HP list and insert non-null values into plist
- hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
// Search for guarded pointers in the retired array
- hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire);
+ hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire);
while ( pNode ) {
for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) {
{
CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount );
- assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
+ assert( static_cast<hplist_node *>(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() );
const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId;
const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId();
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
// If m_bFree == true then hprec->m_arrRetired is empty - we don't need to scan it
- if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_acquire) )
+ if ( hprec->m_bFree.load(atomics::memory_order_acquire) )
continue;
// Take ownership of hprec if it is free.
// Several threads may race here, so we rely on atomic operations only.
{
- cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire);
+ cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire);
if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) {
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
continue;
}
else {
curOwner = nullThreadId;
- if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ))
+ if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed ))
continue;
}
}
}
src.clear();
- hprec->m_bFree.store(true, CDS_ATOMIC::memory_order_release);
- hprec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release );
+ hprec->m_bFree.store(true, atomics::memory_order_release);
+ hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release );
}
}
stat.nTotalRetiredPtrCount =
stat.nRetiredPtrInFreeHPRecs = 0;
- for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
+ for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) {
++stat.nHPRecAllocated;
stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size();
- if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ) {
+ if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) {
// Free HP record
stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size();
}
namespace cds {
- CDS_EXPORT_API CDS_ATOMIC::atomic<size_t> threading::ThreadData::s_nLastUsedProcNo(0);
+ CDS_EXPORT_API atomics::atomic<size_t> threading::ThreadData::s_nLastUsedProcNo(0);
CDS_EXPORT_API size_t threading::ThreadData::s_nProcCount = 1;
#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS
#endif
namespace details {
- static CDS_ATOMIC::atomic<size_t> s_nInitCallCount(0);
+ static atomics::atomic<size_t> s_nInitCallCount(0);
bool CDS_EXPORT_API init_first_call()
{
- return s_nInitCallCount.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) == 0;
+ return s_nInitCallCount.fetch_add(1, atomics::memory_order_relaxed) == 0;
}
bool CDS_EXPORT_API fini_last_call()
{
- if ( s_nInitCallCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) {
- CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release );
+ if ( s_nInitCallCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) {
+ atomics::atomic_thread_fence( atomics::memory_order_release );
return true;
}
return false;
details::retired_ptr_node * pHead = nullptr;
details::retired_ptr_node * pTail = nullptr;
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_relaxed)) {
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) {
details::guard_data::handoff_ptr h = pGuard->pHandOff;
pGuard->pHandOff = nullptr;
while ( h ) {
details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize();
if ( retiredList.first ) {
- size_t nLiberateThreshold = m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed);
+ size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed);
details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) );
// Get list of retired pointers
}
// Liberate cycle
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) )
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
{
// get guarded pointer
- details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire);
+ details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
if ( valGuarded ) {
details::retired_ptr_node * pRetired = set.erase( valGuarded );
}
else {
// liberate cycle did not free any retired pointer - double liberate threshold
- m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed );
+ m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed );
}
}
}
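The else branch above implements an adaptive policy: a liberate pass that frees nothing means every retired pointer is still guarded, so the threshold doubles to let more garbage accumulate before the next, equally expensive, scan (e.g. 1024 -> 2048 -> 4096 while readers keep everything pinned). A minimal sketch of the policy:

    size_t adapt_threshold( size_t nThreshold, size_t nFreed )
    {
        // Double only when a pass reclaimed nothing; otherwise keep it stable.
        return nFreed == 0 ? nThreshold * 2 : nThreshold;
    }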
{
details::guard_data::handoff_ptr const nullHandOff = nullptr;
- for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) )
+ for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) )
{
// get guarded pointer
- details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire);
+ details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire);
details::guard_data::handoff_ptr h;
if ( valGuarded ) {
// Now, try to set retired node pRetired as a hand-off node for the guard
cds::lock::Auto<details::guard_data::handoff_spin> al( pGuard->spinHandOff );
- if ( valGuarded == pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire) ) {
+ if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) {
if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) {
h = nullHandOff ; //nullptr;
details::retired_ptr_node * pTail = pGuard->pHandOff;
ThreadPool& m_Pool;
boost::thread * m_pThread;
cds::OS::Timer m_Timer;
- CDS_ATOMIC::atomic<bool> m_bTimeElapsed;
+ atomics::atomic<bool> m_bTimeElapsed;
public:
double m_nDuration;
virtual void fini() {}
void stop()
{
- m_bTimeElapsed.store( true, CDS_ATOMIC::memory_order_release );
+ m_bTimeElapsed.store( true, atomics::memory_order_release );
}
bool time_elapsed() const
{
- return m_bTimeElapsed.load( CDS_ATOMIC::memory_order_acquire );
+ return m_bTimeElapsed.load( atomics::memory_order_acquire );
}
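stop() and time_elapsed() above are the classic atomic stop-flag pair: the controller stores with release, the worker polls with acquire. The same pattern in a self-contained sketch:

    atomics::atomic<bool> s_bStop( false );

    void worker()
    {
        while ( !s_bStop.load( atomics::memory_order_acquire )) {
            // ... perform one bounded unit of work ...
        }
    }

    void stop_worker()
    {
        s_bStop.store( true, atomics::memory_order_release );
    }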
bool check_timeout( size_t nMaxDuration )
class cxx11_atomic_class: public CppUnitMini::TestCase
{
template <typename AtomicFlag>
- void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
{
- CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order);
+ atomics::memory_order mo_clear = convert_to_store_order(order);
for ( int i = 0; i < 5; ++i ) {
CPPUNIT_ASSERT( !f.test_and_set( order ));
CPPUNIT_ASSERT( f.test_and_set( order ) );
f.clear();
}
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
+ do_test_atomic_flag_mo( f, atomics::memory_order_consume );
+ do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
+ do_test_atomic_flag_mo( f, atomics::memory_order_release );
+ do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
+ do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
}
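The tests drive every operation through every memory_order, but a load may not use memory_order_release and a store may not use memory_order_acquire or memory_order_consume, so convert_to_load_order()/convert_to_store_order() presumably clamp the order to a legal one. A hedged sketch of what such helpers typically look like (the real implementations may differ in detail):

    static inline atomics::memory_order convert_to_load_order( atomics::memory_order order )
    {
        switch ( order ) {
            case atomics::memory_order_release:
            case atomics::memory_order_acq_rel:
                return atomics::memory_order_acquire; // loads cannot be release
            default:
                return order;
        }
    }

    static inline atomics::memory_order convert_to_store_order( atomics::memory_order order )
    {
        switch ( order ) {
            case atomics::memory_order_acquire:
            case atomics::memory_order_consume:
            case atomics::memory_order_acq_rel:
                return atomics::memory_order_release; // stores cannot be acquire/consume
            default:
                return order;
        }
    }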
template <class Atomic, typename Integral>
}
template <class Atomic, typename Integral>
- void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_type( Atomic& a, atomics::memory_order order )
{
typedef Integral integral_type;
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
CPPUNIT_ASSERT( a.is_lock_free() );
a.store((integral_type) 0, oStore );
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == prev );
- CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == n );
prev = n;
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == prev );
- CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == n );
prev = n;
}
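The paired asserts above capture the compare_exchange contract: on success `expected` is left unchanged, on failure it is overwritten with the observed value. Note also that compare_exchange_weak is allowed to fail spuriously; the single-shot success asserts hold on the platforms exercised, but production code wraps the weak form in the canonical retry loop:

    // cur is refreshed with the freshly observed value on every failure,
    // so a spurious failure simply costs one extra iteration.
    template <typename T>
    void transform( atomics::atomic<T>& a, T (*f)( T ))
    {
        T cur = a.load( atomics::memory_order_relaxed );
        while ( !a.compare_exchange_weak( cur, f( cur ),
            atomics::memory_order_release, atomics::memory_order_relaxed ))
        { /* retry */ }
    }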
template <class Atomic, typename Integral>
- void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
{
do_test_atomic_type< Atomic, Integral >( a, order );
typedef Integral integral_type;
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
// fetch_xxx testing
a.store( (integral_type) 0, oStore );
{
do_test_atomic_integral<Atomic, Integral >(a);
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_release );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
}
template <typename Integral>
void test_atomic_integral()
{
- typedef CDS_ATOMIC::atomic<Integral> atomic_type;
+ typedef atomics::atomic<Integral> atomic_type;
atomic_type a[8];
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
template <typename Integral>
void test_atomic_integral_volatile()
{
- typedef CDS_ATOMIC::atomic<Integral> volatile atomic_type;
+ typedef atomics::atomic<Integral> volatile atomic_type;
atomic_type a[8];
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
}
template <class AtomicBool>
- void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
{
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
CPPUNIT_ASSERT( a.is_lock_free() );
a.store( false, oStore );
CPPUNIT_ASSERT( a.load( oLoad ) == false );
bool expected = false;
- CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == true );
CPPUNIT_ASSERT( a.load( oLoad ) == true );
a.store( false, oStore );
expected = false;
- CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == true );
CPPUNIT_ASSERT( a.load( oLoad ) == true );
template <typename Atomic>
- void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
{
- CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
- CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+ atomics::memory_order oLoad = convert_to_load_order(order);
+ atomics::memory_order oStore = convert_to_store_order(order);
void * p;
a.store( (void *) arr, oStore );
CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load( oLoad )) == 1 );
p = arr;
- CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 0 );
CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 1 );
- CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
- CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 6 );
- CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 3 );
CPPUNIT_ASSERT( *reinterpret_cast<char *>(p) == 4 );
template <bool Volatile>
void do_test_atomic_pointer_void()
{
- typedef typename add_volatile<CDS_ATOMIC::atomic< void *>, Volatile>::type atomic_pointer;
+ typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
char arr[8];
const char aSize = sizeof(arr)/sizeof(arr[0]);
CPPUNIT_ASSERT( *reinterpret_cast<char *>(a.load()) == i - 1 );
}
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
}
template <typename Atomic, typename Integral>
- void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order )
+ void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
{
typedef Integral integral_type;
- CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
- CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+ atomics::memory_order oLoad = convert_to_load_order(order);
+ atomics::memory_order oStore = convert_to_store_order(order);
integral_type * p;
a.store( arr, oStore );
CPPUNIT_ASSERT( *a.load( oLoad ) == 1 );
p = arr;
- CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 0 );
CPPUNIT_ASSERT( *p == 1 );
- CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 3 );
CPPUNIT_ASSERT( *p == 4 );
void test_atomic_pointer_for()
{
typedef Integral integral_type;
- typedef typename add_volatile<CDS_ATOMIC::atomic< integral_type *>, Volatile>::type atomic_pointer;
+ typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
integral_type arr[8];
const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
CPPUNIT_ASSERT( *a.load() == i - 1 );
}
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
}
public:
{
// Array to test different alignment
- CDS_ATOMIC::atomic_flag flags[8];
+ atomics::atomic_flag flags[8];
for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
do_test_atomic_flag( flags[i] );
}
{
// Array to test different alignment
- CDS_ATOMIC::atomic_flag volatile flags[8];
+ atomics::atomic_flag volatile flags[8];
for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
do_test_atomic_flag( flags[i] );
}
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
do_test_atomic_bool( a[i] );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
+ do_test_atomic_bool( a[i], atomics::memory_order_consume );
+ do_test_atomic_bool( a[i], atomics::memory_order_acquire );
+ do_test_atomic_bool( a[i], atomics::memory_order_release );
+ do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
+ do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
}
}
void test_atomic_bool()
{
- test_atomic_bool_< CDS_ATOMIC::atomic<bool> >();
+ test_atomic_bool_< atomics::atomic<bool> >();
}
void test_atomic_bool_volatile()
{
- test_atomic_bool_< CDS_ATOMIC::atomic<bool> volatile >();
+ test_atomic_bool_< atomics::atomic<bool> volatile >();
}
void test_atomic_char() { test_atomic_integral<char>(); }
class cxx11_atomic_func: public CppUnitMini::TestCase
{
template <typename AtomicFlag>
- void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order )
{
- CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order);
+ atomics::memory_order mo_clear = convert_to_store_order(order);
f.clear( convert_to_store_order(order) );
for ( int i = 0; i < 5; ++i ) {
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order ));
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order ) );
- CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear );
- CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear );
+ CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set_explicit( &f, order ));
+ CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set_explicit( &f, order ) );
+ atomics::atomic_flag_clear_explicit( &f, mo_clear );
+ atomics::atomic_flag_clear_explicit( &f, mo_clear );
}
//CPPUNIT_ASSERT( f.m_Flag == 0 );
}
for ( int i = 0; i < 5; ++i ) {
//CPPUNIT_ASSERT( f.m_Flag == 0 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set( &f ));
+ CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set( &f ));
//CPPUNIT_ASSERT( f.m_Flag != 0 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set( &f ) );
+ CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set( &f ) );
//CPPUNIT_ASSERT( f.m_Flag != 0 );
- CDS_ATOMIC::atomic_flag_clear(&f);
+ atomics::atomic_flag_clear(&f);
//CPPUNIT_ASSERT( f.m_Flag == 0 );
- CDS_ATOMIC::atomic_flag_clear(&f);
+ atomics::atomic_flag_clear(&f);
}
//CPPUNIT_ASSERT( f.m_Flag == 0 );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_flag_mo( f, atomics::memory_order_relaxed );
+ do_test_atomic_flag_mo( f, atomics::memory_order_consume );
+ do_test_atomic_flag_mo( f, atomics::memory_order_acquire );
+ do_test_atomic_flag_mo( f, atomics::memory_order_release );
+ do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel );
+ do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst );
}
template <class Atomic, typename Integral>
{
typedef Integral integral_type;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
- CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+ CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+ atomics::atomic_store( &a, (integral_type) 0 );
CPPUNIT_ASSERT( a == 0 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, n ) == 0 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == n );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, n ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
}
- integral_type prev = CDS_ATOMIC::atomic_load( &a );
+ integral_type prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, n));
CPPUNIT_ASSERT( expected == prev );
CPPUNIT_ASSERT( expected != n );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n) );
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, n) );
CPPUNIT_ASSERT( expected == n );
prev = n;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
}
- CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+ atomics::atomic_store( &a, (integral_type) 0 );
- prev = CDS_ATOMIC::atomic_load( &a );
+ prev = atomics::atomic_load( &a );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, n));
CPPUNIT_ASSERT( expected == prev );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, n));
CPPUNIT_ASSERT( expected == n );
prev = n;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n );
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == prev );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == prev );
}
template <class Atomic, typename Integral>
typedef Integral integral_type;
// fetch_xxx testing
- CDS_ATOMIC::atomic_store( &a, (integral_type) 0 );
+ atomics::atomic_store( &a, (integral_type) 0 );
// fetch_add
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
- integral_type prev = CDS_ATOMIC::atomic_load( &a );
+ integral_type prev = atomics::atomic_load( &a );
integral_type n = integral_type(42) << (nByte * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, n) == prev);
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, n) == prev);
}
// fetch_sub
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
- integral_type prev = CDS_ATOMIC::atomic_load( &a );
+ integral_type prev = atomics::atomic_load( &a );
integral_type n = integral_type(42) << ((nByte - 1) * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, n) == prev);
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, n) == prev);
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 );
// fetch_or / fetch_xor / fetch_and
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
- integral_type prev = CDS_ATOMIC::atomic_load( &a );
+ integral_type prev = atomics::atomic_load( &a );
integral_type mask = 1 << nBit;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or( &a, mask ) == prev );
- prev = CDS_ATOMIC::atomic_load( &a );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_or( &a, mask ) == prev );
+ prev = atomics::atomic_load( &a );
CPPUNIT_ASSERT( ( prev & mask) == mask);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and( &a, (integral_type) ~mask ) == prev );
- prev = CDS_ATOMIC::atomic_load( &a );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_and( &a, (integral_type) ~mask ) == prev );
+ prev = atomics::atomic_load( &a );
CPPUNIT_ASSERT_EX( integral_type(prev & mask) == integral_type(0), "prev=" << std::hex << prev << ", mask=" << std::hex << mask);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor( &a, mask ) == prev );
- prev = CDS_ATOMIC::atomic_load( &a );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_xor( &a, mask ) == prev );
+ prev = atomics::atomic_load( &a );
CPPUNIT_ASSERT( ( prev & mask) == mask);
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == (integral_type) -1 );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == (integral_type) -1 );
}
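Why the final assertion holds: in each pass fetch_or sets bit nBit, fetch_and clears it, and fetch_xor sets it back, so every bit ends up set and the value equals (integral_type) -1. The same arithmetic in plain form:

    #include <cassert>

    void bit_cycle_demo()
    {
        unsigned v = 0;
        for ( unsigned bit = 0; bit < 8; ++bit ) {
            unsigned mask = 1u << bit;
            v |= mask;      // fetch_or sets the bit
            v &= ~mask;     // fetch_and clears it again
            v ^= mask;      // fetch_xor sets it back, and it stays set
        }
        assert( v == 0xFF ); // mirrors the (integral_type) -1 check above
    }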
template <class Atomic, typename Integral>
- void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_type( Atomic& a, atomics::memory_order order )
{
typedef Integral integral_type;
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
- CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+ CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+ atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
CPPUNIT_ASSERT( a == 0 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, n, order ) == 0 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, n, order ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
}
- integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == prev );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == n );
prev = n;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
}
- CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+ atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
- prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ prev = atomics::atomic_load_explicit( &a, oLoad );
for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) {
integral_type n = integral_type(42) << (nByte * 8);
integral_type expected = prev;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == prev );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == n );
prev = n;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n );
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev );
}
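A note on the compare-exchange assertions above: on success the CAS returns true and leaves expected untouched; on failure it returns false and overwrites expected with the value actually observed. Strictly speaking, compare_exchange_weak may also fail spuriously even when the values match, so the weak-form success assertions are not guaranteed by the C++11 wording; the strong form is. A minimal sketch of the contract, using std::atomic, which has the same semantics:

#include <atomic>
#include <cassert>

void cas_contract_sketch()
{
    std::atomic<int> a( 0 );
    int expected = 0;
    assert( a.compare_exchange_strong( expected, 42 ));  // succeeds: a becomes 42
    assert( expected == 0 );                             // expected untouched on success
    assert( !a.compare_exchange_strong( expected, 7 ));  // fails: values differ
    assert( expected == 42 );                            // expected now holds the observed value
}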
template <class Atomic, typename Integral>
- void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_integral( Atomic& a, atomics::memory_order order )
{
do_test_atomic_type< Atomic, Integral >( a, order );
typedef Integral integral_type;
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
// fetch_xxx testing
- CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore );
+ atomics::atomic_store_explicit( &a, (integral_type) 0, oStore );
// fetch_add
for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte )
{
- integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
integral_type n = integral_type(42) << (nByte * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, n, order) == prev);
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, n, order) == prev);
}
// fetch_sub
for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte )
{
- integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
integral_type n = integral_type(42) << ((nByte - 1) * 8);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, n, order ) == prev);
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, n, order ) == prev);
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 );
// fetch_or / fetch_xor / fetch_and
for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit )
{
- integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) ;;
+ integral_type prev = atomics::atomic_load_explicit( &a, oLoad );
integral_type mask = integral_type(1) << nBit;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or_explicit( &a, mask, order ) == prev );
- prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_or_explicit( &a, mask, order ) == prev );
+ prev = atomics::atomic_load_explicit( &a, oLoad );
CPPUNIT_ASSERT( ( prev & mask) == mask);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev );
- prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev );
+ prev = atomics::atomic_load_explicit( &a, oLoad );
CPPUNIT_ASSERT( ( prev & mask) == 0);
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor_explicit( &a, mask, order ) == prev );
- prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_xor_explicit( &a, mask, order ) == prev );
+ prev = atomics::atomic_load_explicit( &a, oLoad );
CPPUNIT_ASSERT( ( prev & mask) == mask);
}
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 );
}
template <typename Atomic, typename Integral>
void test_atomic_integral_( Atomic& a )
{
do_test_atomic_integral<Atomic, Integral >(a);
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_release );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_integral<Atomic, Integral >( a, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_relaxed );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_consume );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acquire );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_release );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_acq_rel );
+ do_test_atomic_integral<Atomic, Integral >( a, atomics::memory_order_seq_cst );
}
template <typename Integral>
void test_atomic_integral()
{
- typedef CDS_ATOMIC::atomic<Integral> atomic_type;
+ typedef atomics::atomic<Integral> atomic_type;
atomic_type a[8];
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
test_atomic_integral_<atomic_type, Integral>( a[i] );
template <typename Integral>
void test_atomic_integral_volatile()
{
- typedef CDS_ATOMIC::atomic<Integral> volatile atomic_type;
+ typedef atomics::atomic<Integral> volatile atomic_type;
atomic_type a[8];
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
test_atomic_integral_<atomic_type, Integral>( a[i] );
template <class AtomicBool>
void do_test_atomic_bool(AtomicBool& a)
{
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
- CDS_ATOMIC::atomic_store( &a, false );
+ CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+ atomics::atomic_store( &a, false );
CPPUNIT_ASSERT( a == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, true ) == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, true ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false );
bool expected = false;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, true));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, true));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, false));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, false));
CPPUNIT_ASSERT( expected == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
- CDS_ATOMIC::atomic_store( &a, false );
+ atomics::atomic_store( &a, false );
expected = false;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, true));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, true));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, false));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, false));
CPPUNIT_ASSERT( expected == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true );
}
template <class AtomicBool>
- void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order )
{
- const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order );
- const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order );
+ const atomics::memory_order oLoad = convert_to_load_order( order );
+ const atomics::memory_order oStore = convert_to_store_order( order );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) );
- CDS_ATOMIC::atomic_store_explicit( &a, false, oStore );
+ CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) );
+ atomics::atomic_store_explicit( &a, false, oStore );
CPPUNIT_ASSERT( a == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, true, order ) == false );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, true, order ) == false );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false );
bool expected = false;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
- CDS_ATOMIC::atomic_store( &a, false );
+ atomics::atomic_store( &a, false );
expected = false;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == false );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, atomics::memory_order_relaxed));
CPPUNIT_ASSERT( expected == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true );
}
template <typename Atomic, typename Integral>
- void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order )
+ void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order )
{
typedef Integral integral_type;
- CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
- CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+ atomics::memory_order oLoad = convert_to_load_order(order);
+ atomics::memory_order oStore = convert_to_store_order(order);
integral_type * p;
- CDS_ATOMIC::atomic_store_explicit( &a, arr, oStore );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 );
+ atomics::atomic_store_explicit( &a, arr, oStore );
+ CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 );
p = arr;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 0 );
CPPUNIT_ASSERT( *p == 1 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 3 );
CPPUNIT_ASSERT( *p == 4 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, arr, order ) == arr + 3 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == arr );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 );
+ CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, arr, order ) == arr + 3 );
+ CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == arr );
+ CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 );
for ( integral_type i = 1; i < aSize; ++i ) {
- integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
CPPUNIT_ASSERT( *p == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order ) == p );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i + 1 );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order ) == p );
+ CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i + 1 );
}
for ( integral_type i = aSize; i > 1; --i ) {
- integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad );
+ integral_type * p = atomics::atomic_load_explicit( &a, oLoad );
CPPUNIT_ASSERT( *p == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order ) == p );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i - 1 );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order ) == p );
+ CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i - 1 );
}
}
template <typename Integral, bool Volatile>
void test_atomic_pointer_for()
{
typedef Integral integral_type;
- typedef typename add_volatile<CDS_ATOMIC::atomic< integral_type *>, Volatile>::type atomic_pointer;
+ typedef typename add_volatile<atomics::atomic< integral_type *>, Volatile>::type atomic_pointer;
integral_type arr[8];
const integral_type aSize = sizeof(arr)/sizeof(arr[0]);
atomic_pointer a;
integral_type * p;
- CDS_ATOMIC::atomic_store( &a, arr );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 );
+ atomics::atomic_store( &a, arr );
+ CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 );
p = arr;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 5 ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &p, arr + 5 ));
CPPUNIT_ASSERT( p == arr + 0 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 3 ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &p, arr + 3 ));
CPPUNIT_ASSERT( p == arr + 5 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 3 ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &p, arr + 3 ));
CPPUNIT_ASSERT( p == arr + 5 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 5 ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &p, arr + 5 ));
CPPUNIT_ASSERT( p == arr + 3 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, arr ) == arr + 3 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == arr );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 );
+ CPPUNIT_ASSERT( atomics::atomic_exchange( &a, arr ) == arr + 3 );
+ CPPUNIT_ASSERT( atomics::atomic_load( &a ) == arr );
+ CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 );
for ( integral_type i = 1; i < aSize; ++i ) {
- integral_type * p = CDS_ATOMIC::atomic_load( &a );
+ integral_type * p = atomics::atomic_load( &a );
CPPUNIT_ASSERT( *p == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 ) == p );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i + 1 );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 ) == p );
+ CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i + 1 );
}
for ( integral_type i = aSize; i > 1; --i ) {
- integral_type * p = CDS_ATOMIC::atomic_load( &a );
+ integral_type * p = atomics::atomic_load( &a );
CPPUNIT_ASSERT( *p == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 ) == p );
- CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i - 1 );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 ) == p );
+ CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i - 1 );
}
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
- test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel );
+ test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst );
}
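Worth spelling out for the pointer tests: fetch_add and fetch_sub on an atomic object pointer advance by whole elements, exactly like built-in pointer arithmetic, which is why the loops above step through arr one integral_type at a time. A short sketch with the standard API:

#include <atomic>
#include <cassert>

void pointer_fetch_add_sketch()
{
    int arr[4] = { 1, 2, 3, 4 };
    std::atomic<int *> p( arr );
    assert( p.fetch_add( 1 ) == arr );  // returns the old pointer
    assert( p.load() == arr + 1 );      // advanced by one int, not one byte
    assert( *p.load() == 2 );
}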
template <typename Atomic>
- void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order )
+ void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order )
{
- CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order);
- CDS_ATOMIC::memory_order oStore = convert_to_store_order(order);
+ atomics::memory_order oLoad = convert_to_load_order(order);
+ atomics::memory_order oStore = convert_to_store_order(order);
char * p;
- CDS_ATOMIC::atomic_store_explicit( &a, (void *) arr, oStore );
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 );
+ atomics::atomic_store_explicit( &a, (void *) arr, oStore );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == 1 );
p = arr;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 0 );
CPPUNIT_ASSERT( *p == 1 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 5 );
CPPUNIT_ASSERT( *p == 6 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed ));
CPPUNIT_ASSERT( p == arr + 3 );
CPPUNIT_ASSERT( *p == 4 );
- CPPUNIT_ASSERT( reinterpret_cast<char *>(CDS_ATOMIC::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 );
- CPPUNIT_ASSERT( reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == arr );
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 );
+ CPPUNIT_ASSERT( reinterpret_cast<char *>(atomics::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 );
+ CPPUNIT_ASSERT( reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == arr );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == 1 );
for ( char i = 1; i < aSize; ++i ) {
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order ));
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i + 1 );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order ));
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i + 1 );
}
for ( char i = aSize; i > 1; --i ) {
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order ));
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i - 1 );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order ));
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load_explicit( &a, oLoad )) == i - 1 );
}
}
template <bool Volatile>
void do_test_atomic_pointer_void()
{
- typedef typename add_volatile<CDS_ATOMIC::atomic< void *>, Volatile>::type atomic_pointer;
+ typedef typename add_volatile<atomics::atomic< void *>, Volatile>::type atomic_pointer;
char arr[8];
const char aSize = sizeof(arr)/sizeof(arr[0]);
atomic_pointer a;
char * p;
- CDS_ATOMIC::atomic_store( &a, (void *) arr );
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == 1 );
+ atomics::atomic_store( &a, (void *) arr );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == 1 );
p = arr;
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) ));
CPPUNIT_ASSERT( p == arr + 0 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) ));
CPPUNIT_ASSERT( p == arr + 5 );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) ));
+ CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) ));
CPPUNIT_ASSERT( p == arr + 5 );
- CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) ));
+ CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) ));
CPPUNIT_ASSERT( p == arr + 3 );
- CPPUNIT_ASSERT( reinterpret_cast<char *>( CDS_ATOMIC::atomic_exchange( &a, (void *) arr )) == arr + 3 );
- CPPUNIT_ASSERT( reinterpret_cast<char *>( CDS_ATOMIC::atomic_load( &a )) == arr );
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == 1 );
+ CPPUNIT_ASSERT( reinterpret_cast<char *>( atomics::atomic_exchange( &a, (void *) arr )) == arr + 3 );
+ CPPUNIT_ASSERT( reinterpret_cast<char *>( atomics::atomic_load( &a )) == arr );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == 1 );
for ( char i = 1; i < aSize; ++i ) {
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 ));
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i + 1 );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 ));
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i + 1 );
}
for ( char i = aSize; i > 1; --i ) {
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i );
- CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 ));
- CPPUNIT_ASSERT( *reinterpret_cast<char *>(CDS_ATOMIC::atomic_load( &a )) == i - 1 );
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i );
+ CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 ));
+ CPPUNIT_ASSERT( *reinterpret_cast<char *>(atomics::atomic_load( &a )) == i - 1 );
}
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel );
+ do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst );
}
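Note that the void-pointer test relies on byte-wise arithmetic for atomic<void *>: atomic_fetch_add( &a, 1 ) is expected to move the pointer one char forward. That is a library extension; std::atomic<void *> provides no arithmetic at all, since void * has no element size. A standard-conforming sketch of the behaviour the test intends would use char *:

#include <atomic>
#include <cassert>

void void_pointer_arith_sketch()
{
    char arr[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    std::atomic<char *> p( arr );
    p.fetch_add( 1 );          // one byte forward, matching the test's intent
    assert( *p.load() == 2 );
}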
public:
void test_atomic_flag()
{
- CDS_ATOMIC::atomic_flag flags[8];
+ atomics::atomic_flag flags[8];
for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
do_test_atomic_flag( flags[i] );
}
void test_atomic_flag_volatile()
{
- CDS_ATOMIC::atomic_flag volatile flags[8];
+ atomics::atomic_flag volatile flags[8];
for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i )
do_test_atomic_flag( flags[i] );
}
template <class AtomicBool>
void test_atomic_bool_()
{
AtomicBool a[8];
for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) {
do_test_atomic_bool( a[i] );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel );
- do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst );
+ do_test_atomic_bool( a[i], atomics::memory_order_relaxed );
+ do_test_atomic_bool( a[i], atomics::memory_order_consume );
+ do_test_atomic_bool( a[i], atomics::memory_order_acquire );
+ do_test_atomic_bool( a[i], atomics::memory_order_release );
+ do_test_atomic_bool( a[i], atomics::memory_order_acq_rel );
+ do_test_atomic_bool( a[i], atomics::memory_order_seq_cst );
}
}
void test_atomic_bool()
{
- test_atomic_bool_<CDS_ATOMIC::atomic<bool> >();
+ test_atomic_bool_<atomics::atomic<bool> >();
}
void test_atomic_bool_volatile()
{
- test_atomic_bool_<CDS_ATOMIC::atomic<bool> volatile >();
+ test_atomic_bool_<atomics::atomic<bool> volatile >();
}
void test_atomic_char() { test_atomic_integral<char>(); }
void test_atomic_fence()
{
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_relaxed );
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_consume );
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acquire );
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_release );
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acq_rel );
- CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_seq_cst );
-
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_relaxed );
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_consume );
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acquire );
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_release );
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acq_rel );
- CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_seq_cst );
+ atomics::atomic_thread_fence(atomics::memory_order_relaxed );
+ atomics::atomic_thread_fence(atomics::memory_order_consume );
+ atomics::atomic_thread_fence(atomics::memory_order_acquire );
+ atomics::atomic_thread_fence(atomics::memory_order_release );
+ atomics::atomic_thread_fence(atomics::memory_order_acq_rel );
+ atomics::atomic_thread_fence(atomics::memory_order_seq_cst );
+
+ atomics::atomic_signal_fence(atomics::memory_order_relaxed );
+ atomics::atomic_signal_fence(atomics::memory_order_consume );
+ atomics::atomic_signal_fence(atomics::memory_order_acquire );
+ atomics::atomic_signal_fence(atomics::memory_order_release );
+ atomics::atomic_signal_fence(atomics::memory_order_acq_rel );
+ atomics::atomic_signal_fence(atomics::memory_order_seq_cst );
}
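The fence test above only checks that every ordering compiles and executes. In real use, thread fences pair across threads; a minimal sketch of the canonical release/acquire pairing (illustrative names, not from the library):

#include <atomic>

std::atomic<int>  g_data( 0 );
std::atomic<bool> g_ready( false );

void producer()
{
    g_data.store( 42, std::memory_order_relaxed );
    std::atomic_thread_fence( std::memory_order_release );  // publish all prior writes
    g_ready.store( true, std::memory_order_relaxed );
}

void consumer()
{
    while ( !g_ready.load( std::memory_order_relaxed ))
        ;  // spin until the flag is observed
    std::atomic_thread_fence( std::memory_order_acquire );  // synchronize with producer
    // g_data is now guaranteed to read 42
}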
public:
namespace misc {
- static inline CDS_ATOMIC::memory_order convert_to_store_order( CDS_ATOMIC::memory_order order )
+ static inline atomics::memory_order convert_to_store_order( atomics::memory_order order )
{
switch ( order ) {
- case CDS_ATOMIC::memory_order_acquire:
- case CDS_ATOMIC::memory_order_consume:
- return CDS_ATOMIC::memory_order_relaxed;
- case CDS_ATOMIC::memory_order_acq_rel:
- return CDS_ATOMIC::memory_order_release;
+ case atomics::memory_order_acquire:
+ case atomics::memory_order_consume:
+ return atomics::memory_order_relaxed;
+ case atomics::memory_order_acq_rel:
+ return atomics::memory_order_release;
default:
return order;
}
}
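This helper exists because atomic_store_explicit has preconditions: acquire-flavoured orders are not valid for a store, so the tests demote them before passing the order through. For example (a sketch using the function as defined above):

atomics::memory_order o = convert_to_store_order( atomics::memory_order_acq_rel );
// o == atomics::memory_order_release; handing acq_rel (or acquire/consume)
// straight to a plain store would violate the C++11 preconditions.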
- static inline CDS_ATOMIC::memory_order convert_to_load_order( CDS_ATOMIC::memory_order order )
+ static inline atomics::memory_order convert_to_load_order( atomics::memory_order order )
{
switch ( order ) {
- case CDS_ATOMIC::memory_order_release:
- return CDS_ATOMIC::memory_order_relaxed;
- case CDS_ATOMIC::memory_order_acq_rel:
- return CDS_ATOMIC::memory_order_acquire;
+ case atomics::memory_order_release:
+ return atomics::memory_order_relaxed;
+ case atomics::memory_order_acq_rel:
+ return atomics::memory_order_acquire;
default:
return order;
}
}
typedef size_t value_type;
typedef std::pair<key_type const, value_type> pair_type;
- CDS_ATOMIC::atomic<size_t> m_nInsThreadCount;
+ atomics::atomic<size_t> m_nInsThreadCount;
// Inserts keys from [0..N)
template <class Map>
}
}
- getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_acquire );
+ getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release );
}
};
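The pattern here, repeated in the set and queue tests below, is a done-counter handshake: each inserting thread decrements the shared counter with release ordering when it finishes, and the deleting threads poll it with acquire ordering and stop once it reaches zero and their own work is exhausted. A minimal sketch of the protocol (names are illustrative, not from the test suite):

#include <atomic>
#include <cstddef>

std::atomic<size_t> g_nProducers;

void producer_body()
{
    // ... insert items ...
    g_nProducers.fetch_sub( 1, std::memory_order_release );  // announce completion
}

void consumer_body()
{
    for (;;) {
        // ... delete items while any remain ...
        if ( g_nProducers.load( std::memory_order_acquire ) == 0 )
            break;  // all producers finished
    }
}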
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
}
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
}
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
typedef InsertThread<Map> insert_thread;
typedef DeleteThread<Map> delete_thread;
- m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
CppUnitMini::ThreadPool pool( *this );
pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount );
typedef DeleteThread<Map> delete_thread;
typedef ExtractThread< typename Map::gc, Map > extract_thread;
- m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
CppUnitMini::ThreadPool pool( *this );
pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount );
struct value_type {
size_t nKey;
size_t nData;
- CDS_ATOMIC::atomic<size_t> nEnsureCall;
- CDS_ATOMIC::atomic<bool> bInitialized;
+ atomics::atomic<size_t> nEnsureCall;
+ atomics::atomic<bool> bInitialized;
cds::OS::ThreadId threadId ; // insert thread id
typedef cds::lock::Spinlock< cds::backoff::pause > lock_type;
value_type( value_type const& s )
: nKey(s.nKey)
, nData(s.nData)
- , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
- , bInitialized( s.bInitialized.load(CDS_ATOMIC::memory_order_relaxed) )
+ , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed))
+ , bInitialized( s.bInitialized.load(atomics::memory_order_relaxed) )
, threadId( cds::OS::getCurrentThreadId() )
{}
value_type& operator=( value_type const& v )
{
nKey = v.nKey;
nData = v.nData;
- nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
- bInitialized.store(v.bInitialized.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed);
+ nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
+ bInitialized.store(v.bInitialized.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed);
return *this;
}
val.second.nData = val.first * 8;
++nTestFunctorRef;
- val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed);
+ val.second.bInitialized.store( true, atomics::memory_order_relaxed);
}
};
++nCreated;
val.second.nKey = val.first;
val.second.nData = val.first * 8;
- val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed);
+ val.second.bInitialized.store( true, atomics::memory_order_relaxed);
}
else {
- val.second.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ val.second.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed );
++nModified;
}
}
void operator ()( pair_type& item )
{
while ( true ) {
- if ( item.second.bInitialized.load( CDS_ATOMIC::memory_order_relaxed )) {
+ if ( item.second.bInitialized.load( atomics::memory_order_relaxed )) {
cds::lock::scoped_lock< typename value_type::lock_type> ac( item.second.m_access );
if ( m_cnt.nKeyExpected == item.second.nKey && m_cnt.nKeyExpected * 8 == item.second.nData )
}
m_fTime = m_Timer.duration() - m_fTime;
- getTest().m_nProducerCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ getTest().m_nProducerCount.fetch_sub( 1, atomics::memory_order_release );
}
};
}
else {
++m_nPopEmpty;
- if ( getTest().m_nProducerCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 && m_Queue.empty() )
+ if ( getTest().m_nProducerCount.load( atomics::memory_order_acquire ) == 0 && m_Queue.empty() )
break;
}
}
protected:
size_t m_nThreadPushCount;
- CDS_ATOMIC::atomic<size_t> m_nProducerCount;
+ atomics::atomic<size_t> m_nProducerCount;
static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe;
protected:
CppUnitMini::ThreadPool pool( *this );
- m_nProducerCount.store( s_nWriterThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nProducerCount.store( s_nWriterThreadCount, atomics::memory_order_release );
// Writers must be first
pool.add( new Producer<Queue>( pool, testQueue ), s_nWriterThreadCount );
protected:
size_t m_nThreadPushCount;
- CDS_ATOMIC::atomic<size_t> m_nWriterDone;
+ atomics::atomic<size_t> m_nWriterDone;
protected:
template <class Queue>
typedef key_thread key_type;
typedef size_t value_type;
- CDS_ATOMIC::atomic<size_t> m_nInsThreadCount;
+ atomics::atomic<size_t> m_nInsThreadCount;
// Inserts keys from [0..N)
template <class Set>
}
}
- getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release );
}
};
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nDeleteFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nExtractFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
++m_nExtractFailed;
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
xp.release();
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
xp.release();
}
}
- if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 )
+ if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 )
break;
}
}
typedef InsertThread<Set> insert_thread;
typedef DeleteThread<Set> delete_thread;
- m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
CppUnitMini::ThreadPool pool( *this );
pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount );
typedef DeleteThread<Set> delete_thread;
typedef ExtractThread< typename Set::gc, Set > extract_thread;
- m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release );
CppUnitMini::ThreadPool pool( *this );
pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount );
struct value_type {
size_t nKey;
size_t nData;
- CDS_ATOMIC::atomic<size_t> nEnsureCall;
+ atomics::atomic<size_t> nEnsureCall;
bool volatile bInitialized;
cds::OS::ThreadId threadId ; // insert thread id
value_type( value_type const& s )
: nKey(s.nKey)
, nData(s.nData)
- , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed))
+ , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed))
, bInitialized( s.bInitialized )
, threadId( cds::OS::getCurrentThreadId() )
{}
value_type& operator=( value_type const& v )
{
nKey = v.nKey;
nData = v.nData;
- nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed );
+ nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed );
bInitialized = v.bInitialized;
return *this;
++nCreated;
}
else {
- val.val.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed );
+ val.val.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed );
++nModified;
}
}
class IntrusiveStack_PushPop: public CppUnitMini::TestCase
{
- CDS_ATOMIC::atomic<size_t> m_nWorkingProducers;
+ atomics::atomic<size_t> m_nWorkingProducers;
static CDS_CONSTEXPR_CONST size_t c_nValArraySize = 1024;
static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe;
++m_nPushError;
}
- getTest().m_nWorkingProducers.fetch_sub( 1, CDS_ATOMIC::memory_order_release );
+ getTest().m_nWorkingProducers.fetch_sub( 1, atomics::memory_order_release );
}
};
m_nDirtyPop = 0;
memset( m_arrPop, 0, sizeof(m_arrPop));
- while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) {
+ while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) {
typename Stack::value_type * p = m_Stack.pop();
if ( p ) {
p->nConsumer = m_nThreadNo;
template <class Stack>
void test( Stack& testStack, value_array<typename Stack::value_type>& arrValue )
{
- m_nWorkingProducers.store( s_nPushThreadCount, CDS_ATOMIC::memory_order_release );
+ m_nWorkingProducers.store( s_nPushThreadCount, atomics::memory_order_release );
size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
typename Stack::value_type * pValStart = arrValue.get();
class Stack_PushPop: public CppUnitMini::TestCase
{
- CDS_ATOMIC::atomic<size_t> m_nWorkingProducers;
+ atomics::atomic<size_t> m_nWorkingProducers;
static size_t const c_nValArraySize = 1024;
template <class Stack>
}
- getTest().m_nWorkingProducers.fetch_sub(1, CDS_ATOMIC::memory_order_release);
+ getTest().m_nWorkingProducers.fetch_sub(1, atomics::memory_order_release);
}
};
memset( m_arrPop, 0, sizeof(m_arrPop));
SimpleValue v;
- while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) {
+ while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) {
if ( m_Stack.pop( v )) {
++m_nPopCount;
if ( v.nNo < sizeof(m_arrPop)/sizeof(m_arrPop[0]) )
template <class Stack>
void test( Stack& testStack )
{
- m_nWorkingProducers.store(s_nPushThreadCount, CDS_ATOMIC::memory_order_release);
+ m_nWorkingProducers.store(s_nPushThreadCount, atomics::memory_order_release);
size_t const nPushCount = s_nStackSize / s_nPushThreadCount;
CppUnitMini::ThreadPool pool( *this );