From 785a8577b9d9ad4988c494055a9a95f1cbf62d65 Mon Sep 17 00:00:00 2001
From: khizmax
Date: Sat, 20 Sep 2014 20:40:56 +0400
Subject: [PATCH] Replace CDS_ATOMIC with namespace atomics

Rename namespace cds::cxx11_atomics to cds::cxx11_atomic
---
 cds/algo/flat_combining.h | 18 +-
 cds/compiler/cxx11_atomic.h | 8 +-
 cds/compiler/gcc/amd64/cxx11_atomic.h | 6 +-
 cds/compiler/gcc/ia64/cxx11_atomic.h | 4 +-
 cds/compiler/gcc/sparc/cxx11_atomic.h | 4 +-
 cds/compiler/gcc/x86/cxx11_atomic.h | 4 +-
 cds/compiler/vc/amd64/cxx11_atomic.h | 4 +-
 cds/compiler/vc/x86/cxx11_atomic.h | 4 +-
 cds/container/vyukov_mpmc_cycle_queue.h | 2 +-
 cds/cxx11_atomic.h | 56 +--
 cds/details/defs.h | 2 +-
 cds/details/marked_ptr.h | 2 +-
 cds/gc/hp_decl.h | 42 +-
 cds/gc/hrc/details/hrc_retired.h | 22 +-
 cds/gc/hrc/hrc.h | 28 +-
 cds/gc/hrc_decl.h | 92 ++--
 cds/gc/hzp/details/hp_alloc.h | 10 +-
 cds/gc/hzp/hzp.h | 12 +-
 cds/gc/ptb/ptb.h | 142 +++---
 cds/gc/ptb_decl.h | 46 +-
 cds/intrusive/basket_queue.h | 10 +-
 cds/intrusive/cuckoo_set.h | 24 +-
 cds/intrusive/details/dummy_node_holder.h | 2 +-
 cds/intrusive/details/ellen_bintree_base.h | 6 +-
 cds/intrusive/ellen_bintree_impl.h | 32 +-
 cds/intrusive/ellen_bintree_rcu.h | 30 +-
 cds/intrusive/lazy_list_base.h | 4 +-
 cds/intrusive/lazy_list_hrc.h | 12 +-
 cds/intrusive/lazy_list_nogc.h | 2 +-
 cds/intrusive/lazy_list_rcu.h | 6 +-
 cds/intrusive/michael_deque.h | 12 +-
 cds/intrusive/michael_list_base.h | 2 +-
 cds/intrusive/michael_list_hrc.h | 10 +-
 cds/intrusive/michael_list_impl.h | 8 +-
 cds/intrusive/michael_list_nogc.h | 4 +-
 cds/intrusive/michael_list_rcu.h | 8 +-
 cds/intrusive/moir_queue.h | 4 +-
 cds/intrusive/msqueue.h | 10 +-
 cds/intrusive/optimistic_queue.h | 8 +-
 cds/intrusive/segmented_queue.h | 26 +-
 cds/intrusive/single_link_struct.h | 12 +-
 cds/intrusive/skip_list_base.h | 20 +-
 cds/intrusive/skip_list_hrc.h | 16 +-
 cds/intrusive/skip_list_impl.h | 30 +-
 cds/intrusive/skip_list_nogc.h | 22 +-
 cds/intrusive/skip_list_rcu.h | 44 +-
 cds/intrusive/split_list.h | 8 +-
 cds/intrusive/split_list_base.h | 8 +-
 cds/intrusive/split_list_nogc.h | 8 +-
 cds/intrusive/split_list_rcu.h | 8 +-
 cds/intrusive/striped_set/striping_policy.h | 20 +-
 cds/intrusive/treiber_stack.h | 22 +-
 cds/intrusive/tsigas_cycle_queue.h | 16 +-
 cds/lock/spinlock.h | 34 +-
 cds/memory/michael/allocator.h | 72 +--
 cds/memory/michael/osalloc_stat.h | 24 +-
 cds/memory/michael/procheap_stat.h | 106 ++--
 cds/opt/options.h | 24 +-
 cds/refcounter.h | 10 +-
 cds/threading/details/_common.h | 4 +-
 cds/urcu/details/base.h | 38 +-
 cds/urcu/details/gp.h | 24 +-
 cds/urcu/details/gp_decl.h | 8 +-
 cds/urcu/details/gpb.h | 14 +-
 cds/urcu/details/gpi.h | 4 +-
 cds/urcu/details/gpt.h | 14 +-
 cds/urcu/details/sh.h | 40 +-
 cds/urcu/details/sh_decl.h | 12 +-
 cds/urcu/details/sig_buffered.h | 12 +-
 cds/urcu/details/sig_threaded.h | 12 +-
 src/hrc_gc.cpp | 74 +--
 src/hzp_gc.cpp | 58 +--
 src/init.cpp | 10 +-
 src/ptb_gc.cpp | 16 +-
 tests/cppunit/thread.h | 6 +-
 tests/test-hdr/misc/cxx11_atomic_class.cpp | 142 +++---
 tests/test-hdr/misc/cxx11_atomic_func.cpp | 456 +++++++++---------
 .../misc/cxx11_convert_memory_order.h | 22 +-
 tests/unit/map2/map_delodd.cpp | 20 +-
 tests/unit/map2/map_insdel_func.cpp | 20 +-
 .../queue/intrusive_queue_reader_writer.cpp | 8 +-
 tests/unit/queue/queue_reader_writer.cpp | 2 +-
 tests/unit/set2/set_delodd.cpp | 20 +-
 tests/unit/set2/set_insdel_func.h | 8 +-
 tests/unit/stack/stack_intrusive_pushpop.cpp | 8 +-
 tests/unit/stack/stack_pushpop.cpp | 8 +-
 86 files changed, 1146 insertions(+), 1146 deletions(-)

diff --git
a/cds/algo/flat_combining.h b/cds/algo/flat_combining.h index 9663d39d..ccd7f585 100644 --- a/cds/algo/flat_combining.h +++ b/cds/algo/flat_combining.h @@ -93,10 +93,10 @@ namespace cds { namespace algo { Each data structure based on flat combining contains a class derived from \p %publication_record */ struct publication_record { - CDS_ATOMIC::atomic nRequest; ///< Request field (depends on data structure) - CDS_ATOMIC::atomic nState; ///< Record state: inactive, active, removed + atomics::atomic nRequest; ///< Request field (depends on data structure) + atomics::atomic nState; ///< Record state: inactive, active, removed unsigned int nAge; ///< Age of the record - CDS_ATOMIC::atomic pNext; ///< Next record in publication list + atomics::atomic pNext; ///< Next record in publication list void * pOwner; ///< [internal data] Pointer to \ref kernel object that manages the publication list /// Initializes publication record @@ -111,13 +111,13 @@ namespace cds { namespace algo { /// Returns the value of \p nRequest field unsigned int op() const { - return nRequest.load( CDS_ATOMIC::memory_order_relaxed ); + return nRequest.load( atomics::memory_order_relaxed ); } /// Checks if the operation is done bool is_done() const { - return nRequest.load( CDS_ATOMIC::memory_order_relaxed ) == req_Response; + return nRequest.load( atomics::memory_order_relaxed ) == req_Response; } }; @@ -543,7 +543,7 @@ namespace cds { namespace algo { if ( pRec->nState.load(memory_model::memory_order_relaxed) == active && pRec->pOwner ) { // record is active and kernel is alive unsigned int nState = active; - pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + pRec->nState.compare_exchange_strong( nState, removed, memory_model::memory_order_release, atomics::memory_order_relaxed ); } else { // record is not in publication list or kernel already deleted @@ -577,7 +577,7 @@ namespace cds { namespace algo { pRec->pNext = p; // Failed CAS changes p } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_release, atomics::memory_order_relaxed )); m_Stat.onActivatPubRecord(); } } @@ -746,7 +746,7 @@ namespace cds { namespace algo { if ( pPrev ) { publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); if ( pPrev->pNext.compare_exchange_strong( p, pNext, - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { p->nState.store( inactive, memory_model::memory_order_release ); p = pNext; @@ -767,7 +767,7 @@ namespace cds { namespace algo { if ( pPrev ) { publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); if ( pPrev->pNext.compare_exchange_strong( p, pNext, - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { cxx11_allocator().Delete( static_cast( p )); m_Stat.onDeletePubRecord(); diff --git a/cds/compiler/cxx11_atomic.h b/cds/compiler/cxx11_atomic.h index 96ab4399..9ad958a6 100644 --- a/cds/compiler/cxx11_atomic.h +++ b/cds/compiler/cxx11_atomic.h @@ -7,7 +7,7 @@ #include #include -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { typedef enum memory_order { memory_order_relaxed, memory_order_consume, @@ -17,7 +17,7 @@ namespace cds { namespace cxx11_atomics { 
memory_order_seq_cst } memory_order; -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic #if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) @@ -51,7 +51,7 @@ namespace cds { namespace cxx11_atomics { // In C++11, make_unsigned is declared in #include // for make_unsigned -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { // forward declarations template @@ -2271,7 +2271,7 @@ namespace cds { namespace cxx11_atomics { platform::signal_fence( order ); } -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/amd64/cxx11_atomic.h b/cds/compiler/gcc/amd64/cxx11_atomic.h index b79befcc..fc701e8f 100644 --- a/cds/compiler/gcc/amd64/cxx11_atomic.h +++ b/cds/compiler/gcc/amd64/cxx11_atomic.h @@ -7,11 +7,11 @@ #include //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 { # ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT // primitives up to 32bit + fences - using namespace cds::cxx11_atomics::platform::gcc::x86; + using namespace cds::cxx11_atomic::platform::gcc::x86; # endif //----------------------------------------------------------------------------- @@ -201,7 +201,7 @@ namespace cds { namespace cxx11_atomics { #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_GCC_AMD64_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/ia64/cxx11_atomic.h b/cds/compiler/gcc/ia64/cxx11_atomic.h index 12582c2d..64867a33 100644 --- a/cds/compiler/gcc/ia64/cxx11_atomic.h +++ b/cds/compiler/gcc/ia64/cxx11_atomic.h @@ -12,7 +12,7 @@ #include //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace ia64 { static inline void itanium_full_fence() CDS_NOEXCEPT @@ -647,7 +647,7 @@ namespace cds { namespace cxx11_atomics { using namespace gcc::ia64; #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_GCC_IA64_CXX11_ATOMIC_H diff --git a/cds/compiler/gcc/sparc/cxx11_atomic.h b/cds/compiler/gcc/sparc/cxx11_atomic.h index f160e909..771540d2 100644 --- a/cds/compiler/gcc/sparc/cxx11_atomic.h +++ b/cds/compiler/gcc/sparc/cxx11_atomic.h @@ -40,7 +40,7 @@ #define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace Sparc { static inline void fence_before( memory_order order ) CDS_NOEXCEPT @@ -596,7 +596,7 @@ namespace cds { namespace cxx11_atomics { using namespace gcc::Sparc; #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #undef CDS_SPARC_MB_ACQ diff --git a/cds/compiler/gcc/x86/cxx11_atomic.h b/cds/compiler/gcc/x86/cxx11_atomic.h index 52fc7398..6323291a 100644 --- a/cds/compiler/gcc/x86/cxx11_atomic.h +++ b/cds/compiler/gcc/x86/cxx11_atomic.h @@ -7,7 +7,7 @@ #include //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE 
namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 { //----------------------------------------------------------------------------- @@ -178,7 +178,7 @@ namespace cds { namespace cxx11_atomics { using namespace gcc::x86; #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_GCC_X86_CXX11_ATOMIC_H diff --git a/cds/compiler/vc/amd64/cxx11_atomic.h b/cds/compiler/vc/amd64/cxx11_atomic.h index 29d8b859..9115c2c9 100644 --- a/cds/compiler/vc/amd64/cxx11_atomic.h +++ b/cds/compiler/vc/amd64/cxx11_atomic.h @@ -32,7 +32,7 @@ #endif //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 { static inline void fence_before( memory_order order ) CDS_NOEXCEPT @@ -578,7 +578,7 @@ namespace cds { namespace cxx11_atomics { using namespace vc::amd64; #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_VC_AMD64_CXX11_ATOMIC_H diff --git a/cds/compiler/vc/x86/cxx11_atomic.h b/cds/compiler/vc/x86/cxx11_atomic.h index b11d50f7..fbaea366 100644 --- a/cds/compiler/vc/x86/cxx11_atomic.h +++ b/cds/compiler/vc/x86/cxx11_atomic.h @@ -27,7 +27,7 @@ #endif //@cond -namespace cds { namespace cxx11_atomics { +namespace cds { namespace cxx11_atomic { namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace vc { CDS_CXX11_INLINE_NAMESPACE namespace x86 { static inline void fence_before( memory_order order ) CDS_NOEXCEPT @@ -550,7 +550,7 @@ namespace cds { namespace cxx11_atomics { using namespace vc::x86; #endif } // namespace platform -}} // namespace cds::cxx11_atomics +}} // namespace cds::cxx11_atomic //@endcond #endif // #ifndef __CDS_COMPILER_VC_X86_CXX11_ATOMIC_H diff --git a/cds/container/vyukov_mpmc_cycle_queue.h b/cds/container/vyukov_mpmc_cycle_queue.h index 0b871d13..8f5f8e57 100644 --- a/cds/container/vyukov_mpmc_cycle_queue.h +++ b/cds/container/vyukov_mpmc_cycle_queue.h @@ -98,7 +98,7 @@ namespace cds { namespace container { protected: //@cond - typedef CDS_ATOMIC::atomic sequence_type; + typedef atomics::atomic sequence_type; struct cell_type { sequence_type sequence; diff --git a/cds/cxx11_atomic.h b/cds/cxx11_atomic.h index 8508fd8d..5eb5e482 100644 --- a/cds/cxx11_atomic.h +++ b/cds/cxx11_atomic.h @@ -38,8 +38,8 @@ namespace cds { Using \p CDS_ATOMIC macro you may call \ library functions and classes, for example: \code - CDS_ATOMIC::atomic atomInt; - CDS_ATOMIC::atomic_store_explicit( &atomInt, 0, CDS_ATOMIC::memory_order_release ); + atomics::atomic atomInt; + atomics::atomic_store_explicit( &atomInt, 0, atomics::memory_order_release ); \endcode \par Microsoft Visual C++ @@ -86,8 +86,8 @@ namespace cds { You can compile \p libcds and your projects with boost.atomic specifying \p -DCDS_USE_BOOST_ATOMIC in compiler's command line. 
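For illustration (an editor's sketch, not text from the patch): after the rename, user code spells only the \p atomics alias and the build selects the backend (\p std, \p boost, or \p cds::cxx11_atomic via the macros described above); the type and calls below are just the standard C++11 atomic interface re-exported through that alias, and the counter type itself is hypothetical.
\code
#include <cds/cxx11_atomic.h>

// hypothetical user-side counter, shown only to demonstrate the renamed alias
struct spin_counter {
    atomics::atomic<size_t> m_nValue;

    spin_counter(): m_nValue(0) {}

    size_t inc()
    {
        // formerly spelled CDS_ATOMIC::memory_order_relaxed
        return m_nValue.fetch_add( 1, atomics::memory_order_relaxed ) + 1;
    }

    bool try_clear( size_t nExpected )
    {
        // strong CAS with separate success/failure orderings
        return m_nValue.compare_exchange_strong( nExpected, 0,
            atomics::memory_order_acquire, atomics::memory_order_relaxed );
    }
};
\endcode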
*/ -namespace cxx11_atomics { -}} // namespace cds::cxx11_atomics +namespace cxx11_atomic { +}} // namespace cds::cxx11_atomic //@cond #if defined(CDS_USE_BOOST_ATOMIC) @@ -95,7 +95,7 @@ namespace cxx11_atomics { # include # if BOOST_VERSION >= 105400 # include -# define CDS_ATOMIC boost + namespace atomics = boost; # define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost { # define CDS_CXX11_ATOMIC_END_NAMESPACE } # else @@ -104,13 +104,13 @@ namespace cxx11_atomics { #elif defined(CDS_USE_LIBCDS_ATOMIC) // libcds atomic # include -# define CDS_ATOMIC cds::cxx11_atomics -# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomics { + namespace atomics = cds::cxx11_atomic; +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic { # define CDS_CXX11_ATOMIC_END_NAMESPACE }} #else // Compiler provided C++11 atomic # include -# define CDS_ATOMIC std + namespace atomics = std; # define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std { # define CDS_CXX11_ATOMIC_END_NAMESPACE } #endif @@ -132,7 +132,7 @@ namespace cds { class event_counter { //@cond - CDS_ATOMIC::atomic_size_t m_counter; + atomics::atomic_size_t m_counter; //@endcond public: @@ -152,7 +152,7 @@ namespace cds { value_type n //< new value of the counter ) CDS_NOEXCEPT { - m_counter.exchange( n, CDS_ATOMIC::memory_order_relaxed ); + m_counter.exchange( n, atomics::memory_order_relaxed ); return n; } @@ -164,7 +164,7 @@ namespace cds { size_t n ///< addendum ) CDS_NOEXCEPT { - return m_counter.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ) + n; + return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n; } /// Substraction @@ -175,47 +175,47 @@ namespace cds { size_t n ///< subtrahend ) CDS_NOEXCEPT { - return m_counter.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ) - n; + return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n; } /// Get current value of the counter operator size_t () const CDS_NOEXCEPT { - return m_counter.load( CDS_ATOMIC::memory_order_relaxed ); + return m_counter.load( atomics::memory_order_relaxed ); } /// Preincrement size_t operator ++() CDS_NOEXCEPT { - return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1; + return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1; } /// Postincrement size_t operator ++(int) CDS_NOEXCEPT { - return m_counter.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + return m_counter.fetch_add( 1, atomics::memory_order_relaxed ); } /// Predecrement size_t operator --() CDS_NOEXCEPT { - return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) - 1; + return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1; } /// Postdecrement size_t operator --(int) CDS_NOEXCEPT { - return m_counter.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ); + return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ); } /// Get current value of the counter size_t get() const CDS_NOEXCEPT { - return m_counter.load( CDS_ATOMIC::memory_order_relaxed ); + return m_counter.load( atomics::memory_order_relaxed ); } /// Resets the counter to 0 void reset() CDS_NOEXCEPT { - m_counter.store( 0, CDS_ATOMIC::memory_order_release ); + m_counter.store( 0, atomics::memory_order_release ); } }; @@ -228,7 +228,7 @@ namespace cds { class item_counter { public: - typedef CDS_ATOMIC::atomic_size_t atomic_type ; ///< atomic type used + typedef atomics::atomic_size_t atomic_type ; ///< atomic type used typedef size_t counter_type ; ///< Integral item counter type (size_t) private: @@ -243,7 +243,7 @@ namespace 
cds { {} /// Returns current value of the counter - counter_type value(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) const + counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const { return m_Counter.load( order ); } @@ -267,13 +267,13 @@ namespace cds { } /// Increments the counter. Semantics: postincrement - counter_type inc(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed ) + counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed ) { return m_Counter.fetch_add( 1, order ); } /// Decrements the counter. Semantics: postdecrement - counter_type dec(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) + counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed) { return m_Counter.fetch_sub( 1, order ); } @@ -301,7 +301,7 @@ namespace cds { } /// Resets count to 0 - void reset(CDS_ATOMIC::memory_order order = CDS_ATOMIC::memory_order_relaxed) + void reset(atomics::memory_order order = atomics::memory_order_relaxed) { m_Counter.store( 0, order ); } @@ -320,7 +320,7 @@ namespace cds { typedef size_t counter_type ; ///< Counter type public: /// Returns 0 - counter_type value(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) const + counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) const { return 0; } @@ -332,13 +332,13 @@ namespace cds { } /// Dummy increment. Always returns 0 - size_t inc(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + size_t inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) { return 0; } /// Dummy increment. Always returns 0 - size_t dec(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + size_t dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) { return 0; } @@ -366,7 +366,7 @@ namespace cds { } /// Dummy function - void reset(CDS_ATOMIC::memory_order /*order*/ = CDS_ATOMIC::memory_order_relaxed) + void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) {} }; diff --git a/cds/details/defs.h b/cds/details/defs.h index 7260b893..b74820f9 100644 --- a/cds/details/defs.h +++ b/cds/details/defs.h @@ -34,7 +34,7 @@ schema used. However, any implementation supports common interface for the type of data structure. To implement any lock-free data structure, two things are needed: - - atomic operation library conforming with C++11 memory model. The libcds has such feature, see cds::cxx11_atomics namespace for + - atomic operation library conforming with C++11 memory model. The libcds has such feature, see cds::cxx11_atomic namespace for details and compiler-specific information. - safe memory reclamation (SMR) or garbage collecting (GC) algorithm. The libcds has an implementation of several well-known SMR algos, see below. 
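As a side note to the defs.h documentation above (an editor's sketch, not part of the patch): the first requirement, a C++11-conforming atomic library, is exactly what the \p atomics alias provides, and the CAS publication loop below is the pattern the library itself uses, for example in the retired-pointer buffer further down; the node type and the global head are assumptions made only for this example. The second requirement shows up as soon as nodes are removed: a popped node may still be held by a concurrent reader, so it must be retired through an SMR scheme (HP, HRC, PTB, or RCU) rather than freed directly.
\code
// editor's illustration only
struct node {
    node * m_pNext;
};

atomics::atomic<node *> g_Head( nullptr );

void push( node & n )
{
    node * pHead = g_Head.load( atomics::memory_order_relaxed );
    do {
        n.m_pNext = pHead;
        // on failure, compare_exchange_weak reloads pHead and the loop retries
    } while ( !g_Head.compare_exchange_weak( pHead, &n,
                atomics::memory_order_release, atomics::memory_order_relaxed ));
}
\endcode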
diff --git a/cds/details/marked_ptr.h b/cds/details/marked_ptr.h index 24eed5d3..b656f020 100644 --- a/cds/details/marked_ptr.h +++ b/cds/details/marked_ptr.h @@ -257,7 +257,7 @@ CDS_CXX11_ATOMIC_BEGIN_NAMESPACE { private: typedef cds::details::marked_ptr marked_ptr; - typedef CDS_ATOMIC::atomic atomic_impl; + typedef atomics::atomic atomic_impl; atomic_impl m_atomic; public: diff --git a/cds/gc/hp_decl.h b/cds/gc/hp_decl.h index 6c00f4c5..d2c98b74 100644 --- a/cds/gc/hp_decl.h +++ b/cds/gc/hp_decl.h @@ -33,24 +33,24 @@ namespace cds { namespace gc { /** @headerfile cds/gc/hp.h */ - template using atomic_ref = CDS_ATOMIC::atomic; + template using atomic_ref = atomics::atomic; /// Atomic marked pointer /** @headerfile cds/gc/hp.h */ - template using atomic_marked_ptr = CDS_ATOMIC::atomic; + template using atomic_marked_ptr = atomics::atomic; /// Atomic type /** @headerfile cds/gc/hp.h */ - template using atomic_type = CDS_ATOMIC::atomic; + template using atomic_type = atomics::atomic; #else template - class atomic_ref: public CDS_ATOMIC::atomic + class atomic_ref: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_ref() = default; @@ -65,9 +65,9 @@ namespace cds { namespace gc { }; template - class atomic_type: public CDS_ATOMIC::atomic + class atomic_type: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_type() = default; @@ -82,9 +82,9 @@ namespace cds { namespace gc { }; template - class atomic_marked_ptr: public CDS_ATOMIC::atomic + class atomic_marked_ptr: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default; @@ -170,13 +170,13 @@ namespace cds { namespace gc { to the HP slot repeatedly until the guard's value equals \p toGuard */ template - T protect( CDS_ATOMIC::atomic const& toGuard ) + T protect( atomics::atomic const& toGuard ) { - T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = assign( pCur ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -199,14 +199,14 @@ namespace cds { namespace gc { Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. 
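For example (an editor's sketch, not text from the patch), a container that keeps its links as marked pointers can strip the mark bit before publishing the hazard pointer; the node type, the mask, and the field names are assumptions made only for this illustration.
\code
struct node;
typedef cds::details::marked_ptr< node, 1 > marked_node_ptr;

struct to_plain_ptr {
    node * operator()( marked_node_ptr p ) const { return p.ptr(); }
};

atomics::atomic< marked_node_ptr > m_pNext;   // a link inside some container
cds::gc::HP::Guard guard;

// the guard loops until the hazard pointer and m_pNext agree;
// the functor result, p.ptr(), is what is actually stored in the HP slot
marked_node_ptr p = guard.protect( m_pNext, to_plain_ptr() );
\endcode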
*/ template - T protect( CDS_ATOMIC::atomic const& toGuard, Func f ) + T protect( atomics::atomic const& toGuard, Func f ) { - T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = pCur; assign( f( pCur ) ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -297,12 +297,12 @@ namespace cds { namespace gc { to the slot \p nIndex repeatedly until the guard's value equals \p toGuard */ template - T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard ) + T protect(size_t nIndex, atomics::atomic const& toGuard ) { T pRet; do { - pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_acquire) ); - } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed)); + pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire) ); + } while ( pRet != toGuard.load(atomics::memory_order_relaxed)); return pRet; } @@ -325,12 +325,12 @@ namespace cds { namespace gc { Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. */ template - T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard, Func f ) + T protect(size_t nIndex, atomics::atomic const& toGuard, Func f ) { T pRet; do { - assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_acquire) )); - } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_relaxed)); + assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire) )); + } while ( pRet != toGuard.load(atomics::memory_order_relaxed)); return pRet; } diff --git a/cds/gc/hrc/details/hrc_retired.h b/cds/gc/hrc/details/hrc_retired.h index cee87104..aa2adcd4 100644 --- a/cds/gc/hrc/details/hrc_retired.h +++ b/cds/gc/hrc/details/hrc_retired.h @@ -16,11 +16,11 @@ namespace cds { namespace gc { namespace hrc { /// Retired node descriptor struct retired_node { - CDS_ATOMIC::atomic m_pNode ; ///< node to destroy + atomics::atomic m_pNode ; ///< node to destroy free_retired_ptr_func m_funcFree ; ///< pointer to the destructor function size_t m_nNextFree ; ///< Next free item in retired array - CDS_ATOMIC::atomic m_nClaim ; ///< Access to reclaimed node - CDS_ATOMIC::atomic m_bDone ; ///< the record is in work (concurrent access flag) + atomics::atomic m_nClaim ; ///< Access to reclaimed node + atomics::atomic m_bDone ; ///< the record is in work (concurrent access flag) /// Default ctor retired_node() @@ -45,16 +45,16 @@ namespace cds { namespace gc { namespace hrc { /// Compares two \ref retired_node static bool Less( const retired_node& p1, const retired_node& p2 ) { - return p1.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) < p2.m_pNode.load( CDS_ATOMIC::memory_order_relaxed ); + return p1.m_pNode.load( atomics::memory_order_relaxed ) < p2.m_pNode.load( atomics::memory_order_relaxed ); } /// Assignment operator retired_node& set( ContainerNode * pNode, free_retired_ptr_func func ) { - m_bDone.store( false, CDS_ATOMIC::memory_order_relaxed ); - m_nClaim.store( 0, CDS_ATOMIC::memory_order_relaxed ); + m_bDone.store( false, atomics::memory_order_relaxed ); + m_nClaim.store( 0, atomics::memory_order_relaxed ); m_funcFree = func; - m_pNode.store( pNode, CDS_ATOMIC::memory_order_release ); + m_pNode.store( pNode, atomics::memory_order_release ); CDS_COMPILER_RW_BARRIER; return *this; } @@ -63,7 +63,7 @@ namespace cds { namespace gc { namespace hrc { void free() { assert( m_funcFree != nullptr ); - m_funcFree( m_pNode.load( CDS_ATOMIC::memory_order_relaxed )); + 
m_funcFree( m_pNode.load( atomics::memory_order_relaxed )); } }; @@ -116,7 +116,7 @@ namespace cds { namespace gc { namespace hrc { size_t nCount = 0; const size_t nCapacity = capacity(); for ( size_t i = 0; i < nCapacity; ++i ) { - if ( m_arr[i].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) != nullptr ) + if ( m_arr[i].m_pNode.load( atomics::memory_order_relaxed ) != nullptr ) ++nCount; } return nCount; @@ -128,7 +128,7 @@ namespace cds { namespace gc { namespace hrc { assert( !isFull()); size_t n = m_nFreeList; - assert( m_arr[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( m_arr[n].m_pNode.load( atomics::memory_order_relaxed ) == nullptr ); m_nFreeList = m_arr[n].m_nNextFree; CDS_DEBUG_DO( m_arr[n].m_nNextFree = m_nEndFreeList ; ) m_arr[n].set( p, pFunc ); @@ -138,7 +138,7 @@ namespace cds { namespace gc { namespace hrc { void pop( size_t n ) { assert( n < capacity() ); - m_arr[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_release ); + m_arr[n].m_pNode.store( nullptr, atomics::memory_order_release ); m_arr[n].m_nNextFree = m_nFreeList; m_nFreeList = n; } diff --git a/cds/gc/hrc/hrc.h b/cds/gc/hrc/hrc.h index 068adecd..a5e45e51 100644 --- a/cds/gc/hrc/hrc.h +++ b/cds/gc/hrc/hrc.h @@ -21,7 +21,7 @@ #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma warning(push) -// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic' +// warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic' // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector' # pragma warning(disable: 4251) #endif @@ -75,8 +75,8 @@ namespace cds { namespace gc { friend class gc::HRC; unsigned_ref_counter m_RC ; ///< reference counter - CDS_ATOMIC::atomic m_bTrace ; ///< \p true - node is tracing by Scan - CDS_ATOMIC::atomic m_bDeleted ; ///< \p true - node is deleted + atomics::atomic m_bTrace ; ///< \p true - node is tracing by Scan + atomics::atomic m_bDeleted ; ///< \p true - node is deleted protected: //@cond @@ -106,12 +106,12 @@ namespace cds { namespace gc { /// Returns the mark whether the node is deleted bool isDeleted() const CDS_NOEXCEPT { - return m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ); + return m_bDeleted.load( atomics::memory_order_acquire ); } protected: //@cond - void clean( CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + void clean( atomics::memory_order order ) CDS_NOEXCEPT { m_bDeleted.store( false, order ); m_bTrace.store( false, order ); @@ -294,7 +294,7 @@ namespace cds { namespace gc { { thread_list_node * m_pNext ; ///< next list record ThreadGC * m_pOwner ; ///< Owner of record - CDS_ATOMIC::atomic m_idOwner ; ///< Id of thread owned; 0 - record is free + atomics::atomic m_idOwner ; ///< Id of thread owned; 0 - record is free bool m_bFree ; ///< Node is help-scanned //@cond @@ -309,13 +309,13 @@ namespace cds { namespace gc { ~thread_list_node() { assert( m_pOwner == nullptr ); - assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId ); + assert( m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId ); } //@endcond }; private: - CDS_ATOMIC::atomic m_pListHead ; ///< Head of thread list + atomics::atomic m_pListHead ; ///< Head of thread list static GarbageCollector * m_pGC ; ///< HRC garbage collector instance @@ -545,7 +545,7 @@ namespace cds { namespace gc { /// Retire (deferred delete) node \p pNode guarded by \p hp hazard pointer void retireNode( ContainerNode * pNode, 
details::HPGuard& hp, details::free_retired_ptr_func pFunc ) { - assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) ); + assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) ); assert( pNode == hp ); retireNode( pNode, pFunc ); @@ -555,10 +555,10 @@ namespace cds { namespace gc { /// Retire (deferred delete) node \p pNode. Do not use this function directly! void retireNode( ContainerNode * pNode, details::free_retired_ptr_func pFunc ) { - assert( !pNode->m_bDeleted.load( CDS_ATOMIC::memory_order_relaxed ) ); + assert( !pNode->m_bDeleted.load( atomics::memory_order_relaxed ) ); - pNode->m_bDeleted.store( true, CDS_ATOMIC::memory_order_release ); - pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + pNode->m_bDeleted.store( true, atomics::memory_order_release ); + pNode->m_bTrace.store( false, atomics::memory_order_release ); m_pDesc->m_arrRetired.push( pNode, pFunc ); @@ -580,8 +580,8 @@ namespace cds { namespace gc { details::retired_vector::iterator itEnd = m_pDesc->m_arrRetired.end(); for ( details::retired_vector::iterator it = m_pDesc->m_arrRetired.begin(); it != itEnd; ++it ) { details::retired_node& node = *it; - ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire); - if ( pNode && !node.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) + ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire); + if ( pNode && !node.m_bDone.load(atomics::memory_order_acquire) ) pNode->cleanUp( this ); } } diff --git a/cds/gc/hrc_decl.h b/cds/gc/hrc_decl.h index 8c1c6260..bd33362b 100644 --- a/cds/gc/hrc_decl.h +++ b/cds/gc/hrc_decl.h @@ -138,10 +138,10 @@ namespace cds { namespace gc { @headerfile cds/gc/hrc.h */ template - class atomic_ref: protected CDS_ATOMIC::atomic + class atomic_ref: protected atomics::atomic { //@cond - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; //@endcond public: //@cond @@ -158,26 +158,26 @@ namespace cds { namespace gc { //@endcond /// Read reference value - T * load( CDS_ATOMIC::memory_order order ) const CDS_NOEXCEPT + T * load( atomics::memory_order order ) const CDS_NOEXCEPT { return base_class::load( order ); } //@cond - T * load( CDS_ATOMIC::memory_order order ) const volatile CDS_NOEXCEPT + T * load( atomics::memory_order order ) const volatile CDS_NOEXCEPT { return base_class::load( order ); } //@endcond /// Store new value to reference - void store( T * pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + void store( T * pNew, atomics::memory_order order ) CDS_NOEXCEPT { before_store( pNew ); T * pOld = base_class::exchange( pNew, order ); after_store( pOld, pNew ); } //@cond - void store( T * pNew, CDS_ATOMIC::memory_order order ) volatile CDS_NOEXCEPT + void store( T * pNew, atomics::memory_order order ) volatile CDS_NOEXCEPT { before_store( pNew ); T * pOld = base_class::exchange( pNew, order ); @@ -191,7 +191,7 @@ namespace cds { namespace gc { \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type */ - bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT { before_cas( pNew ); bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail ); @@ -199,20 +199,20 @@ namespace cds { namespace gc { return bSuccess; } //@cond - bool compare_exchange_strong( T *& pOld, T * pNew, 
CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT + bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT { before_cas( pNew ); bool bSuccess = base_class::compare_exchange_strong( pOld, pNew, mo_success, mo_fail ); after_cas( bSuccess, pOld, pNew ); return bSuccess; } - bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT { - return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed ); } - bool compare_exchange_strong( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT + bool compare_exchange_strong( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT { - return compare_exchange_strong( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + return compare_exchange_strong( pOld, pNew, mo_success, atomics::memory_order_relaxed ); } //@endcond @@ -222,7 +222,7 @@ namespace cds { namespace gc { \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type */ - bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT { before_cas( pNew ); bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); @@ -230,20 +230,20 @@ namespace cds { namespace gc { return bSuccess; } //@cond - bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) volatile CDS_NOEXCEPT + bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) volatile CDS_NOEXCEPT { before_cas( pNew ); bool bSuccess = base_class::compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); after_cas( bSuccess, pOld, pNew ); return bSuccess; } - bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT { - return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed ); } - bool compare_exchange_weak( T *& pOld, T * pNew, CDS_ATOMIC::memory_order mo_success ) volatile CDS_NOEXCEPT + bool compare_exchange_weak( T *& pOld, T * pNew, atomics::memory_order mo_success ) volatile CDS_NOEXCEPT { - return compare_exchange_weak( pOld, pNew, mo_success, CDS_ATOMIC::memory_order_relaxed ); + return compare_exchange_weak( pOld, pNew, mo_success, atomics::memory_order_relaxed ); } //@endcond @@ -257,7 +257,7 @@ namespace cds { namespace gc { static void after_store( T * pOld, T * pNew ) CDS_NOEXCEPT { if ( pNew ) - pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + pNew->m_bTrace.store( false, atomics::memory_order_release ); if ( pOld ) --pOld->m_RC; } @@ -265,7 +265,7 @@ namespace cds { namespace gc { { if ( p ) { ++p->m_RC; - p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + p->m_bTrace.store( false, atomics::memory_order_release ); } } static void 
after_cas( bool bSuccess, T * pOld, T * pNew ) CDS_NOEXCEPT @@ -290,7 +290,7 @@ namespace cds { namespace gc { class atomic_marked_ptr { //@cond - CDS_ATOMIC::atomic< MarkedPtr > m_a; + atomics::atomic< MarkedPtr > m_a; //@endcond public: /// Marked pointer type @@ -316,13 +316,13 @@ namespace cds { namespace gc { /// Read reference value - marked_ptr load(CDS_ATOMIC::memory_order order) const CDS_NOEXCEPT + marked_ptr load(atomics::memory_order order) const CDS_NOEXCEPT { return m_a.load(order); } /// Store new value to reference - void store( marked_ptr pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + void store( marked_ptr pNew, atomics::memory_order order ) CDS_NOEXCEPT { before_store( pNew.ptr() ); marked_ptr pOld = m_a.exchange( pNew, order ); @@ -330,7 +330,7 @@ namespace cds { namespace gc { } /// Store new value to reference - void store( typename marked_ptr::pointer_type pNew, CDS_ATOMIC::memory_order order ) CDS_NOEXCEPT + void store( typename marked_ptr::pointer_type pNew, atomics::memory_order order ) CDS_NOEXCEPT { before_store( pNew ); marked_ptr pOld = m_a.exchange( marked_ptr(pNew), order ); @@ -343,7 +343,7 @@ namespace cds { namespace gc { \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type */ - bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT { before_cas( pNew.ptr() ); bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success, mo_fail ); @@ -351,7 +351,7 @@ namespace cds { namespace gc { return bSuccess; } //@cond - bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + bool compare_exchange_weak( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT { before_cas( pNew.ptr() ); bool bSuccess = m_a.compare_exchange_weak( pOld, pNew, mo_success ); @@ -366,7 +366,7 @@ namespace cds { namespace gc { \p T - class derived from \ref hrc_gc_HRC_container_node "container_node" type */ - bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success, CDS_ATOMIC::memory_order mo_fail ) CDS_NOEXCEPT + bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success, atomics::memory_order mo_fail ) CDS_NOEXCEPT { // protect pNew before_cas( pNew.ptr() ); @@ -375,7 +375,7 @@ namespace cds { namespace gc { return bSuccess; } //@cond - bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, CDS_ATOMIC::memory_order mo_success ) CDS_NOEXCEPT + bool compare_exchange_strong( marked_ptr& pOld, marked_ptr pNew, atomics::memory_order mo_success ) CDS_NOEXCEPT { before_cas( pNew.ptr() ); bool bSuccess = m_a.compare_exchange_strong( pOld, pNew, mo_success ); @@ -394,7 +394,7 @@ namespace cds { namespace gc { static void after_store( typename marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT { if ( pNew ) - pNew->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + pNew->m_bTrace.store( false, atomics::memory_order_release ); if ( pOld ) --pOld->m_RC; } @@ -402,7 +402,7 @@ namespace cds { namespace gc { { if ( p ) { ++p->m_RC; - p->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + p->m_bTrace.store( false, atomics::memory_order_release ); } } static void after_cas( bool bSuccess, typename 
marked_ptr::pointer_type pOld, typename marked_ptr::pointer_type pNew ) CDS_NOEXCEPT @@ -444,11 +444,11 @@ namespace cds { namespace gc { template T * protect( atomic_ref const& toGuard ) { - T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T * pCur = toGuard.load(atomics::memory_order_relaxed); T * pRet; do { pRet = assign( pCur ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -473,12 +473,12 @@ namespace cds { namespace gc { template T * protect( atomic_ref const& toGuard, Func f ) { - T * pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T * pCur = toGuard.load(atomics::memory_order_relaxed); T * pRet; do { pRet = pCur; assign( f( pCur ) ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -495,8 +495,8 @@ namespace cds { namespace gc { { typename atomic_marked_ptr::marked_ptr p; do { - assign( ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() ); - } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + assign( ( p = link.load(atomics::memory_order_relaxed)).ptr() ); + } while ( p != link.load(atomics::memory_order_acquire) ); return p; } @@ -522,9 +522,9 @@ namespace cds { namespace gc { { typename atomic_marked_ptr::marked_ptr pCur; do { - pCur = link.load(CDS_ATOMIC::memory_order_relaxed); + pCur = link.load(atomics::memory_order_relaxed); assign( f( pCur )); - } while ( pCur != link.load(CDS_ATOMIC::memory_order_acquire) ); + } while ( pCur != link.load(atomics::memory_order_acquire) ); return pCur; } @@ -615,8 +615,8 @@ namespace cds { namespace gc { { T * p; do { - p = assign( nIndex, link.load(CDS_ATOMIC::memory_order_relaxed) ); - } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + p = assign( nIndex, link.load(atomics::memory_order_relaxed) ); + } while ( p != link.load(atomics::memory_order_acquire) ); return p; } @@ -632,8 +632,8 @@ namespace cds { namespace gc { { typename atomic_marked_ptr::marked_ptr p; do { - assign( nIndex, ( p = link.load(CDS_ATOMIC::memory_order_relaxed)).ptr() ); - } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + assign( nIndex, ( p = link.load(atomics::memory_order_relaxed)).ptr() ); + } while ( p != link.load(atomics::memory_order_acquire) ); return p; } @@ -659,8 +659,8 @@ namespace cds { namespace gc { { T * pRet; do { - assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) )); - } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) )); + } while ( pRet != toGuard.load(atomics::memory_order_acquire)); return pRet; } @@ -687,9 +687,9 @@ namespace cds { namespace gc { { typename atomic_marked_ptr::marked_ptr p; do { - p = link.load(CDS_ATOMIC::memory_order_relaxed); + p = link.load(atomics::memory_order_relaxed); assign( nIndex, f( p ) ); - } while ( p != link.load(CDS_ATOMIC::memory_order_acquire) ); + } while ( p != link.load(atomics::memory_order_acquire) ); return p; } diff --git a/cds/gc/hzp/details/hp_alloc.h b/cds/gc/hzp/details/hp_alloc.h index 40561fba..ad69e3cf 100644 --- a/cds/gc/hzp/details/hp_alloc.h +++ b/cds/gc/hzp/details/hp_alloc.h @@ -23,13 +23,13 @@ namespace cds { \li HazardPointer - type of hazard pointer. 
It is \ref hazard_pointer for Michael's Hazard Pointer reclamation schema */ template - class HPGuardT: protected CDS_ATOMIC::atomic + class HPGuardT: protected atomics::atomic { public: typedef HazardPointer hazard_ptr ; ///< Hazard pointer type private: //@cond - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; //@endcond protected: @@ -52,7 +52,7 @@ namespace cds { T * operator =( T * p ) CDS_NOEXCEPT { // We use atomic store with explicit memory order because other threads may read this hazard pointer concurrently - base_class::store( reinterpret_cast(p), CDS_ATOMIC::memory_order_release ); + base_class::store( reinterpret_cast(p), atomics::memory_order_release ); return p; } @@ -79,7 +79,7 @@ namespace cds { */ hazard_ptr get() const CDS_NOEXCEPT { - return base_class::load( CDS_ATOMIC::memory_order_acquire ); + return base_class::load( atomics::memory_order_acquire ); } /// Clears HP @@ -89,7 +89,7 @@ namespace cds { void clear() CDS_NOEXCEPT { // memory order is not necessary here - base_class::store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + base_class::store( nullptr, atomics::memory_order_relaxed ); //CDS_COMPILER_RW_BARRIER; } }; diff --git a/cds/gc/hzp/hzp.h b/cds/gc/hzp/hzp.h index 27d1cec7..92eb84d3 100644 --- a/cds/gc/hzp/hzp.h +++ b/cds/gc/hzp/hzp.h @@ -16,7 +16,7 @@ #if CDS_COMPILER == CDS_COMPILER_MSVC # pragma warning(push) - // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomics::atomic' + // warning C4251: 'cds::gc::hzp::GarbageCollector::m_pListHead' : class 'cds::cxx11_atomic::atomic' // needs to have dll-interface to be used by clients of class 'cds::gc::hzp::GarbageCollector' # pragma warning(disable: 4251) #endif @@ -208,8 +208,8 @@ namespace cds { struct hplist_node: public details::HPRec { hplist_node * m_pNextNode ; ///< next hazard ptr record in list - CDS_ATOMIC::atomic m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned) - CDS_ATOMIC::atomic m_bFree ; ///< true if record if free (not owned) + atomics::atomic m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned) + atomics::atomic m_bFree ; ///< true if record if free (not owned) //@cond hplist_node( const GarbageCollector& HzpMgr ) @@ -221,13 +221,13 @@ namespace cds { ~hplist_node() { - assert( m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == OS::c_NullThreadId ); - assert( m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ); + assert( m_idOwner.load( atomics::memory_order_relaxed ) == OS::c_NullThreadId ); + assert( m_bFree.load(atomics::memory_order_relaxed) ); } //@endcond }; - CDS_ATOMIC::atomic m_pListHead ; ///< Head of GC list + atomics::atomic m_pListHead ; ///< Head of GC list static GarbageCollector * m_pHZPManager ; ///< GC instance pointer diff --git a/cds/gc/ptb/ptb.h b/cds/gc/ptb/ptb.h index b1afc4ff..b4d716b2 100644 --- a/cds/gc/ptb/ptb.h +++ b/cds/gc/ptb/ptb.h @@ -75,7 +75,7 @@ namespace cds { namespace gc { typedef retired_ptr_node * handoff_ptr ; ///< trapped value type typedef void * guarded_ptr ; ///< type of value guarded - CDS_ATOMIC::atomic pPost ; ///< pointer guarded + atomics::atomic pPost ; ///< pointer guarded #if 0 typedef cds::SpinLock handoff_spin ; ///< type of spin-lock for accessing to \p pHandOff field @@ -83,8 +83,8 @@ namespace cds { namespace gc { handoff_ptr pHandOff ; ///< trapped pointer #endif - CDS_ATOMIC::atomic pGlobalNext ; ///< next item of global list of allocated guards - CDS_ATOMIC::atomic pNextFree ; ///< pointer to the next item in global or thread-local 
free-list + atomics::atomic pGlobalNext ; ///< next item of global list of allocated guards + atomics::atomic pNextFree ; ///< pointer to the next item in global or thread-local free-list guard_data * pThreadNext ; ///< next item of thread's local list of guards @@ -101,14 +101,14 @@ namespace cds { namespace gc { void init() { - pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + pPost.store( nullptr, atomics::memory_order_relaxed ); } //@endcond /// Checks if the guard is free, that is, it does not contain any pointer guarded bool isFree() const { - return pPost.load( CDS_ATOMIC::memory_order_acquire ) == nullptr; + return pPost.load( atomics::memory_order_acquire ) == nullptr; } }; @@ -118,8 +118,8 @@ namespace cds { namespace gc { { cds::details::Allocator m_GuardAllocator ; ///< guard allocator - CDS_ATOMIC::atomic m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field) - CDS_ATOMIC::atomic m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field) + atomics::atomic m_GuardList ; ///< Head of allocated guard list (linked by guard_data::pGlobalNext field) + atomics::atomic m_FreeGuardList ; ///< Head of free guard list (linked by guard_data::pNextFree field) SpinLock m_freeListLock ; ///< Access to m_FreeGuardList /* @@ -139,11 +139,11 @@ namespace cds { namespace gc { // Link guard to the list // m_GuardList is accumulated list and it cannot support concurrent deletion, // so, ABA problem is impossible for it - details::guard_data * pHead = m_GuardList.load( CDS_ATOMIC::memory_order_acquire ); + details::guard_data * pHead = m_GuardList.load( atomics::memory_order_acquire ); do { - pGuard->pGlobalNext.store( pHead, CDS_ATOMIC::memory_order_relaxed ); + pGuard->pGlobalNext.store( pHead, atomics::memory_order_relaxed ); // pHead is changed by compare_exchange_weak - } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_GuardList.compare_exchange_weak( pHead, pGuard, atomics::memory_order_release, atomics::memory_order_relaxed )); pGuard->init(); return pGuard; @@ -160,8 +160,8 @@ namespace cds { namespace gc { ~guard_allocator() { guard_data * pNext; - for ( guard_data * pData = m_GuardList.load( CDS_ATOMIC::memory_order_relaxed ); pData != nullptr; pData = pNext ) { - pNext = pData->pGlobalNext.load( CDS_ATOMIC::memory_order_relaxed ); + for ( guard_data * pData = m_GuardList.load( atomics::memory_order_relaxed ); pData != nullptr; pData = pNext ) { + pNext = pData->pGlobalNext.load( atomics::memory_order_relaxed ); m_GuardAllocator.Delete( pData ); } } @@ -174,9 +174,9 @@ namespace cds { namespace gc { { cds::lock::scoped_lock al( m_freeListLock ); - pGuard = m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed); + pGuard = m_FreeGuardList.load(atomics::memory_order_relaxed); if ( pGuard ) - m_FreeGuardList.store( pGuard->pNextFree.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); + m_FreeGuardList.store( pGuard->pNextFree.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed ); } if ( !pGuard ) return allocNew(); @@ -191,11 +191,11 @@ namespace cds { namespace gc { */ void free( guard_data * pGuard ) { - pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, atomics::memory_order_relaxed ); cds::lock::scoped_lock al( m_freeListLock ); - pGuard->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), 
CDS_ATOMIC::memory_order_relaxed ); - m_FreeGuardList.store( pGuard, CDS_ATOMIC::memory_order_relaxed ); + pGuard->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed ); + m_FreeGuardList.store( pGuard, atomics::memory_order_relaxed ); } /// Allocates list of guard @@ -218,11 +218,11 @@ namespace cds { namespace gc { // so, we can use relaxed memory order while ( --nCount ) { guard_data * p = alloc(); - pLast->pNextFree.store( pLast->pThreadNext = p, CDS_ATOMIC::memory_order_relaxed ); + pLast->pNextFree.store( pLast->pThreadNext = p, atomics::memory_order_relaxed ); pLast = p; } - pLast->pNextFree.store( pLast->pThreadNext = nullptr, CDS_ATOMIC::memory_order_relaxed ); + pLast->pNextFree.store( pLast->pThreadNext = nullptr, atomics::memory_order_relaxed ); return pHead; } @@ -239,21 +239,21 @@ namespace cds { namespace gc { guard_data * pLast = pList; while ( pLast->pThreadNext ) { - pLast->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + pLast->pPost.store( nullptr, atomics::memory_order_relaxed ); guard_data * p; - pLast->pNextFree.store( p = pLast->pThreadNext, CDS_ATOMIC::memory_order_relaxed ); + pLast->pNextFree.store( p = pLast->pThreadNext, atomics::memory_order_relaxed ); pLast = p; } cds::lock::scoped_lock al( m_freeListLock ); - pLast->pNextFree.store( m_FreeGuardList.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); - m_FreeGuardList.store( pList, CDS_ATOMIC::memory_order_relaxed ); + pLast->pNextFree.store( m_FreeGuardList.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed ); + m_FreeGuardList.store( pList, atomics::memory_order_relaxed ); } /// Returns the list's head of guards allocated guard_data * begin() { - return m_GuardList.load(CDS_ATOMIC::memory_order_acquire); + return m_GuardList.load(atomics::memory_order_acquire); } }; @@ -265,8 +265,8 @@ namespace cds { namespace gc { */ class retired_ptr_buffer { - CDS_ATOMIC::atomic m_pHead ; ///< head of buffer - CDS_ATOMIC::atomic m_nItemCount; ///< buffer's item count + atomics::atomic m_pHead ; ///< head of buffer + atomics::atomic m_nItemCount; ///< buffer's item count public: //@cond @@ -277,20 +277,20 @@ namespace cds { namespace gc { ~retired_ptr_buffer() { - assert( m_pHead.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( m_pHead.load( atomics::memory_order_relaxed ) == nullptr ); } //@endcond /// Pushes new node into the buffer. Returns current buffer size size_t push( retired_ptr_node& node ) { - retired_ptr_node * pHead = m_pHead.load(CDS_ATOMIC::memory_order_acquire); + retired_ptr_node * pHead = m_pHead.load(atomics::memory_order_acquire); do { node.m_pNext = pHead; // pHead is changed by compare_exchange_weak - } while ( !m_pHead.compare_exchange_weak( pHead, &node, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pHead.compare_exchange_weak( pHead, &node, atomics::memory_order_release, atomics::memory_order_relaxed )); - return m_nItemCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ) + 1; + return m_nItemCount.fetch_add( 1, atomics::memory_order_relaxed ) + 1; } /// Result of \ref ptb_gc_privatve "privatize" function. 
@@ -305,18 +305,18 @@ namespace cds { namespace gc { privatize_result privatize() { privatize_result res; - res.first = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_acq_rel ); + res.first = m_pHead.exchange( nullptr, atomics::memory_order_acq_rel ); // Item counter is needed only as a threshold for liberate function // So, we may clear the item counter without synchronization with m_pHead - res.second = m_nItemCount.exchange( 0, CDS_ATOMIC::memory_order_relaxed ); + res.second = m_nItemCount.exchange( 0, atomics::memory_order_relaxed ); return res; } /// Returns current size of buffer (approximate) size_t size() const { - return m_nItemCount.load(CDS_ATOMIC::memory_order_relaxed); + return m_nItemCount.load(atomics::memory_order_relaxed); } }; @@ -339,13 +339,13 @@ namespace cds { namespace gc { item items[m_nItemPerBlock] ; ///< item array }; - CDS_ATOMIC::atomic m_pBlockListHead ; ///< head of of allocated block list + atomics::atomic m_pBlockListHead ; ///< head of of allocated block list // To solve ABA problem we use epoch-based approach static const unsigned int c_nEpochCount = 4 ; ///< Max epoch count - CDS_ATOMIC::atomic m_nCurEpoch ; ///< Current epoch - CDS_ATOMIC::atomic m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch - CDS_ATOMIC::atomic m_pGlobalFreeHead ; ///< Head of unallocated item list + atomics::atomic m_nCurEpoch ; ///< Current epoch + atomics::atomic m_pEpochFree[c_nEpochCount] ; ///< List of free item per epoch + atomics::atomic m_pGlobalFreeHead ; ///< Head of unallocated item list cds::details::Allocator< block, Alloc > m_BlockAllocator ; ///< block allocator @@ -365,30 +365,30 @@ namespace cds { namespace gc { // link new block to block list { - block * pHead = m_pBlockListHead.load(CDS_ATOMIC::memory_order_acquire); + block * pHead = m_pBlockListHead.load(atomics::memory_order_acquire); do { pNew->pNext = pHead; // pHead is changed by compare_exchange_weak - } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pBlockListHead.compare_exchange_weak( pHead, pNew, atomics::memory_order_release, atomics::memory_order_relaxed )); } // link block's items to free list { - item * pHead = m_pGlobalFreeHead.load(CDS_ATOMIC::memory_order_acquire); + item * pHead = m_pGlobalFreeHead.load(atomics::memory_order_acquire); do { pLastItem->m_pNextFree = pHead; // pHead is changed by compare_exchange_weak - } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pGlobalFreeHead.compare_exchange_weak( pHead, pNew->items, atomics::memory_order_release, atomics::memory_order_relaxed )); } } unsigned int current_epoch() const { - return m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) & (c_nEpochCount - 1); + return m_nCurEpoch.load(atomics::memory_order_acquire) & (c_nEpochCount - 1); } unsigned int next_epoch() const { - return (m_nCurEpoch.load(CDS_ATOMIC::memory_order_acquire) - 1) & (c_nEpochCount - 1); + return (m_nCurEpoch.load(atomics::memory_order_acquire) - 1) & (c_nEpochCount - 1); } //@endcond @@ -400,7 +400,7 @@ namespace cds { namespace gc { , m_pGlobalFreeHead( nullptr ) { for (unsigned int i = 0; i < sizeof(m_pEpochFree)/sizeof(m_pEpochFree[0]); ++i ) - m_pEpochFree[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + m_pEpochFree[i].store( nullptr, atomics::memory_order_relaxed ); allocNewBlock(); } @@ -408,7 +408,7 @@ namespace cds { namespace gc 
{ ~retired_ptr_pool() { block * p; - for ( block * pBlock = m_pBlockListHead.load(CDS_ATOMIC::memory_order_relaxed); pBlock; pBlock = p ) { + for ( block * pBlock = m_pBlockListHead.load(atomics::memory_order_relaxed); pBlock; pBlock = p ) { p = pBlock->pNext; m_BlockAllocator.Delete( pBlock ); } @@ -417,7 +417,7 @@ namespace cds { namespace gc { /// Increments current epoch void inc_epoch() { - m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_acq_rel ); + m_nCurEpoch.fetch_add( 1, atomics::memory_order_acq_rel ); } //@endcond @@ -428,17 +428,17 @@ namespace cds { namespace gc { unsigned int nEpoch; item * pItem; for (;;) { - pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire); + pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire); if ( !pItem ) goto retry; - if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed )) goto success; } /* - item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(CDS_ATOMIC::memory_order_acquire); + item * pItem = m_pEpochFree[ nEpoch = current_epoch() ].load(atomics::memory_order_acquire); while ( pItem ) { - if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( m_pEpochFree[nEpoch].compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed )) goto success; } */ @@ -446,14 +446,14 @@ namespace cds { namespace gc { // Epoch free list is empty // Alloc from global free list retry: - pItem = m_pGlobalFreeHead.load( CDS_ATOMIC::memory_order_acquire ); + pItem = m_pGlobalFreeHead.load( atomics::memory_order_acquire ); do { if ( !pItem ) { allocNewBlock(); goto retry; } // pItem is changed by compare_exchange_weak - } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree, atomics::memory_order_release, atomics::memory_order_relaxed )); success: CDS_STRICT_DO( pItem->m_pNextFree = nullptr ); @@ -480,9 +480,9 @@ namespace cds { namespace gc { unsigned int nEpoch; item * pCurHead; do { - pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(CDS_ATOMIC::memory_order_acquire); + pCurHead = m_pEpochFree[nEpoch = next_epoch()].load(atomics::memory_order_acquire); pTail->m_pNextFree = pCurHead; - } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pEpochFree[nEpoch].compare_exchange_weak( pCurHead, pHead, atomics::memory_order_release, atomics::memory_order_relaxed )); } }; @@ -506,7 +506,7 @@ namespace cds { namespace gc { void set( void * p ) { assert( m_pGuard != nullptr ); - m_pGuard->pPost.store( p, CDS_ATOMIC::memory_order_release ); + m_pGuard->pPost.store( p, atomics::memory_order_release ); //CDS_COMPILER_RW_BARRIER; } @@ -514,7 +514,7 @@ namespace cds { namespace gc { void clear() { assert( m_pGuard != nullptr ); - m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed ); CDS_STRICT_DO( CDS_COMPILER_RW_BARRIER ); } @@ -689,8 +689,8 @@ namespace cds { namespace gc { 
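The retired_ptr_pool::alloc() hunks above keep the familiar lock-free free-list pop: try the per-epoch list first, then fall back to the global free list, and allocate a new block only when both are empty. Below is a minimal sketch of that fallback pop, assuming a simplified item type with an m_pNextFree link and the renamed atomics facade; it is illustrative only, not the library code.

    // Sketch: global free-list pop in the style of retired_ptr_pool::alloc().
    // The real pool additionally cycles items through per-epoch lists to avoid ABA.
    struct free_item {
        free_item * m_pNextFree;
    };

    free_item * pop_global( atomics::atomic<free_item *>& pGlobalFreeHead )
    {
        free_item * pItem = pGlobalFreeHead.load( atomics::memory_order_acquire );
        do {
            if ( !pItem )
                return nullptr;   // the caller would call allocNewBlock() and retry here
            // on CAS failure, pItem is reloaded with the current head
        } while ( !pGlobalFreeHead.compare_exchange_weak( pItem, pItem->m_pNextFree,
                    atomics::memory_order_release, atomics::memory_order_relaxed ));
        return pItem;
    }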
/// Internal GC statistics struct internal_stat { - CDS_ATOMIC::atomic m_nGuardCount ; ///< Total guard count - CDS_ATOMIC::atomic m_nFreeGuardCount ; ///< Count of free guard + atomics::atomic m_nGuardCount ; ///< Total guard count + atomics::atomic m_nFreeGuardCount ; ///< Count of free guard internal_stat() : m_nGuardCount(0) @@ -717,8 +717,8 @@ namespace cds { namespace gc { InternalState& operator =( internal_stat const& s ) { - m_nGuardCount = s.m_nGuardCount.load(CDS_ATOMIC::memory_order_relaxed); - m_nFreeGuardCount = s.m_nFreeGuardCount.load(CDS_ATOMIC::memory_order_relaxed); + m_nGuardCount = s.m_nGuardCount.load(atomics::memory_order_relaxed); + m_nFreeGuardCount = s.m_nFreeGuardCount.load(atomics::memory_order_relaxed); return *this; } @@ -731,9 +731,9 @@ namespace cds { namespace gc { details::guard_allocator<> m_GuardPool ; ///< Guard pool details::retired_ptr_pool<> m_RetiredAllocator ; ///< Pool of free retired pointers details::retired_ptr_buffer m_RetiredBuffer ; ///< Retired pointer buffer for liberating - //CDS_ATOMIC::atomic m_nInLiberate ; ///< number of parallel \p liberate fnction call + //atomics::atomic m_nInLiberate ; ///< number of parallel \p liberate fnction call - CDS_ATOMIC::atomic m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate + atomics::atomic m_nLiberateThreshold; ///< Max size of retired pointer buffer to call liberate const size_t m_nInitialThreadGuardCount; ///< Initial count of guards allocated for ThreadGC internal_stat m_stat ; ///< Internal statistics @@ -827,7 +827,7 @@ namespace cds { namespace gc { /// Places retired pointer \p into thread's array of retired pointer for deferred reclamation void retirePtr( retired_ptr const& p ) { - if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed) ) + if ( m_RetiredBuffer.push( m_RetiredAllocator.alloc(p)) >= m_nLiberateThreshold.load(atomics::memory_order_relaxed) ) liberate(); } @@ -933,7 +933,7 @@ namespace cds { namespace gc { assert( m_pList != nullptr ); if ( m_pFree ) { g.m_pGuard = m_pFree; - m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed); + m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed); } else { g.m_pGuard = m_gc.allocGuard(); @@ -946,8 +946,8 @@ namespace cds { namespace gc { void freeGuard( Guard& g ) { assert( m_pList != nullptr ); - g.m_pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); - g.m_pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed ); + g.m_pGuard->pPost.store( nullptr, atomics::memory_order_relaxed ); + g.m_pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed ); m_pFree = g.m_pGuard; } @@ -960,7 +960,7 @@ namespace cds { namespace gc { while ( m_pFree && nCount < Count ) { arr[nCount].set_guard( m_pFree ); - m_pFree = m_pFree->pNextFree.load(CDS_ATOMIC::memory_order_relaxed); + m_pFree = m_pFree->pNextFree.load(atomics::memory_order_relaxed); ++nCount; } @@ -981,12 +981,12 @@ namespace cds { namespace gc { details::guard_data * pGuard; for ( size_t i = 0; i < Count - 1; ++i ) { pGuard = arr[i].get_guard(); - pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); - pGuard->pNextFree.store( arr[i+1].get_guard(), CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, atomics::memory_order_relaxed ); + pGuard->pNextFree.store( arr[i+1].get_guard(), atomics::memory_order_relaxed ); } pGuard = arr[Count-1].get_guard(); - pGuard->pPost.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); - 
pGuard->pNextFree.store( m_pFree, CDS_ATOMIC::memory_order_relaxed ); + pGuard->pPost.store( nullptr, atomics::memory_order_relaxed ); + pGuard->pNextFree.store( m_pFree, atomics::memory_order_relaxed ); m_pFree = arr[0].get_guard(); } diff --git a/cds/gc/ptb_decl.h b/cds/gc/ptb_decl.h index 175e9064..eae4dcf8 100644 --- a/cds/gc/ptb_decl.h +++ b/cds/gc/ptb_decl.h @@ -35,24 +35,24 @@ namespace cds { namespace gc { /** @headerfile cds/gc/ptb.h */ - template using atomic_ref = CDS_ATOMIC::atomic; + template using atomic_ref = atomics::atomic; /// Atomic type /** @headerfile cds/gc/ptb.h */ - template using atomic_type = CDS_ATOMIC::atomic; + template using atomic_type = atomics::atomic; /// Atomic marked pointer /** @headerfile cds/gc/ptb.h */ - template using atomic_marked_ptr = CDS_ATOMIC::atomic; + template using atomic_marked_ptr = atomics::atomic; #else template - class atomic_ref: public CDS_ATOMIC::atomic + class atomic_ref: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_ref() = default; @@ -67,9 +67,9 @@ namespace cds { namespace gc { }; template - class atomic_type: public CDS_ATOMIC::atomic + class atomic_type: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_type() = default; @@ -84,9 +84,9 @@ namespace cds { namespace gc { }; template - class atomic_marked_ptr: public CDS_ATOMIC::atomic + class atomic_marked_ptr: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_marked_ptr() = default; @@ -172,13 +172,13 @@ namespace cds { namespace gc { to the HP slot repeatedly until the guard's value equals \p toGuard */ template - T protect( CDS_ATOMIC::atomic const& toGuard ) + T protect( atomics::atomic const& toGuard ) { - T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = assign( pCur ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -201,14 +201,14 @@ namespace cds { namespace gc { Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. 
*/ template - T protect( CDS_ATOMIC::atomic const& toGuard, Func f ) + T protect( atomics::atomic const& toGuard, Func f ) { - T pCur = toGuard.load(CDS_ATOMIC::memory_order_relaxed); + T pCur = toGuard.load(atomics::memory_order_relaxed); T pRet; do { pRet = pCur; assign( f( pCur ) ); - pCur = toGuard.load(CDS_ATOMIC::memory_order_acquire); + pCur = toGuard.load(atomics::memory_order_acquire); } while ( pRet != pCur ); return pCur; } @@ -264,7 +264,7 @@ namespace cds { namespace gc { /// Get native guarded pointer stored guarded_pointer get_native() const { - return base_class::get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed); + return base_class::get_guard()->pPost.load(atomics::memory_order_relaxed); } }; @@ -301,12 +301,12 @@ namespace cds { namespace gc { to the slot \p nIndex repeatedly until the guard's value equals \p toGuard */ template - T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard ) + T protect(size_t nIndex, atomics::atomic const& toGuard ) { T pRet; do { - pRet = assign( nIndex, toGuard.load(CDS_ATOMIC::memory_order_relaxed) ); - } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + pRet = assign( nIndex, toGuard.load(atomics::memory_order_relaxed) ); + } while ( pRet != toGuard.load(atomics::memory_order_acquire)); return pRet; } @@ -329,12 +329,12 @@ namespace cds { namespace gc { Really, the result of f( toGuard.load() ) is assigned to the hazard pointer. */ template - T protect(size_t nIndex, CDS_ATOMIC::atomic const& toGuard, Func f ) + T protect(size_t nIndex, atomics::atomic const& toGuard, Func f ) { T pRet; do { - assign( nIndex, f( pRet = toGuard.load(CDS_ATOMIC::memory_order_relaxed) )); - } while ( pRet != toGuard.load(CDS_ATOMIC::memory_order_acquire)); + assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_relaxed) )); + } while ( pRet != toGuard.load(atomics::memory_order_acquire)); return pRet; } @@ -389,7 +389,7 @@ namespace cds { namespace gc { /// Get native guarded pointer stored guarded_pointer get_native( size_t nIndex ) const { - return base_class::operator[](nIndex).get_guard()->pPost.load(CDS_ATOMIC::memory_order_relaxed); + return base_class::operator[](nIndex).get_guard()->pPost.load(atomics::memory_order_relaxed); } /// Capacity of the guard array diff --git a/cds/intrusive/basket_queue.h b/cds/intrusive/basket_queue.h index ccf89645..4e52726c 100644 --- a/cds/intrusive/basket_queue.h +++ b/cds/intrusive/basket_queue.h @@ -70,9 +70,9 @@ namespace cds { namespace intrusive { while ( true ) { marked_ptr pNext = aGuards.protect( 0, m_pNext ); - if ( pNext.ptr() && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( pNext.ptr() && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) { marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); - m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed ); continue; } else { @@ -84,11 +84,11 @@ namespace cds { namespace intrusive { virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) { if ( bConcurrent ) { - marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed); - do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + marked_ptr pNext = m_pNext.load(atomics::memory_order_relaxed); + do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), 
atomics::memory_order_release, atomics::memory_order_relaxed ) ); } else { - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( marked_ptr(), atomics::memory_order_relaxed ); } } }; diff --git a/cds/intrusive/cuckoo_set.h b/cds/intrusive/cuckoo_set.h index 340c4ede..89a9fd16 100644 --- a/cds/intrusive/cuckoo_set.h +++ b/cds/intrusive/cuckoo_set.h @@ -658,8 +658,8 @@ namespace cds { namespace intrusive { //@cond static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; - CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) - CDS_ATOMIC::atomic m_nCapacity ; ///< lock array capacity + atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + atomics::atomic m_nCapacity ; ///< lock array capacity lock_array_ptr m_arrLocks[ c_nArity ] ; ///< Lock array. The capacity of array is specified in constructor. spinlock_type m_access ; ///< access to m_arrLocks statistics_type m_Stat ; ///< internal statistics @@ -695,7 +695,7 @@ namespace cds { namespace intrusive { // wait while resizing while ( true ) { - who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) break; bkoff(); @@ -715,7 +715,7 @@ namespace cds { namespace intrusive { parrLock[i]->lock(); } - who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks[0] == pLockArr[0] ) { m_Stat.onCellLock(); return; @@ -742,7 +742,7 @@ namespace cds { namespace intrusive { // It is assumed that the current thread already has a lock // and requires a second lock for other hash - size_t const nMask = m_nCapacity.load(CDS_ATOMIC::memory_order_acquire) - 1; + size_t const nMask = m_nCapacity.load(atomics::memory_order_acquire) - 1; size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask); if ( nCell == lock_array_type::c_nUnspecifiedCell ) { m_Stat.onSecondCellLockFailed(); @@ -765,7 +765,7 @@ namespace cds { namespace intrusive { back_off bkoff; while ( true ) { owner_t ownNull = 0; - if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { m_arrLocks[0]->lock_all(); m_Stat.onFullLock(); @@ -779,7 +779,7 @@ namespace cds { namespace intrusive { void release_all() { m_arrLocks[0]->unlock_all(); - m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + m_Owner.store( 0, atomics::memory_order_release ); } void acquire_resize( lock_array_ptr * pOldLocks ) @@ -795,9 +795,9 @@ namespace cds { namespace intrusive { // global lock owner_t ownNull = 0; - if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) { + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { if ( pOldLocks[0] != m_arrLocks[0] ) { - m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + m_Owner.store( 0, atomics::memory_order_release ); m_Stat.onResizeLockArrayChanged(); } else { @@ -820,7 +820,7 @@ namespace cds { namespace intrusive { void release_resize( lock_array_ptr * pOldLocks ) { - m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + m_Owner.store( 0, atomics::memory_order_release 
); pOldLocks[0]->unlock_all(); } //@endcond @@ -935,7 +935,7 @@ namespace cds { namespace intrusive { for ( unsigned int i = 0; i < c_nArity; ++i ) m_arrLocks[i] = pNew[i]; } - m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_release ); + m_nCapacity.store( nCapacity, atomics::memory_order_release ); m_Stat.onResize(); } @@ -947,7 +947,7 @@ namespace cds { namespace intrusive { */ size_t lock_count() const { - return m_nCapacity.load(CDS_ATOMIC::memory_order_relaxed); + return m_nCapacity.load(atomics::memory_order_relaxed); } /// Returns the arity of \p refinable mutex policy diff --git a/cds/intrusive/details/dummy_node_holder.h b/cds/intrusive/details/dummy_node_holder.h index af21db42..56a2a925 100644 --- a/cds/intrusive/details/dummy_node_holder.h +++ b/cds/intrusive/details/dummy_node_holder.h @@ -39,7 +39,7 @@ namespace cds { namespace intrusive { namespace details { { assert( p != nullptr ); - p->m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release ); + p->m_pNext.store( nullptr, atomics::memory_order_release ); allocator_type().Delete( p ); } }; diff --git a/cds/intrusive/details/ellen_bintree_base.h b/cds/intrusive/details/ellen_bintree_base.h index 25b81e8f..a52390a4 100644 --- a/cds/intrusive/details/ellen_bintree_base.h +++ b/cds/intrusive/details/ellen_bintree_base.h @@ -200,9 +200,9 @@ namespace cds { namespace intrusive { typedef typename update_desc_type::update_ptr update_ptr ; ///< Marked pointer to update descriptor key_type m_Key ; ///< Regular key - CDS_ATOMIC::atomic m_pLeft ; ///< Left subtree - CDS_ATOMIC::atomic m_pRight ; ///< Right subtree - CDS_ATOMIC::atomic m_pUpdate ; ///< Update descriptor + atomics::atomic m_pLeft ; ///< Left subtree + atomics::atomic m_pRight ; ///< Right subtree + atomics::atomic m_pUpdate ; ///< Update descriptor //@cond uintptr_t m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4 //@endcond diff --git a/cds/intrusive/ellen_bintree_impl.h b/cds/intrusive/ellen_bintree_impl.h index f4890a8d..d8e6c0dd 100644 --- a/cds/intrusive/ellen_bintree_impl.h +++ b/cds/intrusive/ellen_bintree_impl.h @@ -970,8 +970,8 @@ namespace cds { namespace intrusive { bool check_consistency( internal_node const * pRoot ) const { - tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed ); - tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed ); + tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); assert( pLeft ); assert( pRight ); @@ -1017,7 +1017,7 @@ namespace cds { namespace intrusive { return p; } - update_ptr search_protect_update( search_result& res, CDS_ATOMIC::atomic const& src ) const + update_ptr search_protect_update( search_result& res, atomics::atomic const& src ) const { update_ptr ret; update_ptr upd( src.load( memory_model::memory_order_relaxed ) ); @@ -1221,17 +1221,17 @@ namespace cds { namespace intrusive { tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); if ( pOp->iInfo.bRightLeaf ) { CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), - memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_relaxed, atomics::memory_order_relaxed )); } else { CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), - memory_model::memory_order_relaxed, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_relaxed, 
atomics::memory_order_relaxed )); } // Unflag parent update_ptr cur( pOp, update_desc::IFlag ); CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_release, atomics::memory_order_relaxed )); } bool check_delete_precondition( search_result& res ) const @@ -1261,7 +1261,7 @@ namespace cds { namespace intrusive { update_ptr pUpdate( pOp->dInfo.pUpdateParent ); update_ptr pMark( pOp, update_desc::Mark ); if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // * - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { help_marked( pOp ); @@ -1280,7 +1280,7 @@ namespace cds { namespace intrusive { // Undo grandparent dInfo update_ptr pDel( pOp, update_desc::DFlag ); if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { retire_update_desc( pOp ); } @@ -1288,7 +1288,7 @@ namespace cds { namespace intrusive { } } - tree_node * protect_sibling( typename gc::Guard& guard, CDS_ATOMIC::atomic& sibling ) + tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic& sibling ) { typename gc::Guard guardLeaf; @@ -1317,16 +1317,16 @@ namespace cds { namespace intrusive { if ( pOp->dInfo.bRightParent ) { CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite, - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_release, atomics::memory_order_relaxed )); } else { CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite, - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_release, atomics::memory_order_relaxed )); } update_ptr upd( pOp, update_desc::DFlag ); CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + memory_model::memory_order_release, atomics::memory_order_relaxed )); } bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res ) @@ -1376,7 +1376,7 @@ namespace cds { namespace intrusive { update_ptr updCur( res.updParent.ptr() ); if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // do insert help_insert( pOp ); @@ -1421,7 +1421,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) { // res.pLeaf is not deleted yet since it is guarded @@ -1504,7 +1504,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, 
CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) break; @@ -1552,7 +1552,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp )) break; diff --git a/cds/intrusive/ellen_bintree_rcu.h b/cds/intrusive/ellen_bintree_rcu.h index 3a817091..9e608ca4 100644 --- a/cds/intrusive/ellen_bintree_rcu.h +++ b/cds/intrusive/ellen_bintree_rcu.h @@ -1390,8 +1390,8 @@ namespace cds { namespace intrusive { bool check_consistency( internal_node const * pRoot ) const { - tree_node * pLeft = pRoot->m_pLeft.load( CDS_ATOMIC::memory_order_relaxed ); - tree_node * pRight = pRoot->m_pRight.load( CDS_ATOMIC::memory_order_relaxed ); + tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); assert( pLeft ); assert( pRight ); @@ -1440,16 +1440,16 @@ namespace cds { namespace intrusive { tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); if ( pOp->iInfo.bRightLeaf ) { pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } else { pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } update_ptr cur( pOp, update_desc::IFlag ); pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } bool check_delete_precondition( search_result& res ) @@ -1475,7 +1475,7 @@ namespace cds { namespace intrusive { update_ptr pUpdate( pOp->dInfo.pUpdateParent ); update_ptr pMark( pOp, update_desc::Mark ); if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { help_marked( pOp ); retire_node( pOp->dInfo.pParent, rl ); @@ -1499,7 +1499,7 @@ namespace cds { namespace intrusive { // Undo grandparent dInfo update_ptr pDel( pOp, update_desc::DFlag ); if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { retire_update_desc( pOp, rl, false ); } @@ -1517,19 +1517,19 @@ namespace cds { namespace intrusive { pOp->dInfo.bRightLeaf ? pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire ) : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } else { pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p, pOp->dInfo.bRightLeaf ? 
pOp->dInfo.pParent->m_pLeft.load( memory_model::memory_order_acquire ) : pOp->dInfo.pParent->m_pRight.load( memory_model::memory_order_acquire ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } update_ptr upd( pOp, update_desc::DFlag ); pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + memory_model::memory_order_release, atomics::memory_order_relaxed ); } template @@ -1722,7 +1722,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp, updRetire )) { // res.pLeaf is not deleted yet since RCU is blocked @@ -1797,7 +1797,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp, updRetire )) { ptr = node_traits::to_value_ptr( res.pLeaf ); @@ -1860,7 +1860,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp, updRetire )) { result = node_traits::to_value_ptr( res.pLeaf ); @@ -1921,7 +1921,7 @@ namespace cds { namespace intrusive { update_ptr updGP( res.updGrandParent.ptr() ); if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { if ( help_delete( pOp, updRetire )) { result = node_traits::to_value_ptr( res.pLeaf ); @@ -2047,7 +2047,7 @@ namespace cds { namespace intrusive { update_ptr updCur( res.updParent.ptr() ); if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // do insert help_insert( pOp ); diff --git a/cds/intrusive/lazy_list_base.h b/cds/intrusive/lazy_list_base.h index f2ee2bb7..8c1cd61b 100644 --- a/cds/intrusive/lazy_list_base.h +++ b/cds/intrusive/lazy_list_base.h @@ -45,7 +45,7 @@ namespace cds { namespace intrusive { /// Checks if node is marked bool is_marked() const { - return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; } /// Default ctor @@ -177,7 +177,7 @@ namespace cds { namespace intrusive { */ static void is_empty( node_type const * pNode ) { - assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/lazy_list_hrc.h 
b/cds/intrusive/lazy_list_hrc.h index 9d397ed0..445c4736 100644 --- a/cds/intrusive/lazy_list_hrc.h +++ b/cds/intrusive/lazy_list_hrc.h @@ -26,7 +26,7 @@ namespace cds { namespace intrusive { namespace lazy_list { /// Checks if node is marked bool is_marked() const { - return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; } node() @@ -42,9 +42,9 @@ namespace cds { namespace intrusive { namespace lazy_list { while ( true ) { marked_ptr pNextMarked( aGuards.protect( 0, m_pNext )); node * pNext = pNextMarked.ptr(); - if ( pNext != nullptr && pNext->m_bDeleted.load( CDS_ATOMIC::memory_order_acquire ) ) { + if ( pNext != nullptr && pNext->m_bDeleted.load( atomics::memory_order_acquire ) ) { marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); - m_pNext.compare_exchange_weak( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + m_pNext.compare_exchange_weak( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed ); continue; } else { @@ -56,11 +56,11 @@ namespace cds { namespace intrusive { namespace lazy_list { virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) { if ( bConcurrent ) { - marked_ptr pNext( m_pNext.load(CDS_ATOMIC::memory_order_relaxed)); - do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + marked_ptr pNext( m_pNext.load(atomics::memory_order_relaxed)); + do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) ); } else { - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( marked_ptr(), atomics::memory_order_relaxed ); } } }; diff --git a/cds/intrusive/lazy_list_nogc.h b/cds/intrusive/lazy_list_nogc.h index ca43491f..70be1c4c 100644 --- a/cds/intrusive/lazy_list_nogc.h +++ b/cds/intrusive/lazy_list_nogc.h @@ -20,7 +20,7 @@ namespace cds { namespace intrusive { typedef Lock lock_type ; ///< Lock type typedef Tag tag ; ///< tag - CDS_ATOMIC::atomic m_pNext ; ///< pointer to the next node in the list + atomics::atomic m_pNext ; ///< pointer to the next node in the list mutable lock_type m_Lock ; ///< Node lock node() diff --git a/cds/intrusive/lazy_list_rcu.h b/cds/intrusive/lazy_list_rcu.h index 7e15553d..027ff733 100644 --- a/cds/intrusive/lazy_list_rcu.h +++ b/cds/intrusive/lazy_list_rcu.h @@ -23,7 +23,7 @@ namespace cds { namespace intrusive { typedef Tag tag ; ///< tag typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer - typedef CDS_ATOMIC::atomic atomic_marked_ptr ; ///< atomic marked pointer specific for GC + typedef atomics::atomic atomic_marked_ptr ; ///< atomic marked pointer specific for GC atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list mutable lock_type m_Lock ; ///< Node lock @@ -31,7 +31,7 @@ namespace cds { namespace intrusive { /// Checks if node is marked bool is_marked() const { - return m_pNext.load(CDS_ATOMIC::memory_order_relaxed).bits() != 0; + return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; } /// Default ctor @@ -42,7 +42,7 @@ namespace cds { namespace intrusive { /// Clears internal fields void clear() { - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + m_pNext.store( marked_ptr(), atomics::memory_order_release ); } }; } // namespace lazy_list diff --git a/cds/intrusive/michael_deque.h b/cds/intrusive/michael_deque.h index d9d2af4a..8de82e1e 
100644 --- a/cds/intrusive/michael_deque.h +++ b/cds/intrusive/michael_deque.h @@ -113,14 +113,14 @@ namespace cds { namespace intrusive { //@cond node() { - m_Links.store( anchor(0,0), CDS_ATOMIC::memory_order_release ); + m_Links.store( anchor(0,0), atomics::memory_order_release ); } explicit node( anchor const& a ) : m_Links() , m_nIndex(0) { - m_Links.store( a, CDS_ATOMIC::memory_order_release ); + m_Links.store( a, atomics::memory_order_release ); } //@endcond }; @@ -240,7 +240,7 @@ namespace cds { namespace intrusive { static void is_empty( const node_type * pNode ) { # ifdef _DEBUG - anchor a = pNode->m_Links.load(CDS_ATOMIC::memory_order_relaxed); + anchor a = pNode->m_Links.load(atomics::memory_order_relaxed); assert( a.idxLeft == 0 && a.idxRight == 0 ); # endif } @@ -490,7 +490,7 @@ namespace cds { namespace intrusive { # endif mapper_type m_set; - CDS_ATOMIC::atomic m_nLastIndex; + atomics::atomic m_nLastIndex; public: @@ -795,7 +795,7 @@ namespace cds { namespace intrusive { :m_Anchor() ,m_Mapper( 4096, 4 ) { - m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release ); + m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release ); // GC and node_type::gc must be the same static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); @@ -814,7 +814,7 @@ namespace cds { namespace intrusive { :m_Anchor() ,m_Mapper( nMaxItemCount, nLoadFactor ) { - m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), CDS_ATOMIC::memory_order_release ); + m_Anchor.store( anchor_type( c_nEmptyIndex, c_nEmptyIndex ), atomics::memory_order_release ); // GC and node_type::gc must be the same static_assert(( std::is_same::value ), "GC and node_type::gc must be the same"); diff --git a/cds/intrusive/michael_list_base.h b/cds/intrusive/michael_list_base.h index 39ccaa55..bb0bf8ea 100644 --- a/cds/intrusive/michael_list_base.h +++ b/cds/intrusive/michael_list_base.h @@ -129,7 +129,7 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/michael_list_hrc.h b/cds/intrusive/michael_list_hrc.h index f3a783fb..fdcf37d5 100644 --- a/cds/intrusive/michael_list_hrc.h +++ b/cds/intrusive/michael_list_hrc.h @@ -32,9 +32,9 @@ namespace cds { namespace intrusive { namespace michael_list { while ( true ) { marked_ptr pNextMarked( aGuards.protect( 0, m_pNext )); node * pNext = pNextMarked.ptr(); - if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) { marked_ptr p = aGuards.protect( 1, pNext->m_pNext ); - m_pNext.compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + m_pNext.compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed ); continue; } else { @@ -46,11 +46,11 @@ namespace cds { namespace intrusive { namespace michael_list { virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) { if ( bConcurrent ) { - marked_ptr pNext = m_pNext.load(CDS_ATOMIC::memory_order_acquire); - do {} while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + marked_ptr pNext = m_pNext.load(atomics::memory_order_acquire); + do {} 
while ( !m_pNext.compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) ); } else { - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( marked_ptr(), atomics::memory_order_relaxed ); } } }; diff --git a/cds/intrusive/michael_list_impl.h b/cds/intrusive/michael_list_impl.h index 4fd358bc..3fbd853f 100644 --- a/cds/intrusive/michael_list_impl.h +++ b/cds/intrusive/michael_list_impl.h @@ -271,7 +271,7 @@ namespace cds { namespace intrusive { marked_node_ptr cur(pos.pCur); pNode->m_pNext.store( cur, memory_model::memory_order_relaxed ); - return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ); } bool unlink_node( position& pos ) @@ -281,11 +281,11 @@ namespace cds { namespace intrusive { // Mark the node (logical deleting) marked_node_ptr next(pos.pNext, 0); - if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed )) { // physical deletion may be performed by search function if it detects that a node is logically deleted (marked) // CAS may be successful here or in other thread that searching something marked_node_ptr cur(pos.pCur); - if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed )) retire_node( pos.pCur ); return true; } @@ -1143,7 +1143,7 @@ try_again: if ( pNext.bits() == 1 ) { // pCur marked i.e. logically deleted. 
Help the erase/unlink function to unlink pCur node marked_node_ptr cur( pCur.ptr()); - if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr() ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { retire_node( pCur.ptr() ); } else { diff --git a/cds/intrusive/michael_list_nogc.h b/cds/intrusive/michael_list_nogc.h index 67952fba..e1054a6e 100644 --- a/cds/intrusive/michael_list_nogc.h +++ b/cds/intrusive/michael_list_nogc.h @@ -20,7 +20,7 @@ namespace cds { namespace intrusive { typedef gc::nogc gc ; ///< Garbage collector typedef Tag tag ; ///< tag - typedef CDS_ATOMIC::atomic< node * > atomic_ptr ; ///< atomic marked pointer + typedef atomics::atomic< node * > atomic_ptr ; ///< atomic marked pointer atomic_ptr m_pNext ; ///< pointer to the next node in the container @@ -121,7 +121,7 @@ namespace cds { namespace intrusive { link_checker::is_empty( pNode ); pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed ); - return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ); } //@endcond diff --git a/cds/intrusive/michael_list_rcu.h b/cds/intrusive/michael_list_rcu.h index c19953e4..0239c191 100644 --- a/cds/intrusive/michael_list_rcu.h +++ b/cds/intrusive/michael_list_rcu.h @@ -165,19 +165,19 @@ namespace cds { namespace intrusive { marked_node_ptr p( pos.pCur ); pNode->m_pNext.store( p, memory_model::memory_order_relaxed ); - return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + return pos.pPrev->compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ); } bool unlink_node( position& pos ) { // Mark the node (logical deleting) marked_node_ptr next(pos.pNext, 0); - if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + if ( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { marked_node_ptr cur(pos.pCur); - if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_release, atomics::memory_order_relaxed )) return true; next |= 1; - CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + CDS_VERIFY( pos.pCur->m_pNext.compare_exchange_strong( next, next ^ 1, memory_model::memory_order_release, atomics::memory_order_relaxed )); } return false; } diff --git a/cds/intrusive/moir_queue.h b/cds/intrusive/moir_queue.h index 0ccbbdc3..fe9ad8c5 100644 --- a/cds/intrusive/moir_queue.h +++ b/cds/intrusive/moir_queue.h @@ -126,10 +126,10 @@ namespace cds { namespace intrusive { if ( pNext == nullptr ) return false ; // queue is empty - if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( 
base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) { node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire); if ( h == t ) - base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); break; } diff --git a/cds/intrusive/msqueue.h b/cds/intrusive/msqueue.h index 67f51d4d..7a819613 100644 --- a/cds/intrusive/msqueue.h +++ b/cds/intrusive/msqueue.h @@ -204,12 +204,12 @@ namespace cds { namespace intrusive { node_type * t = m_pTail.load(memory_model::memory_order_acquire); if ( h == t ) { // It is needed to help enqueue - m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); m_Stat.onBadTail(); continue; } - if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) break; m_Stat.onDequeueRace(); @@ -334,13 +334,13 @@ namespace cds { namespace intrusive { node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire); if ( pNext != nullptr ) { // Tail is misplaced, advance it - m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); m_Stat.onBadTail(); continue; } node_type * tmp = nullptr; - if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) break; m_Stat.onEnqueueRace(); @@ -349,7 +349,7 @@ namespace cds { namespace intrusive { ++m_ItemCounter; m_Stat.onEnqueue(); - if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )) + if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) m_Stat.onAdvanceTailFailed(); return true; } diff --git a/cds/intrusive/optimistic_queue.h b/cds/intrusive/optimistic_queue.h index eda9ce20..43421108 100644 --- a/cds/intrusive/optimistic_queue.h +++ b/cds/intrusive/optimistic_queue.h @@ -117,8 +117,8 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); - assert( pNode->m_pPrev.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pPrev.load( atomics::memory_order_relaxed ) == nullptr ); } }; @@ -408,7 +408,7 @@ namespace cds { namespace intrusive { fix_list( pTail, pHead ); continue; } - if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // dequeue success break; } @@ -513,7 +513,7 @@ namespace cds { namespace intrusive { 
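The msqueue.h hunks above follow the classic Michael-Scott discipline: a thread that observes a lagging tail first helps advance m_pTail before retrying its own CAS. A condensed sketch of the enqueue side is given below; the node type and free-function signature are simplified, and the real code additionally goes through hazard-pointer guards, item counting and statistics.

    // Sketch: Michael-Scott enqueue with tail "helping", modelled on MSQueue::enqueue().
    // No guards, item counter or stats - pattern only.
    struct qnode {
        atomics::atomic<qnode *> m_pNext;
    };

    void enqueue( atomics::atomic<qnode *>& m_pTail, qnode * pNew )
    {
        pNew->m_pNext.store( nullptr, atomics::memory_order_relaxed );
        for (;;) {
            qnode * t = m_pTail.load( atomics::memory_order_acquire );
            qnode * pNext = t->m_pNext.load( atomics::memory_order_acquire );
            if ( pNext ) {
                // Tail is misplaced - help the other enqueuer, then retry
                m_pTail.compare_exchange_weak( t, pNext,
                    atomics::memory_order_release, atomics::memory_order_relaxed );
                continue;
            }
            qnode * pNull = nullptr;
            if ( t->m_pNext.compare_exchange_strong( pNull, pNew,
                    atomics::memory_order_release, atomics::memory_order_relaxed ))
            {
                // Linked in; swinging the tail may fail if another thread already advanced it
                m_pTail.compare_exchange_strong( t, pNew,
                    atomics::memory_order_acq_rel, atomics::memory_order_relaxed );
                return;
            }
        }
    }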
node_type * pTail = guards.protect( 0, m_pTail, node_to_value() ) ; // Read the tail while( true ) { pNew->m_pNext.store( pTail, memory_model::memory_order_release ); - if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { // Try to CAS the tail + if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { // Try to CAS the tail pTail->m_pPrev.store( pNew, memory_model::memory_order_release ) ; // Success, write prev ++m_ItemCounter; m_Stat.onEnqueue(); diff --git a/cds/intrusive/segmented_queue.h b/cds/intrusive/segmented_queue.h index 05ff51c4..1b61e12b 100644 --- a/cds/intrusive/segmented_queue.h +++ b/cds/intrusive/segmented_queue.h @@ -209,14 +209,14 @@ namespace cds { namespace intrusive { // Segment struct segment: public boost::intrusive::slist_base_hook<> { - CDS_ATOMIC::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor + atomics::atomic< cell > * cells; // Cell array of size \ref m_nQuasiFactor size_t version; // version tag (ABA prevention tag) // cell array is placed here in one continuous memory block // Initializes the segment segment( size_t nCellCount ) // MSVC warning C4355: 'this': used in base member initializer list - : cells( reinterpret_cast< CDS_ATOMIC::atomic< cell > * >( this + 1 )) + : cells( reinterpret_cast< atomics::atomic< cell > * >( this + 1 )) , version( 0 ) { init( nCellCount ); @@ -224,17 +224,17 @@ namespace cds { namespace intrusive { void init( size_t nCellCount ) { - CDS_ATOMIC::atomic< cell > * pLastCell = cells + nCellCount; - for ( CDS_ATOMIC::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell ) - pCell->store( cell(), CDS_ATOMIC::memory_order_relaxed ); - CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + atomics::atomic< cell > * pLastCell = cells + nCellCount; + for ( atomics::atomic< cell > * pCell = cells; pCell < pLastCell; ++pCell ) + pCell->store( cell(), atomics::memory_order_relaxed ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); } private: segment(); //=delete }; - typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic, options::alignment >::type aligned_segment_ptr; + typedef typename opt::details::alignment_setter< atomics::atomic, options::alignment >::type aligned_segment_ptr; //@endcond protected: @@ -300,8 +300,8 @@ namespace cds { namespace intrusive { bool populated( segment const& s ) const { // The lock should be held - CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor(); - for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor(); + for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { if ( !pCell->load( memory_model::memory_order_relaxed ).all() ) return false; } @@ -310,8 +310,8 @@ namespace cds { namespace intrusive { bool exhausted( segment const& s ) const { // The lock should be held - CDS_ATOMIC::atomic< cell > const * pLastCell = s.cells + quasi_factor(); - for ( CDS_ATOMIC::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + atomics::atomic< cell > const * pLastCell = s.cells + quasi_factor(); + for ( atomics::atomic< cell > const * pCell = s.cells; pCell < pLastCell; ++pCell ) { if ( !pCell->load( memory_model::memory_order_relaxed ).bits() ) return false; } @@ -474,7 +474,7 @@ namespace cds { namespace intrusive { // Empty cell 
found, try to enqueue here cell nullCell; if ( pTailSegment->cells[i].compare_exchange_strong( nullCell, cell( &val ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { // Ok to push item m_Stat.onPush(); @@ -641,7 +641,7 @@ namespace cds { namespace intrusive { if ( !item.bits() ) { // Try to mark the cell as deleted if ( pHeadSegment->cells[i].compare_exchange_strong( item, item | 1, - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { --m_ItemCounter; m_Stat.onPop(); diff --git a/cds/intrusive/single_link_struct.h b/cds/intrusive/single_link_struct.h index 324409ab..4f557f33 100644 --- a/cds/intrusive/single_link_struct.h +++ b/cds/intrusive/single_link_struct.h @@ -65,9 +65,9 @@ namespace cds { namespace intrusive { while ( true ) { node * pNext = aGuards.protect( 0, m_pNext ); - if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) { node * p = aGuards.protect( 1, pNext->m_pNext ); - m_pNext.compare_exchange_strong( pNext, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + m_pNext.compare_exchange_strong( pNext, p, atomics::memory_order_acquire, atomics::memory_order_relaxed ); continue; } else { @@ -79,11 +79,11 @@ namespace cds { namespace intrusive { virtual void terminate( cds::gc::hrc::ThreadGC * pGC, bool bConcurrent ) { if ( bConcurrent ) { - node * pNext = m_pNext.load(CDS_ATOMIC::memory_order_relaxed); - do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + node * pNext = m_pNext.load(atomics::memory_order_relaxed); + do {} while ( !m_pNext.compare_exchange_weak( pNext, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) ); } else { - m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( nullptr, atomics::memory_order_relaxed ); } } }; @@ -166,7 +166,7 @@ namespace cds { namespace intrusive { */ static void is_empty( const node_type * pNode ) { - assert( pNode->m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); } }; diff --git a/cds/intrusive/skip_list_base.h b/cds/intrusive/skip_list_base.h index 9e048e6e..57a5a85f 100644 --- a/cds/intrusive/skip_list_base.h +++ b/cds/intrusive/skip_list_base.h @@ -117,7 +117,7 @@ namespace cds { namespace intrusive { void clear() { assert( m_arrNext == nullptr ); - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + m_pNext.store( marked_ptr(), atomics::memory_order_release ); } //@cond @@ -248,7 +248,7 @@ namespace cds { namespace intrusive { */ class xorshift { //@cond - CDS_ATOMIC::atomic m_nSeed; + atomics::atomic m_nSeed; //@endcond public: /// The upper bound of generator's return value. 
The generator produces random number in range [0..c_nUpperBound) @@ -257,7 +257,7 @@ namespace cds { namespace intrusive { /// Initializes the generator instance xorshift() { - m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed ); + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); } /// Main generator function @@ -276,11 +276,11 @@ namespace cds { namespace intrusive { return level; } */ - unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ); + unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ); x ^= x << 13; x ^= x >> 17; x ^= x << 5; - m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed ); + m_nSeed.store( x, atomics::memory_order_relaxed ); unsigned int nLevel = ((x & 0x00000001) != 0) ? 0 : cds::bitop::LSB( (~(x >> 1)) & 0x7FFFFFFF ); assert( nLevel < c_nUpperBound ); return nLevel; @@ -298,7 +298,7 @@ namespace cds { namespace intrusive { class turbo_pascal { //@cond - CDS_ATOMIC::atomic m_nSeed; + atomics::atomic m_nSeed; //@endcond public: /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) @@ -307,7 +307,7 @@ namespace cds { namespace intrusive { /// Initializes the generator instance turbo_pascal() { - m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), CDS_ATOMIC::memory_order_relaxed ); + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); } /// Main generator function @@ -330,8 +330,8 @@ namespace cds { namespace intrusive { upper 16 bits) so we traverse from highest bit down (i.e., test sign), thus hardly ever use lower bits. */ - unsigned int x = m_nSeed.load( CDS_ATOMIC::memory_order_relaxed ) * 134775813 + 1; - m_nSeed.store( x, CDS_ATOMIC::memory_order_relaxed ); + unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ) * 134775813 + 1; + m_nSeed.store( x, atomics::memory_order_relaxed ); unsigned int nLevel = ( x & 0x80000000 ) ? 
(31 - cds::bitop::MSBnz( (x & 0x7FFFFFFF) | 1 )) : 0; assert( nLevel < c_nUpperBound ); return nLevel; @@ -588,7 +588,7 @@ namespace cds { namespace intrusive { head_node( unsigned int nHeight ) { for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) - m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed ); node_type::make_tower( nHeight, m_Tower ); } diff --git a/cds/intrusive/skip_list_hrc.h b/cds/intrusive/skip_list_hrc.h index a283915d..67bcff64 100644 --- a/cds/intrusive/skip_list_hrc.h +++ b/cds/intrusive/skip_list_hrc.h @@ -40,7 +40,7 @@ namespace cds { namespace intrusive { namespace skip_list { ~node() { release_tower(); - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pNext.store( marked_ptr(), atomics::memory_order_relaxed ); } /// Constructs a node of height \p nHeight @@ -63,7 +63,7 @@ namespace cds { namespace intrusive { namespace skip_list { m_arrNext = nullptr; m_nHeight = 1; for ( unsigned int i = 0; i < nHeight; ++i ) - pTower[i].store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + pTower[i].store( marked_ptr(), atomics::memory_order_release ); } return pTower; } @@ -120,9 +120,9 @@ namespace cds { namespace intrusive { namespace skip_list { while ( true ) { marked_ptr pNextMarked( aGuards.protect( 0, next(i) )); node * pNext = pNextMarked.ptr(); - if ( pNext && pNext->m_bDeleted.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( pNext && pNext->m_bDeleted.load(atomics::memory_order_acquire) ) { marked_ptr p = aGuards.protect( 1, pNext->next(i) ); - next(i).compare_exchange_strong( pNextMarked, p, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + next(i).compare_exchange_strong( pNextMarked, p, atomics::memory_order_acquire, atomics::memory_order_relaxed ); continue; } else { @@ -137,13 +137,13 @@ namespace cds { namespace intrusive { namespace skip_list { unsigned int const nHeight = height(); if ( bConcurrent ) { for (unsigned int i = 0; i < nHeight; ++i ) { - marked_ptr pNext = next(i).load(CDS_ATOMIC::memory_order_relaxed); - while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + marked_ptr pNext = next(i).load(atomics::memory_order_relaxed); + while ( !next(i).compare_exchange_weak( pNext, marked_ptr(), atomics::memory_order_release, atomics::memory_order_relaxed ) ); } } else { for (unsigned int i = 0; i < nHeight; ++i ) - next(i).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + next(i).store( marked_ptr(), atomics::memory_order_relaxed ); } } }; @@ -173,7 +173,7 @@ namespace cds { namespace intrusive { namespace skip_list { : m_pHead( new head_tower() ) { for ( size_t i = 0; i < sizeof(m_pHead->m_Tower) / sizeof(m_pHead->m_Tower[0]); ++i ) - m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + m_pHead->m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed ); m_pHead->make_tower( nHeight, m_pHead->m_Tower ); } diff --git a/cds/intrusive/skip_list_impl.h b/cds/intrusive/skip_list_impl.h index b19fb52f..b58ba19c 100644 --- a/cds/intrusive/skip_list_impl.h +++ b/cds/intrusive/skip_list_impl.h @@ -48,7 +48,7 @@ namespace cds { namespace intrusive { back_off bkoff; for (;;) { - if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( 
atomics::memory_order_acquire ).bits() ) { // Current node is marked as deleted. So, its next pointer can point to anything // In this case we interrupt our iteration and returns end() iterator. *this = iterator(); @@ -62,7 +62,7 @@ namespace cds { namespace intrusive { bkoff(); continue; } - else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) { + else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; @@ -89,7 +89,7 @@ namespace cds { namespace intrusive { node_type * pp = p.ptr(); // Logically deleted node is marked from highest level - if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) { m_pNode = pp; break; } @@ -481,7 +481,7 @@ namespace cds { namespace intrusive { item_counter m_ItemCounter ; ///< item counter random_level_generator m_RandomLevelGen ; ///< random level generator instance - CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level + atomics::atomic m_nHeight ; ///< estimated high level mutable stat m_Stat ; ///< internal statistics protected: @@ -550,7 +550,7 @@ namespace cds { namespace intrusive { // pCur is marked, i.e. logically deleted. marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) { gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); @@ -618,7 +618,7 @@ namespace cds { namespace intrusive { // pCur is marked, i.e. logically deleted. marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); @@ -672,7 +672,7 @@ namespace cds { namespace intrusive { // pCur is marked, i.e. logically deleted. 
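The hunks above all touch the same search-time helping idiom: when a traversal meets a logically deleted node, it tries to swing the predecessor's link past it with a CAS whose failure ordering is relaxed, because a failed attempt publishes nothing. A minimal sketch of that idiom, using std::atomic in place of the library's atomics:: alias and a plain flag instead of a marked pointer (all names here are illustrative, not libcds API):

    #include <atomic>

    struct node {
        std::atomic<node*> next{ nullptr };
        bool deleted = false;            // stands in for the mark bit of a marked pointer
    };

    // Try to physically remove pCur, which pPred currently points to.
    inline bool help_unlink( node* pPred, node* pCur )
    {
        node* expected = pCur;
        node* succ = pCur->next.load( std::memory_order_acquire );
        return pPred->next.compare_exchange_strong( expected, succ,
            std::memory_order_release,    // success: publish the shortened list
            std::memory_order_relaxed );  // failure: another thread already helped
    }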
marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) gc::retire( node_traits::to_value_ptr( pCur.ptr() ), dispose_node ); @@ -708,7 +708,7 @@ namespace cds { namespace intrusive { { marked_node_ptr p( pos.pSucc[0] ); pNode->next( 0 ).store( p, memory_model::memory_order_release ); - if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { return false; } cds::unref( f )( val ); @@ -718,7 +718,7 @@ namespace cds { namespace intrusive { marked_node_ptr p; while ( true ) { marked_node_ptr q( pos.pSucc[ nLevel ]); - if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // pNode has been marked as removed while we are inserting it // Stop inserting assert( p.bits() ); @@ -726,7 +726,7 @@ namespace cds { namespace intrusive { return true; } p = q; - if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) break; // Renew insert position @@ -754,7 +754,7 @@ namespace cds { namespace intrusive { while ( true ) { pSucc = gSucc.protect( pDel->next(nLevel), gc_protect ); if ( pSucc.bits() || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { break; } @@ -765,7 +765,7 @@ namespace cds { namespace intrusive { pSucc = gSucc.protect( pDel->next(0), gc_protect ); marked_node_ptr p( pSucc.ptr() ); if ( pDel->next(0).compare_exchange_strong( p, marked_node_ptr(p.ptr(), 1), - memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { cds::unref(f)( *node_traits::to_value_ptr( pDel )); @@ -775,7 +775,7 @@ namespace cds { namespace intrusive { for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { pSucc = gSucc.protect( pDel->next(nLevel), gc_protect ); if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( p, marked_node_ptr(pSucc.ptr()), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ) + memory_model::memory_order_release, atomics::memory_order_relaxed) ) { // Make slow erase find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); @@ -1036,7 +1036,7 @@ namespace cds { namespace intrusive { { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); if ( nCur < nHeight ) - m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); } //@endcond 
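For reference, the increase_height() idiom changed above only ever moves the shared height estimate upward; losing the CAS simply means another thread has already raised it at least as far. A rough, self-contained sketch with std::atomic (the library spells the same orderings through its atomics:: alias):

    #include <atomic>

    inline void increase_height( std::atomic<unsigned int>& height, unsigned int candidate )
    {
        unsigned int cur = height.load( std::memory_order_relaxed );
        while ( cur < candidate
            && !height.compare_exchange_weak( cur, candidate,
                   std::memory_order_release, std::memory_order_relaxed ))
        {
            // cur was reloaded by the failed CAS; the loop re-checks the condition
        }
    }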
@@ -1055,7 +1055,7 @@ namespace cds { namespace intrusive { gc::check_available_guards( c_nHazardPtrCount ); // Barrier for head node - CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); } /// Clears and destructs the skip-list diff --git a/cds/intrusive/skip_list_nogc.h b/cds/intrusive/skip_list_nogc.h index b5e6c97d..3062062b 100644 --- a/cds/intrusive/skip_list_nogc.h +++ b/cds/intrusive/skip_list_nogc.h @@ -22,7 +22,7 @@ namespace cds { namespace intrusive { typedef cds::gc::nogc gc ; ///< Garbage collector typedef Tag tag ; ///< tag - typedef CDS_ATOMIC::atomic atomic_ptr; + typedef atomics::atomic atomic_ptr; typedef atomic_ptr tower_item_type; protected: @@ -103,12 +103,12 @@ namespace cds { namespace intrusive { void clear() { assert( m_arrNext == nullptr ); - m_pNext.store( nullptr, CDS_ATOMIC::memory_order_release ); + m_pNext.store( nullptr, atomics::memory_order_release ); } bool is_cleared() const { - return m_pNext.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr + return m_pNext.load( atomics::memory_order_relaxed ) == nullptr && m_arrNext == nullptr && m_nHeight <= 1 ; @@ -137,7 +137,7 @@ namespace cds { namespace intrusive { public: // for internal use only!!! iterator( node_type& refHead ) - : m_pNode( refHead[0].load( CDS_ATOMIC::memory_order_relaxed ) ) + : m_pNode( refHead[0].load( atomics::memory_order_relaxed ) ) {} static iterator from_node( node_type * pNode ) @@ -176,7 +176,7 @@ namespace cds { namespace intrusive { iterator& operator ++() { if ( m_pNode ) - m_pNode = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed ); + m_pNode = m_pNode->next(0).load( atomics::memory_order_relaxed ); return *this; } @@ -443,7 +443,7 @@ namespace cds { namespace intrusive { head_node( unsigned int nHeight ) { for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) - m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); node_type::make_tower( nHeight, m_Tower ); } @@ -456,8 +456,8 @@ namespace cds { namespace intrusive { void clear() { for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) - m_Tower[i].store( nullptr, CDS_ATOMIC::memory_order_relaxed ); - node_type::m_pNext.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); + node_type::m_pNext.store( nullptr, atomics::memory_order_relaxed ); } }; //@endcond @@ -467,7 +467,7 @@ namespace cds { namespace intrusive { item_counter m_ItemCounter ; ///< item counter random_level_generator m_RandomLevelGen ; ///< random level generator instance - CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level + atomics::atomic m_nHeight ; ///< estimated high level mutable stat m_Stat ; ///< internal statistics protected: @@ -601,7 +601,7 @@ namespace cds { namespace intrusive { void increase_height( unsigned int nHeight ) { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); - while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ) ); + while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, atomics::memory_order_relaxed ) ); } //@endcond @@ -618,7 +618,7 @@ namespace cds { namespace intrusive { static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); // Barrier 
for head node - CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); } /// Clears and destructs the skip-list diff --git a/cds/intrusive/skip_list_rcu.h b/cds/intrusive/skip_list_rcu.h index 3fb6778c..da58a7ea 100644 --- a/cds/intrusive/skip_list_rcu.h +++ b/cds/intrusive/skip_list_rcu.h @@ -29,7 +29,7 @@ namespace cds { namespace intrusive { // bit 0 - the item is logically deleted // bit 1 - the item is extracted (only for level 0) typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer - typedef CDS_ATOMIC::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer + typedef atomics::atomic< marked_ptr > atomic_marked_ptr ; ///< atomic marked pointer typedef atomic_marked_ptr tower_item_type; protected: @@ -92,7 +92,7 @@ namespace cds { namespace intrusive { void clear_tower() { for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel ) - next(nLevel).store( marked_ptr(), CDS_ATOMIC::memory_order_relaxed ); + next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed ); } /// Access to element of next pointer array @@ -135,7 +135,7 @@ namespace cds { namespace intrusive { void clear() { assert( m_arrNext == nullptr ); - m_pNext.store( marked_ptr(), CDS_ATOMIC::memory_order_release ); + m_pNext.store( marked_ptr(), atomics::memory_order_release ); m_pDelChain = nullptr; } @@ -180,21 +180,21 @@ namespace cds { namespace intrusive { back_off bkoff; for (;;) { - if ( m_pNode->next( m_pNode->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) { // Current node is marked as deleted. So, its next pointer can point to anything // In this case we interrupt our iteration and returns end() iterator. *this = iterator(); return; } - marked_ptr p = m_pNode->next(0).load( CDS_ATOMIC::memory_order_relaxed ); + marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed ); node_type * pp = p.ptr(); if ( p.bits() ) { // p is marked as deleted. Spin waiting for physical removal bkoff(); continue; } - else if ( pp && pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_relaxed ).bits() ) { + else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits() ) { // p is marked as deleted. 
Spin waiting for physical removal bkoff(); continue; @@ -215,7 +215,7 @@ namespace cds { namespace intrusive { back_off bkoff; for (;;) { - marked_ptr p = refHead.next(0).load( CDS_ATOMIC::memory_order_relaxed ); + marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed ); if ( !p.ptr() ) { // empty skip-list break; @@ -223,7 +223,7 @@ namespace cds { namespace intrusive { node_type * pp = p.ptr(); // Logically deleted node is marked from highest level - if ( !pp->next( pp->height() - 1 ).load( CDS_ATOMIC::memory_order_acquire ).bits() ) { + if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits() ) { m_pNode = pp; break; } @@ -653,8 +653,8 @@ namespace cds { namespace intrusive { item_counter m_ItemCounter ; ///< item counter random_level_generator m_RandomLevelGen ; ///< random level generator instance - CDS_ATOMIC::atomic m_nHeight ; ///< estimated high level - CDS_ATOMIC::atomic m_pDeferredDelChain ; ///< Deferred deleted node chain + atomics::atomic m_nHeight ; ///< estimated high level + atomics::atomic m_pDeferredDelChain ; ///< Deferred deleted node chain mutable stat m_Stat ; ///< internal statistics protected: @@ -737,7 +737,7 @@ namespace cds { namespace intrusive { // pCur is marked, i.e. logically deleted. marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) { # ifdef _DEBUG @@ -811,7 +811,7 @@ namespace cds { namespace intrusive { // pCur is marked, i.e. logically deleted. marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) { # ifdef _DEBUG @@ -875,7 +875,7 @@ retry: // pCur is marked, i.e. logically deleted. 
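The marked pointer used throughout this header packs the node state (bit 0: logically deleted, bit 1: extracted) into the low bits of an aligned node pointer, so one CAS updates a link and its mark together. The sketch below mirrors the idea only; the real cds::details::marked_ptr has a richer interface and these names are illustrative:

    #include <cstdint>

    template <typename T>
    class marked_ptr_sketch {
        std::uintptr_t m_raw = 0;
    public:
        marked_ptr_sketch() = default;
        marked_ptr_sketch( T* p, unsigned bits = 0 )
            : m_raw( reinterpret_cast<std::uintptr_t>( p ) | bits ) {}

        T*       ptr()  const { return reinterpret_cast<T*>( m_raw & ~std::uintptr_t( 3 )); }
        unsigned bits() const { return static_cast<unsigned>( m_raw & 3 ); }
    };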
marked_node_ptr p( pCur.ptr() ); if ( pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( nLevel == 0 ) { # ifdef _DEBUG @@ -922,7 +922,7 @@ retry: { marked_node_ptr p( pos.pSucc[0] ); pNode->next( 0 ).store( p, memory_model::memory_order_release ); - if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed )) { return false; } # ifdef _DEBUG @@ -935,7 +935,7 @@ retry: marked_node_ptr p; while ( true ) { marked_node_ptr q( pos.pSucc[ nLevel ]); - if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + if ( !pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // pNode has been marked as removed while we are inserting it // Stop inserting assert( p.bits() ); @@ -943,7 +943,7 @@ retry: return true; } p = q; - if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ) ) break; // Renew insert position @@ -979,7 +979,7 @@ retry: pSucc = pDel->next(nLevel).load( memory_model::memory_order_relaxed ); while ( true ) { if ( pSucc.bits() - || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + || pDel->next(nLevel).compare_exchange_weak( pSucc, pSucc | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { break; } @@ -992,7 +992,7 @@ retry: return false; int const nMask = bExtract ? 
3 : 1; - if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) + if ( pDel->next(0).compare_exchange_strong( pSucc, pSucc | nMask, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { cds::unref(f)( *node_traits::to_value_ptr( pDel )); @@ -1002,7 +1002,7 @@ retry: for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { if ( !pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( pSucc, marked_node_ptr( pDel->next(nLevel).load(memory_model::memory_order_relaxed).ptr() ), - memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ) + memory_model::memory_order_release, atomics::memory_order_relaxed) ) { // Do slow erase find_position( *node_traits::to_value_ptr(pDel), pos, key_comparator(), false ); @@ -1366,7 +1366,7 @@ retry: { unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); if ( nCur < nHeight ) - m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); } class deferred_list_iterator @@ -1439,7 +1439,7 @@ retry: node_type * pDeferList = m_pDeferredDelChain.load( memory_model::memory_order_relaxed ); do { pTail->m_pDelChain = pDeferList; - } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pDeferredDelChain.compare_exchange_weak( pDeferList, pHead, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )); pos.pDelChain = nullptr; } @@ -1457,7 +1457,7 @@ retry: static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); // Barrier for head node - CDS_ATOMIC::atomic_thread_fence( memory_model::memory_order_release ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); } /// Clears and destructs the skip-list diff --git a/cds/intrusive/split_list.h b/cds/intrusive/split_list.h index 98143326..790b5ecb 100644 --- a/cds/intrusive/split_list.h +++ b/cds/intrusive/split_list.h @@ -346,7 +346,7 @@ namespace cds { namespace intrusive { protected: ordered_list_wrapper m_List ; ///< Ordered list containing split-list items bucket_table m_Buckets ; ///< bucket table - CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + atomics::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) item_counter m_ItemCounter ; ///< Item counter hash m_HashFunctor ; ///< Hash functor @@ -371,7 +371,7 @@ namespace cds { namespace intrusive { size_t bucket_no( size_t nHash ) const { - return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 ); } static size_t parent_bucket( size_t nBucket ) @@ -449,10 +449,10 @@ namespace cds { namespace intrusive { void inc_item_count() { - size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed); if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) { - m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, 
atomics::memory_order_relaxed ); } } diff --git a/cds/intrusive/split_list_base.h b/cds/intrusive/split_list_base.h index a7bc10ab..7f366ddc 100644 --- a/cds/intrusive/split_list_base.h +++ b/cds/intrusive/split_list_base.h @@ -172,7 +172,7 @@ namespace cds { namespace intrusive { public: typedef GC gc ; ///< Garbage collector typedef Node node_type ; ///< Bucket node type - typedef CDS_ATOMIC::atomic table_entry ; ///< Table entry type + typedef atomics::atomic table_entry ; ///< Table entry type /// Bucket table allocator typedef cds::details::Allocator< table_entry, typename options::allocator > bucket_table_allocator; @@ -283,13 +283,13 @@ namespace cds { namespace intrusive { public: typedef GC gc ; ///< Garbage collector typedef Node node_type ; ///< Bucket node type - typedef CDS_ATOMIC::atomic table_entry ; ///< Table entry type + typedef atomics::atomic table_entry ; ///< Table entry type /// Memory model for atomic operations typedef typename options::memory_model memory_model; protected: - typedef CDS_ATOMIC::atomic segment_type ; ///< Bucket table segment type + typedef atomics::atomic segment_type ; ///< Bucket table segment type public: /// Bucket table allocator @@ -442,7 +442,7 @@ namespace cds { namespace intrusive { if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) { table_entry * pNewSegment = allocate_segment(); table_entry * pNull = nullptr; - if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) { destroy_segment( pNewSegment ); } } diff --git a/cds/intrusive/split_list_nogc.h b/cds/intrusive/split_list_nogc.h index 8d664096..f5f585dd 100644 --- a/cds/intrusive/split_list_nogc.h +++ b/cds/intrusive/split_list_nogc.h @@ -140,7 +140,7 @@ namespace cds { namespace intrusive { protected: ordered_list_wrapper m_List ; ///< Ordered list containing split-list items bucket_table m_Buckets ; ///< bucket table - CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + atomics::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) item_counter m_ItemCounter ; ///< Item counter hash m_HashFunctor ; ///< Hash functor @@ -165,7 +165,7 @@ namespace cds { namespace intrusive { size_t bucket_no( size_t nHash ) const { - return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 ); } static size_t parent_bucket( size_t nBucket ) @@ -243,10 +243,10 @@ namespace cds { namespace intrusive { void inc_item_count() { - size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed); if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) { - m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ); } } diff --git a/cds/intrusive/split_list_rcu.h b/cds/intrusive/split_list_rcu.h index 92be12f7..4eeb2b0c 100644 --- a/cds/intrusive/split_list_rcu.h +++ b/cds/intrusive/split_list_rcu.h @@ -235,7 +235,7 @@ namespace cds { namespace intrusive { protected: ordered_list_wrapper m_List ; ///< Ordered list containing split-list 
items bucket_table m_Buckets ; ///< bucket table - CDS_ATOMIC::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) + atomics::atomic m_nBucketCountLog2 ; ///< log2( current bucket count ) item_counter m_ItemCounter ; ///< Item counter hash m_HashFunctor ; ///< Hash functor @@ -260,7 +260,7 @@ namespace cds { namespace intrusive { size_t bucket_no( size_t nHash ) const { - return nHash & ( (1 << m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed)) - 1 ); + return nHash & ( (1 << m_nBucketCountLog2.load(atomics::memory_order_relaxed)) - 1 ); } static size_t parent_bucket( size_t nBucket ) @@ -338,10 +338,10 @@ namespace cds { namespace intrusive { void inc_item_count() { - size_t sz = m_nBucketCountLog2.load(CDS_ATOMIC::memory_order_relaxed); + size_t sz = m_nBucketCountLog2.load(atomics::memory_order_relaxed); if ( ( ++m_ItemCounter >> sz ) > m_Buckets.load_factor() && ((size_t)(1 << sz )) < m_Buckets.capacity() ) { - m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ); } } diff --git a/cds/intrusive/striped_set/striping_policy.h b/cds/intrusive/striped_set/striping_policy.h index 1e3ec4c3..f3798601 100644 --- a/cds/intrusive/striped_set/striping_policy.h +++ b/cds/intrusive/striped_set/striping_policy.h @@ -153,8 +153,8 @@ namespace cds { namespace intrusive { namespace striped_set { static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor. - CDS_ATOMIC::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) - CDS_ATOMIC::atomic m_nCapacity ; ///< Lock array capacity + atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + atomics::atomic m_nCapacity ; ///< Lock array capacity spinlock_type m_access ; ///< access to m_arrLocks //@endcond @@ -169,7 +169,7 @@ namespace cds { namespace intrusive { namespace striped_set { lock_array_ptr create_lock_array( size_t nCapacity ) { - m_nCapacity.store( nCapacity, CDS_ATOMIC::memory_order_relaxed ); + m_nCapacity.store( nCapacity, atomics::memory_order_relaxed ); return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer() ); } @@ -182,7 +182,7 @@ namespace cds { namespace intrusive { namespace striped_set { while ( true ) { // wait while resizing while ( true ) { - who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) break; bkoff(); @@ -197,7 +197,7 @@ namespace cds { namespace intrusive { namespace striped_set { lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1)); lock.lock(); - who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks ) return lock; lock.unlock(); @@ -213,7 +213,7 @@ namespace cds { namespace intrusive { namespace striped_set { while ( true ) { // wait while resizing while ( true ) { - who = m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask) ) break; bkoff(); @@ -227,7 +227,7 @@ namespace cds { namespace intrusive { namespace striped_set { pLocks->lock_all(); - who = 
m_Owner.load( CDS_ATOMIC::memory_order_acquire ); + who = m_Owner.load( atomics::memory_order_acquire ); if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask) ) && m_arrLocks == pLocks ) return pLocks; @@ -247,7 +247,7 @@ namespace cds { namespace intrusive { namespace striped_set { back_off bkoff; for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) { owner_t ownNull = 0; - if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) { lock_array_ptr pOldLocks = m_arrLocks; size_t const nLockCount = pOldLocks->size(); for ( size_t i = 0; i < nLockCount; ++i ) { @@ -267,7 +267,7 @@ namespace cds { namespace intrusive { namespace striped_set { void release_resize() { - m_Owner.store( 0, CDS_ATOMIC::memory_order_release ); + m_Owner.store( 0, atomics::memory_order_release ); } //@endcond public: @@ -338,7 +338,7 @@ namespace cds { namespace intrusive { namespace striped_set { */ size_t lock_count() const { - return m_nCapacity.load( CDS_ATOMIC::memory_order_relaxed ); + return m_nCapacity.load( atomics::memory_order_relaxed ); } /// Resize for new capacity diff --git a/cds/intrusive/treiber_stack.h b/cds/intrusive/treiber_stack.h index dfb549d5..9a580f04 100644 --- a/cds/intrusive/treiber_stack.h +++ b/cds/intrusive/treiber_stack.h @@ -32,7 +32,7 @@ namespace cds { namespace intrusive { { operation_id idOp; ///< Op id T * pVal; ///< for push: pointer to argument; for pop: accepts a return value - CDS_ATOMIC::atomic nStatus; ///< Internal elimination status + atomics::atomic nStatus; ///< Internal elimination status operation() : pVal( nullptr ) @@ -165,7 +165,7 @@ namespace cds { namespace intrusive { struct bkoff_predicate { operation_desc * pOp; bkoff_predicate( operation_desc * p ): pOp(p) {} - bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } + bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; } }; # endif @@ -212,7 +212,7 @@ namespace cds { namespace intrusive { bool backoff( operation_desc& op, Stat& stat ) { elimination_backoff_type bkoff; - op.nStatus.store( op_busy, CDS_ATOMIC::memory_order_relaxed ); + op.nStatus.store( op_busy, atomics::memory_order_relaxed ); elimination_rec * myRec = cds::algo::elimination::init_record( op ); @@ -231,12 +231,12 @@ namespace cds { namespace intrusive { slot.pRec = nullptr; slot.lock.unlock(); - himOp->nStatus.store( op_collided, CDS_ATOMIC::memory_order_release ); + himOp->nStatus.store( op_collided, atomics::memory_order_release ); cds::algo::elimination::clear_record(); stat.onActiveCollision( op.idOp ); return true; } - himOp->nStatus.store( op_free, CDS_ATOMIC::memory_order_release ); + himOp->nStatus.store( op_free, atomics::memory_order_release ); } slot.pRec = myRec; slot.lock.unlock(); @@ -245,13 +245,13 @@ namespace cds { namespace intrusive { // Wait for colliding operation # if defined(CDS_CXX11_LAMBDA_SUPPORT) && !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION == CDS_COMPILER_MSVC10) // MSVC++ 2010 compiler error C2065: 'op_busy' : undeclared identifier - bkoff( [&op]() -> bool { return op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } ); + bkoff( [&op]() -> bool { return op.nStatus.load( atomics::memory_order_acquire ) != op_busy; } ); # else // Local structs is not supported by old compilers (for example, GCC 4.3) 
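The elimination back-off in this file spins on the operation's status word until a partner thread either collides with it or the wait expires. A hedged, stand-alone sketch of that predicate wait; the enum values and the yield-based delay are illustrative placeholders, not the library's exact policy:

    #include <atomic>
    #include <thread>

    enum op_status { op_free, op_busy, op_collided };

    inline bool wait_for_collision( std::atomic<int>& status, int max_spins = 1000 )
    {
        int spins = 0;
        while ( status.load( std::memory_order_acquire ) == op_busy ) {
            if ( ++spins > max_spins )
                break;                       // give up; the caller falls back to the CAS path
            std::this_thread::yield();
        }
        return status.load( std::memory_order_acquire ) == op_collided;
    }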
//struct bkoff_predicate { // operation_desc * pOp; // bkoff_predicate( operation_desc * p ): pOp(p) {} - // bool operator()() { return pOp->nStatus.load( CDS_ATOMIC::memory_order_acquire ) != op_busy; } + // bool operator()() { return pOp->nStatus.load( atomics::memory_order_acquire ) != op_busy; } //}; bkoff( bkoff_predicate(&op) ); # endif @@ -262,7 +262,7 @@ namespace cds { namespace intrusive { slot.pRec = nullptr; } - bool bCollided = op.nStatus.load( CDS_ATOMIC::memory_order_acquire ) == op_collided; + bool bCollided = op.nStatus.load( atomics::memory_order_acquire ) == op_collided; if ( !bCollided ) stat.onEliminationFailed(); @@ -584,7 +584,7 @@ namespace cds { namespace intrusive { node_type * t = m_Top.load(memory_model::memory_order_relaxed); while ( true ) { pNew->m_pNext.store( t, memory_model::memory_order_relaxed ); - if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { // #1 sync-with #2 + if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) { // #1 sync-with #2 ++m_ItemCounter; m_stat.onPush(); return true; @@ -618,7 +618,7 @@ namespace cds { namespace intrusive { return nullptr; // stack is empty node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed); - if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed )) { // #2 + if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { // #2 clear_links( t ); --m_ItemCounter; m_stat.onPop(); @@ -656,7 +656,7 @@ namespace cds { namespace intrusive { pTop = m_Top.load( memory_model::memory_order_relaxed ); if ( pTop == nullptr ) return; - if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) { // sync-with #1 and #2 + if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acq_rel, atomics::memory_order_relaxed ) ) { // sync-with #1 and #2 m_ItemCounter.reset(); break; } diff --git a/cds/intrusive/tsigas_cycle_queue.h b/cds/intrusive/tsigas_cycle_queue.h index 01822729..b734d7f6 100644 --- a/cds/intrusive/tsigas_cycle_queue.h +++ b/cds/intrusive/tsigas_cycle_queue.h @@ -98,10 +98,10 @@ namespace cds { namespace intrusive { protected: //@cond - typedef typename options::buffer::template rebind< CDS_ATOMIC::atomic >::other buffer; + typedef typename options::buffer::template rebind< atomics::atomic >::other buffer; typedef typename opt::details::alignment_setter< buffer, options::alignment >::type aligned_buffer; typedef size_t index_type; - typedef typename opt::details::alignment_setter< CDS_ATOMIC::atomic, options::alignment >::type aligned_index; + typedef typename opt::details::alignment_setter< atomics::atomic, options::alignment >::type aligned_index; //@endcond protected: @@ -216,7 +216,7 @@ namespace cds { namespace intrusive { } // help the dequeue to update head - m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nHead.compare_exchange_strong( temp, ate, memory_model::memory_order_release, atomics::memory_order_relaxed ); continue; } @@ -226,9 +226,9 @@ namespace cds { namespace intrusive { continue; // get actual tail and try to enqueue new node - if ( m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + if ( 
m_buffer[ate].compare_exchange_strong( tt, pNewNode, memory_model::memory_order_release, atomics::memory_order_relaxed ) ) { if ( temp % 2 == 0 ) - m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nTail.compare_exchange_strong( te, temp, memory_model::memory_order_release, atomics::memory_order_relaxed ); ++m_ItemCounter; return true; } @@ -275,7 +275,7 @@ namespace cds { namespace intrusive { // check whether the queue is empty if ( temp == m_nTail.load(memory_model::memory_order_acquire) ) { // help the enqueue to update end - m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nTail.compare_exchange_strong( temp, (temp + 1) & nModulo, memory_model::memory_order_release, atomics::memory_order_relaxed ); continue; } @@ -285,9 +285,9 @@ namespace cds { namespace intrusive { continue; // Get the actual head, null means empty - if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( m_buffer[temp].compare_exchange_strong( tt, pNull, memory_model::memory_order_release, atomics::memory_order_relaxed )) { if ( temp % 2 == 0 ) - m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nHead.compare_exchange_strong( th, temp, memory_model::memory_order_release, atomics::memory_order_relaxed ); --m_ItemCounter; return reinterpret_cast(reinterpret_cast( tt ) & ~intptr_t(1)); } diff --git a/cds/lock/spinlock.h b/cds/lock/spinlock.h index daf9e9e8..0e5f457f 100644 --- a/cds/lock/spinlock.h +++ b/cds/lock/spinlock.h @@ -52,7 +52,7 @@ namespace cds { public: typedef Backoff backoff_strategy ; ///< back-off strategy type private: - CDS_ATOMIC::atomic m_spin ; ///< Spin + atomics::atomic m_spin ; ///< Spin # ifdef CDS_DEBUG typename OS::ThreadId m_dbgOwnerId ; ///< Owner thread id (only for debug mode) # endif @@ -64,7 +64,7 @@ namespace cds { :m_dbgOwnerId( OS::c_NullThreadId ) # endif { - m_spin.store( false, CDS_ATOMIC::memory_order_relaxed ); + m_spin.store( false, atomics::memory_order_relaxed ); } /// Construct spin-lock in specified state @@ -76,7 +76,7 @@ namespace cds { :m_dbgOwnerId( bLocked ? OS::getCurrentThreadId() : OS::c_NullThreadId ) # endif { - m_spin.store( bLocked, CDS_ATOMIC::memory_order_relaxed ); + m_spin.store( bLocked, atomics::memory_order_relaxed ); } /// Dummy copy constructor @@ -95,13 +95,13 @@ namespace cds { /// Destructor. 
On debug time it checks whether spin-lock is free ~Spinlock() { - assert( !m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ); + assert( !m_spin.load( atomics::memory_order_relaxed ) ); } /// Check if the spin is locked bool is_locked() const CDS_NOEXCEPT { - return m_spin.load( CDS_ATOMIC::memory_order_relaxed ); + return m_spin.load( atomics::memory_order_relaxed ); } /// Try to lock the object @@ -120,7 +120,7 @@ namespace cds { bool tryLock() CDS_NOEXCEPT { bool bCurrent = false; - m_spin.compare_exchange_strong( bCurrent, true, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + m_spin.compare_exchange_strong( bCurrent, true, atomics::memory_order_acquire, atomics::memory_order_relaxed ); CDS_DEBUG_DO( if ( !bCurrent ) { @@ -162,7 +162,7 @@ namespace cds { // TATAS algorithm while ( !tryLock() ) { - while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) { + while ( m_spin.load( atomics::memory_order_relaxed ) ) { backoff(); } } @@ -172,12 +172,12 @@ namespace cds { /// Unlock the spin-lock. Debug version: deadlock may be detected void unlock() CDS_NOEXCEPT { - assert( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ); + assert( m_spin.load( atomics::memory_order_relaxed ) ); assert( m_dbgOwnerId == OS::getCurrentThreadId() ); CDS_DEBUG_DO( m_dbgOwnerId = OS::c_NullThreadId; ) - m_spin.store( false, CDS_ATOMIC::memory_order_release ); + m_spin.store( false, atomics::memory_order_release ); } }; @@ -202,7 +202,7 @@ namespace cds { typedef Backoff backoff_strategy ; ///< The backoff type private: - CDS_ATOMIC::atomic m_spin ; ///< spin-lock atomic + atomics::atomic m_spin ; ///< spin-lock atomic thread_id m_OwnerId ; ///< Owner thread id. If spin-lock is not locked it usually equals to OS::c_NullThreadId private: @@ -225,7 +225,7 @@ namespace cds { bool tryLockOwned( thread_id tid ) CDS_NOEXCEPT { if ( isOwned( tid )) { - m_spin.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + m_spin.fetch_add( 1, atomics::memory_order_relaxed ); return true; } return false; @@ -234,7 +234,7 @@ namespace cds { bool tryAcquireLock() CDS_NOEXCEPT { integral_type nCurrent = 0; - return m_spin.compare_exchange_weak( nCurrent, 1, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed ); + return m_spin.compare_exchange_weak( nCurrent, 1, atomics::memory_order_acquire, atomics::memory_order_relaxed ); } bool tryAcquireLock( unsigned int nTryCount ) CDS_NOEXCEPT_( noexcept( backoff_strategy()() )) @@ -254,7 +254,7 @@ namespace cds { // TATAS algorithm backoff_strategy bkoff; while ( !tryAcquireLock() ) { - while ( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) ) + while ( m_spin.load( atomics::memory_order_relaxed ) ) bkoff(); } } @@ -294,7 +294,7 @@ namespace cds { */ bool is_locked() const CDS_NOEXCEPT { - return !( m_spin.load( CDS_ATOMIC::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() )); + return !( m_spin.load( atomics::memory_order_relaxed ) == 0 || isOwned( cds::OS::getCurrentThreadId() )); } /// Try to lock the spin-lock (synonym for \ref try_lock) @@ -364,12 +364,12 @@ namespace cds { bool unlock() CDS_NOEXCEPT { if ( isOwned( OS::getCurrentThreadId() ) ) { - integral_type n = m_spin.load( CDS_ATOMIC::memory_order_relaxed ); + integral_type n = m_spin.load( atomics::memory_order_relaxed ); if ( n > 1 ) - m_spin.store( n - 1, CDS_ATOMIC::memory_order_relaxed ); + m_spin.store( n - 1, atomics::memory_order_relaxed ); else { free(); - m_spin.store( 0, CDS_ATOMIC::memory_order_release ); + m_spin.store( 0, atomics::memory_order_release ); } 
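The lock() loops above follow the TATAS (test-and-test-and-set) pattern: the expensive CAS is attempted only after a plain relaxed read sees the lock free, which keeps the cache line in shared state while waiting. A minimal sketch, with std::atomic standing in for the atomics:: alias and the back-off reduced to a yield:

    #include <atomic>
    #include <thread>

    class tatas_lock {
        std::atomic<bool> m_locked{ false };
    public:
        void lock()
        {
            for (;;) {
                bool expected = false;
                if ( m_locked.compare_exchange_strong( expected, true,
                         std::memory_order_acquire, std::memory_order_relaxed ))
                    return;
                while ( m_locked.load( std::memory_order_relaxed ))   // test before retrying
                    std::this_thread::yield();
            }
        }
        void unlock() { m_locked.store( false, std::memory_order_release ); }
    };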
return true; } diff --git a/cds/memory/michael/allocator.h b/cds/memory/michael/allocator.h index d6ce92d7..cbc9f4a7 100644 --- a/cds/memory/michael/allocator.h +++ b/cds/memory/michael/allocator.h @@ -786,7 +786,7 @@ namespace michael { : public options::free_list::item_hook , public options::partial_list::item_hook { - CDS_ATOMIC::atomic anchor ; ///< anchor, see \ref anchor_tag + atomics::atomic anchor ; ///< anchor, see \ref anchor_tag byte * pSB ; ///< ptr to superblock processor_heap_base * pProcHeap ; ///< pointer to owner processor heap unsigned int nBlockSize ; ///< block size in bytes @@ -1099,10 +1099,10 @@ namespace michael { /// Processor heap struct processor_heap_base { - CDS_DATA_ALIGNMENT(8) CDS_ATOMIC::atomic active; ///< pointer to the descriptor of active superblock owned by processor heap + CDS_DATA_ALIGNMENT(8) atomics::atomic active; ///< pointer to the descriptor of active superblock owned by processor heap processor_desc * pProcDesc ; ///< pointer to parent processor descriptor const size_class * pSizeClass ; ///< pointer to size class - CDS_ATOMIC::atomic pPartial ; ///< pointer to partial filled superblock (may be \p nullptr) + atomics::atomic pPartial ; ///< pointer to partial filled superblock (may be \p nullptr) partial_list partialList ; ///< list of partial filled superblocks owned by the processor heap unsigned int nPageIdx ; ///< page size-class index, \ref c_nPageSelfAllocation - "small page" @@ -1130,13 +1130,13 @@ namespace michael { /// Get partial superblock owned by the processor heap superblock_desc * get_partial() { - superblock_desc * pDesc = pPartial.load(CDS_ATOMIC::memory_order_acquire); + superblock_desc * pDesc = pPartial.load(atomics::memory_order_acquire); do { if ( !pDesc ) { pDesc = partialList.pop(); break; } - } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } while ( !pPartial.compare_exchange_weak( pDesc, nullptr, atomics::memory_order_release, atomics::memory_order_relaxed ) ); //assert( pDesc == nullptr || free_desc_list::node_algorithms::inited( static_cast(pDesc) )); //assert( pDesc == nullptr || partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); @@ -1150,7 +1150,7 @@ namespace michael { //assert( partial_desc_list::node_algorithms::inited( static_cast(pDesc) ) ); superblock_desc * pCur = nullptr; - if ( !pPartial.compare_exchange_strong(pCur, pDesc, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed) ) + if ( !pPartial.compare_exchange_strong(pCur, pDesc, atomics::memory_order_acq_rel, atomics::memory_order_relaxed) ) partialList.push( pDesc ); } @@ -1186,7 +1186,7 @@ namespace michael { system_heap m_LargeHeap ; ///< Heap for large block aligned_heap m_AlignedHeap ; ///< Internal aligned heap sizeclass_selector m_SizeClassSelector ; ///< Size-class selector - CDS_ATOMIC::atomic * m_arrProcDesc ; ///< array of pointers to the processor descriptors + atomics::atomic * m_arrProcDesc ; ///< array of pointers to the processor descriptors unsigned int m_nProcessorCount ; ///< Processor count bound_checker m_BoundChecker ; ///< Bound checker @@ -1213,7 +1213,7 @@ namespace michael { // Reserve block while ( true ) { ++nCollision; - oldActive = pProcHeap->active.load(CDS_ATOMIC::memory_order_acquire); + oldActive = pProcHeap->active.load(atomics::memory_order_acquire); if ( !oldActive.ptr() ) return nullptr; unsigned int nCredits = oldActive.credits(); @@ -1222,7 +1222,7 @@ namespace michael { newActive = oldActive; 
newActive.credits( nCredits - 1 ); } - if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( pProcHeap->active.compare_exchange_strong( oldActive, newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) break; } @@ -1240,7 +1240,7 @@ namespace michael { nCollision = -1; do { ++nCollision; - newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire); assert( oldAnchor.avail < pDesc->nCapacity ); pAddr = pDesc->pSB + oldAnchor.avail * (unsigned long long) pDesc->nBlockSize; @@ -1256,7 +1256,7 @@ namespace michael { newAnchor.count -= nMoreCredits; } } - } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed )); if ( nCollision ) pProcHeap->stat.incActiveAnchorCASFailureCount( nCollision ); @@ -1297,7 +1297,7 @@ namespace michael { do { ++nCollision; - newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire); if ( oldAnchor.state == SBSTATE_EMPTY ) { free_superblock( pDesc ); goto retry; @@ -1307,7 +1307,7 @@ namespace michael { newAnchor.count -= nMoreCredits + 1; newAnchor.state = (nMoreCredits > 0) ? SBSTATE_ACTIVE : SBSTATE_FULL; newAnchor.tag += 1; - } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ); + } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) ); if ( nCollision ) pProcHeap->stat.incPartialDescCASFailureCount( nCollision ); @@ -1322,13 +1322,13 @@ namespace michael { do { ++nCollision; - newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire); assert( oldAnchor.avail < pDesc->nCapacity ); pAddr = pDesc->pSB + oldAnchor.avail * pDesc->nBlockSize; newAnchor.avail = reinterpret_cast( pAddr )->nNextFree; ++newAnchor.tag; - } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed) ); + } while ( !pDesc->anchor.compare_exchange_strong(oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed) ); if ( nCollision ) pProcHeap->stat.incPartialAnchorCASFailureCount( nCollision ); @@ -1356,7 +1356,7 @@ namespace michael { assert( pDesc != nullptr ); pDesc->pSB = new_superblock_buffer( pProcHeap ); - anchor_tag anchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_relaxed); + anchor_tag anchor = pDesc->anchor.load(atomics::memory_order_relaxed); anchor.tag += 1; // Make single-linked list of free blocks in superblock @@ -1374,10 +1374,10 @@ namespace michael { anchor.count = pDesc->nCapacity - 1 - (newActive.credits() + 1); anchor.state = SBSTATE_ACTIVE; - pDesc->anchor.store(anchor, CDS_ATOMIC::memory_order_relaxed); + pDesc->anchor.store(anchor, atomics::memory_order_relaxed); active_tag curActive; - if ( pProcHeap->active.compare_exchange_strong( curActive, newActive, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) { + if ( pProcHeap->active.compare_exchange_strong( curActive, 
newActive, atomics::memory_order_release, atomics::memory_order_relaxed )) { pProcHeap->stat.incAllocFromNew(); //reinterpret_cast( pDesc->pSB )->set( pDesc, 0 ); return reinterpret_cast( pDesc->pSB ); @@ -1398,11 +1398,11 @@ namespace michael { if ( nProcessorId >= m_nProcessorCount ) nProcessorId = 0; - processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( CDS_ATOMIC::memory_order_relaxed ); + processor_desc * pDesc = m_arrProcDesc[ nProcessorId ].load( atomics::memory_order_relaxed ); while ( !pDesc ) { processor_desc * pNewDesc = new_processor_desc( nProcessorId ); - if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ) { + if ( m_arrProcDesc[nProcessorId].compare_exchange_strong( pDesc, pNewDesc, atomics::memory_order_release, atomics::memory_order_relaxed ) ) { pDesc = pNewDesc; break; } @@ -1421,7 +1421,7 @@ namespace michael { active_tag newActive; newActive.set( pDesc, nCredits - 1 ); - if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) ) + if ( pProcHeap->active.compare_exchange_strong( nullActive, newActive, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) ) return; // Someone installed another active superblock. @@ -1431,10 +1431,10 @@ namespace michael { anchor_tag newAnchor; do { - newAnchor = oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + newAnchor = oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire); newAnchor.count += nCredits; newAnchor.state = SBSTATE_PARTIAL; - } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !pDesc->anchor.compare_exchange_weak( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed )); pDesc->pProcHeap->add_partial( pDesc ); } @@ -1509,13 +1509,13 @@ namespace michael { m_AlignedHeap.free( pDesc ); } - superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed); + superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed); if ( pPartial ) { free( pPartial->pSB ); m_AlignedHeap.free( pPartial ); } - pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr(); + pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr(); if ( pDesc ) { free( pDesc->pSB ); m_AlignedHeap.free( pDesc ); @@ -1530,13 +1530,13 @@ namespace michael { m_AlignedHeap.free( pDesc ); } - superblock_desc * pPartial = pProcHeap->pPartial.load(CDS_ATOMIC::memory_order_relaxed); + superblock_desc * pPartial = pProcHeap->pPartial.load(atomics::memory_order_relaxed); if ( pPartial ) { pageHeap.free( pPartial->pSB ); m_AlignedHeap.free( pPartial ); } - pDesc = pProcHeap->active.load(CDS_ATOMIC::memory_order_relaxed).ptr(); + pDesc = pProcHeap->active.load(atomics::memory_order_relaxed).ptr(); if ( pDesc ) { pageHeap.free( pDesc->pSB ); m_AlignedHeap.free( pDesc ); @@ -1575,9 +1575,9 @@ namespace michael { pDesc = new( m_AlignedHeap.alloc(sizeof(superblock_desc), c_nAlignment ) ) superblock_desc; assert( (uptr_atomic_t(pDesc) & (c_nAlignment - 1)) == 0 ); - anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed ); + anchor = pDesc->anchor.load( atomics::memory_order_relaxed ); anchor.tag = 0; - pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed ); + pDesc->anchor.store( anchor, atomics::memory_order_relaxed ); 
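The per-processor descriptor array changed a little earlier in this file is initialized lazily and lock-free: the first thread to install its freshly built descriptor via CAS wins, and a loser destroys its copy and adopts the winner's. A rough sketch of that idiom under simplified types (proc_desc and get_or_create are illustrative names, not libcds API):

    #include <atomic>

    struct proc_desc { /* per-processor state */ };

    inline proc_desc* get_or_create( std::atomic<proc_desc*>& slot )
    {
        proc_desc* p = slot.load( std::memory_order_acquire );
        if ( p )
            return p;

        proc_desc* fresh = new proc_desc();
        if ( slot.compare_exchange_strong( p, fresh,
                 std::memory_order_acq_rel, std::memory_order_acquire ))
            return fresh;      // we installed it first
        delete fresh;          // somebody else won the race
        return p;              // p now holds the winner's pointer
    }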
pProcHeap->stat.incDescAllocCount(); } @@ -1586,9 +1586,9 @@ namespace michael { assert( pDesc->nCapacity <= c_nMaxBlockInSuperBlock ); pDesc->pProcHeap = pProcHeap; - anchor = pDesc->anchor.load( CDS_ATOMIC::memory_order_relaxed ); + anchor = pDesc->anchor.load( atomics::memory_order_relaxed ); anchor.avail = 1; - pDesc->anchor.store( anchor, CDS_ATOMIC::memory_order_relaxed ); + pDesc->anchor.store( anchor, atomics::memory_order_relaxed ); return pDesc; } @@ -1663,7 +1663,7 @@ namespace michael { m_nProcessorCount = m_Topology.processor_count(); m_arrProcDesc = new( m_AlignedHeap.alloc(sizeof(processor_desc *) * m_nProcessorCount, c_nAlignment )) - CDS_ATOMIC::atomic[ m_nProcessorCount ]; + atomics::atomic[ m_nProcessorCount ]; memset( m_arrProcDesc, 0, sizeof(processor_desc *) * m_nProcessorCount ) ; // ?? memset for atomic<> } @@ -1674,7 +1674,7 @@ namespace michael { ~Heap() { for ( unsigned int i = 0; i < m_nProcessorCount; ++i ) { - processor_desc * pDesc = m_arrProcDesc[i].load(CDS_ATOMIC::memory_order_relaxed); + processor_desc * pDesc = m_arrProcDesc[i].load(atomics::memory_order_relaxed); if ( pDesc ) free_processor_desc( pDesc ); } @@ -1739,7 +1739,7 @@ namespace michael { pProcHeap->stat.incDeallocatedBytes( pDesc->nBlockSize ); - oldAnchor = pDesc->anchor.load(CDS_ATOMIC::memory_order_acquire); + oldAnchor = pDesc->anchor.load(atomics::memory_order_acquire); do { newAnchor = oldAnchor; reinterpret_cast( pBlock )->nNextFree = oldAnchor.avail; @@ -1758,7 +1758,7 @@ namespace michael { } else newAnchor.count += 1; - } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ) ); + } while ( !pDesc->anchor.compare_exchange_strong( oldAnchor, newAnchor, atomics::memory_order_release, atomics::memory_order_relaxed ) ); pProcHeap->stat.incFreeCount(); @@ -1897,7 +1897,7 @@ namespace michael { { size_t nProcHeapCount = m_SizeClassSelector.size(); for ( unsigned int nProcessor = 0; nProcessor < m_nProcessorCount; ++nProcessor ) { - processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(CDS_ATOMIC::memory_order_relaxed); + processor_desc * pProcDesc = m_arrProcDesc[nProcessor].load(atomics::memory_order_relaxed); if ( pProcDesc ) { for ( unsigned int i = 0; i < nProcHeapCount; ++i ) { processor_heap_base * pProcHeap = pProcDesc->arrProcHeap + i; diff --git a/cds/memory/michael/osalloc_stat.h b/cds/memory/michael/osalloc_stat.h index 4d04dfb0..52d861ec 100644 --- a/cds/memory/michael/osalloc_stat.h +++ b/cds/memory/michael/osalloc_stat.h @@ -11,10 +11,10 @@ namespace cds { namespace memory { namespace michael { struct os_allocated_atomic { ///@cond - CDS_ATOMIC::atomic nAllocCount ; ///< Event count of large block allocation from %OS - CDS_ATOMIC::atomic nFreeCount ; ///< Event count of large block deallocation to %OS - CDS_ATOMIC::atomic nBytesAllocated ; ///< Total size of allocated large blocks, in bytes - CDS_ATOMIC::atomic nBytesDeallocated ; ///< Total size of deallocated large blocks, in bytes + atomics::atomic nAllocCount ; ///< Event count of large block allocation from %OS + atomics::atomic nFreeCount ; ///< Event count of large block deallocation to %OS + atomics::atomic nBytesAllocated ; ///< Total size of allocated large blocks, in bytes + atomics::atomic nBytesDeallocated ; ///< Total size of deallocated large blocks, in bytes os_allocated_atomic() : nAllocCount(0) @@ -27,39 +27,39 @@ namespace cds { namespace memory { namespace michael { /// Adds \p nSize to nBytesAllocated counter void 
incBytesAllocated( size_t nSize ) { - nAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed); - nBytesAllocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed ); + nAllocCount.fetch_add( 1, atomics::memory_order_relaxed); + nBytesAllocated.fetch_add( nSize, atomics::memory_order_relaxed ); } /// Adds \p nSize to nBytesDeallocated counter void incBytesDeallocated( size_t nSize ) { - nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); - nBytesDeallocated.fetch_add( nSize, CDS_ATOMIC::memory_order_relaxed ); + nFreeCount.fetch_add( 1, atomics::memory_order_relaxed ); + nBytesDeallocated.fetch_add( nSize, atomics::memory_order_relaxed ); } /// Returns count of \p alloc and \p alloc_aligned function call (for large block allocated directly from %OS) size_t allocCount() const { - return nAllocCount.load(CDS_ATOMIC::memory_order_relaxed); + return nAllocCount.load(atomics::memory_order_relaxed); } /// Returns count of \p free and \p free_aligned function call (for large block allocated directly from %OS) size_t freeCount() const { - return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed); + return nFreeCount.load(atomics::memory_order_relaxed); } /// Returns current value of nBytesAllocated counter atomic64u_t allocatedBytes() const { - return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed); + return nBytesAllocated.load(atomics::memory_order_relaxed); } /// Returns current value of nBytesAllocated counter atomic64u_t deallocatedBytes() const { - return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed); + return nBytesDeallocated.load(atomics::memory_order_relaxed); } }; diff --git a/cds/memory/michael/procheap_stat.h b/cds/memory/michael/procheap_stat.h index fa2466c5..1200a892 100644 --- a/cds/memory/michael/procheap_stat.h +++ b/cds/memory/michael/procheap_stat.h @@ -19,21 +19,21 @@ namespace cds { namespace memory { namespace michael { class procheap_atomic_stat { //@cond - CDS_ATOMIC::atomic nAllocFromActive ; ///< Event count of allocation from active superblock - CDS_ATOMIC::atomic nAllocFromPartial ; ///< Event count of allocation from partial superblock - CDS_ATOMIC::atomic nAllocFromNew ; ///< Event count of allocation from new superblock - CDS_ATOMIC::atomic nFreeCount ; ///< \ref free function call count - CDS_ATOMIC::atomic nBlockCount ; ///< Count of superblock allocated - CDS_ATOMIC::atomic nBlockDeallocCount ; ///< Count of superblock deallocated - CDS_ATOMIC::atomic nDescAllocCount ; ///< Count of superblock descriptors - CDS_ATOMIC::atomic nDescFull ; ///< Count of full superblock - CDS_ATOMIC::atomic nBytesAllocated ; ///< Count of allocated bytes - CDS_ATOMIC::atomic nBytesDeallocated ; ///< Count of deallocated bytes - - CDS_ATOMIC::atomic nActiveDescCASFailureCount ; ///< CAS failure counter for active block of \p alloc_from_active Heap function - CDS_ATOMIC::atomic nActiveAnchorCASFailureCount; ///< CAS failure counter for active block of \p alloc_from_active Heap function - CDS_ATOMIC::atomic nPartialDescCASFailureCount ; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function - CDS_ATOMIC::atomic nPartialAnchorCASFailureCount; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function + atomics::atomic nAllocFromActive ; ///< Event count of allocation from active superblock + atomics::atomic nAllocFromPartial ; ///< Event count of allocation from partial superblock + atomics::atomic nAllocFromNew ; ///< Event count of allocation from new superblock + atomics::atomic nFreeCount ; ///< \ref 
free function call count + atomics::atomic nBlockCount ; ///< Count of superblock allocated + atomics::atomic nBlockDeallocCount ; ///< Count of superblock deallocated + atomics::atomic nDescAllocCount ; ///< Count of superblock descriptors + atomics::atomic nDescFull ; ///< Count of full superblock + atomics::atomic nBytesAllocated ; ///< Count of allocated bytes + atomics::atomic nBytesDeallocated ; ///< Count of deallocated bytes + + atomics::atomic nActiveDescCASFailureCount ; ///< CAS failure counter for active block of \p alloc_from_active Heap function + atomics::atomic nActiveAnchorCASFailureCount; ///< CAS failure counter for active block of \p alloc_from_active Heap function + atomics::atomic nPartialDescCASFailureCount ; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function + atomics::atomic nPartialAnchorCASFailureCount; ///< CAS failure counter for partial block of \p alloc_from_partial Heap function //@endcond @@ -59,134 +59,134 @@ namespace cds { namespace memory { namespace michael { /// Increment event counter of allocation from active superblock void incAllocFromActive() { - nAllocFromActive.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromActive.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment event counter of allocation from active superblock by \p n void incAllocFromActive( size_t n ) { - nAllocFromActive.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromActive.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment event counter of allocation from partial superblock void incAllocFromPartial() { - nAllocFromPartial.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromPartial.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment event counter of allocation from partial superblock by \p n void incAllocFromPartial( size_t n ) { - nAllocFromPartial.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromPartial.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment event count of allocation from new superblock void incAllocFromNew() { - nAllocFromNew.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromNew.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment event count of allocation from new superblock by \p n void incAllocFromNew( size_t n ) { - nAllocFromNew.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nAllocFromNew.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment event counter of free calling void incFreeCount() { - nFreeCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nFreeCount.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment event counter of free calling by \p n void incFreeCount( size_t n ) { - nFreeCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nFreeCount.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment counter of superblock allocated void incBlockAllocated() { - nBlockCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nBlockCount.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment counter of superblock allocated by \p n void incBlockAllocated( size_t n ) { - nBlockCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nBlockCount.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment counter of superblock deallocated void incBlockDeallocated() { - nBlockDeallocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nBlockDeallocCount.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment counter of superblock deallocated by \p n void 
incBlockDeallocated( size_t n ) { - nBlockDeallocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nBlockDeallocCount.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment counter of superblock descriptor allocated void incDescAllocCount() { - nDescAllocCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nDescAllocCount.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment counter of superblock descriptor allocated by \p n void incDescAllocCount( size_t n ) { - nDescAllocCount.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nDescAllocCount.fetch_add( n, atomics::memory_order_relaxed ); } /// Increment counter of full superblock descriptor void incDescFull() { - nDescFull.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nDescFull.fetch_add( 1, atomics::memory_order_relaxed ); } /// Increment counter of full superblock descriptor by \p n void incDescFull( size_t n ) { - nDescFull.fetch_add( n, CDS_ATOMIC::memory_order_relaxed ); + nDescFull.fetch_add( n, atomics::memory_order_relaxed ); } /// Decrement counter of full superblock descriptor void decDescFull() { - nDescFull.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ); + nDescFull.fetch_sub( 1, atomics::memory_order_relaxed ); } /// Decrement counter of full superblock descriptor by \p n void decDescFull(size_t n) { - nDescFull.fetch_sub( n, CDS_ATOMIC::memory_order_relaxed ); + nDescFull.fetch_sub( n, atomics::memory_order_relaxed ); } /// Add \p nBytes to allocated bytes counter void incAllocatedBytes( size_t nBytes ) { - nBytesAllocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed ); + nBytesAllocated.fetch_add( nBytes, atomics::memory_order_relaxed ); } /// Add \p nBytes to deallocated bytes counter void incDeallocatedBytes( size_t nBytes ) { - nBytesDeallocated.fetch_add( nBytes, CDS_ATOMIC::memory_order_relaxed); + nBytesDeallocated.fetch_add( nBytes, atomics::memory_order_relaxed); } /// Add \p nCount to CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function void incActiveDescCASFailureCount( int nCount ) { - nActiveDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + nActiveDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed ); } /// Add \p nCount to CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function void incActiveAnchorCASFailureCount( int nCount ) { - nActiveAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + nActiveAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed ); } /// Add \p nCount to CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_partial internal Heap function void incPartialDescCASFailureCount( int nCount ) { - nPartialDescCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + nPartialDescCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed ); } /// Add \p nCount to CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_partial internal Heap function void incPartialAnchorCASFailureCount( int nCount ) { - nPartialAnchorCASFailureCount.fetch_add( nCount, CDS_ATOMIC::memory_order_relaxed ); + nPartialAnchorCASFailureCount.fetch_add( nCount, atomics::memory_order_relaxed ); } // ----------------------------------------------------------------- @@ -195,49 +195,49 @@ namespace cds { namespace memory { namespace michael { /// Read event counter of allocation from 
active superblock size_t allocFromActive() const { - return nAllocFromActive.load(CDS_ATOMIC::memory_order_relaxed); + return nAllocFromActive.load(atomics::memory_order_relaxed); } /// Read event counter of allocation from partial superblock size_t allocFromPartial() const { - return nAllocFromPartial.load(CDS_ATOMIC::memory_order_relaxed); + return nAllocFromPartial.load(atomics::memory_order_relaxed); } /// Read event count of allocation from new superblock size_t allocFromNew() const { - return nAllocFromNew.load(CDS_ATOMIC::memory_order_relaxed); + return nAllocFromNew.load(atomics::memory_order_relaxed); } /// Read event counter of free calling size_t freeCount() const { - return nFreeCount.load(CDS_ATOMIC::memory_order_relaxed); + return nFreeCount.load(atomics::memory_order_relaxed); } /// Read counter of superblock allocated size_t blockAllocated() const { - return nBlockCount.load(CDS_ATOMIC::memory_order_relaxed); + return nBlockCount.load(atomics::memory_order_relaxed); } /// Read counter of superblock deallocated size_t blockDeallocated() const { - return nBlockDeallocCount.load(CDS_ATOMIC::memory_order_relaxed); + return nBlockDeallocCount.load(atomics::memory_order_relaxed); } /// Read counter of superblock descriptor allocated size_t descAllocCount() const { - return nDescAllocCount.load(CDS_ATOMIC::memory_order_relaxed); + return nDescAllocCount.load(atomics::memory_order_relaxed); } /// Read counter of full superblock descriptor size_t descFull() const { - return nDescFull.load(CDS_ATOMIC::memory_order_relaxed); + return nDescFull.load(atomics::memory_order_relaxed); } /// Get counter of allocated bytes @@ -249,7 +249,7 @@ namespace cds { namespace memory { namespace michael { */ atomic64u_t allocatedBytes() const { - return nBytesAllocated.load(CDS_ATOMIC::memory_order_relaxed); + return nBytesAllocated.load(atomics::memory_order_relaxed); } /// Get counter of deallocated bytes @@ -260,31 +260,31 @@ namespace cds { namespace memory { namespace michael { */ atomic64u_t deallocatedBytes() const { - return nBytesDeallocated.load(CDS_ATOMIC::memory_order_relaxed); + return nBytesDeallocated.load(atomics::memory_order_relaxed); } /// Get CAS failure counter of updating \p active field of active descriptor for \p alloc_from_active internal Heap function size_t activeDescCASFailureCount() const { - return nActiveDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + return nActiveDescCASFailureCount.load(atomics::memory_order_relaxed); } /// Get CAS failure counter of updating \p anchor field of active descriptor for \p alloc_from_active internal Heap function size_t activeAnchorCASFailureCount() const { - return nActiveAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + return nActiveAnchorCASFailureCount.load(atomics::memory_order_relaxed); } /// Get CAS failure counter of updating \p active field of partial descriptor for \p alloc_from_active internal Heap function size_t partialDescCASFailureCount() const { - return nPartialDescCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + return nPartialDescCASFailureCount.load(atomics::memory_order_relaxed); } /// Get CAS failure counter of updating \p anchor field of partial descriptor for \p alloc_from_active internal Heap function size_t partialAnchorCASFailureCount() const { - return nPartialAnchorCASFailureCount.load(CDS_ATOMIC::memory_order_relaxed); + return nPartialAnchorCASFailureCount.load(atomics::memory_order_relaxed); } }; diff --git a/cds/opt/options.h b/cds/opt/options.h index 
184a241f..9b34428a 100644 --- a/cds/opt/options.h +++ b/cds/opt/options.h @@ -450,12 +450,12 @@ namespace opt { //@cond // For new C++11 (cds-1.1.0) - static const CDS_ATOMIC::memory_order memory_order_relaxed = CDS_ATOMIC::memory_order_relaxed; - static const CDS_ATOMIC::memory_order memory_order_consume = CDS_ATOMIC::memory_order_consume; - static const CDS_ATOMIC::memory_order memory_order_acquire = CDS_ATOMIC::memory_order_acquire; - static const CDS_ATOMIC::memory_order memory_order_release = CDS_ATOMIC::memory_order_release; - static const CDS_ATOMIC::memory_order memory_order_acq_rel = CDS_ATOMIC::memory_order_acq_rel; - static const CDS_ATOMIC::memory_order memory_order_seq_cst = CDS_ATOMIC::memory_order_seq_cst; + static const atomics::memory_order memory_order_relaxed = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_consume = atomics::memory_order_consume; + static const atomics::memory_order memory_order_acquire = atomics::memory_order_acquire; + static const atomics::memory_order memory_order_release = atomics::memory_order_release; + static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_acq_rel; + static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst; //@endcond }; @@ -469,12 +469,12 @@ namespace opt { //@cond // For new C++11 (cds-1.1.0) - static const CDS_ATOMIC::memory_order memory_order_relaxed = CDS_ATOMIC::memory_order_seq_cst; - static const CDS_ATOMIC::memory_order memory_order_consume = CDS_ATOMIC::memory_order_seq_cst; - static const CDS_ATOMIC::memory_order memory_order_acquire = CDS_ATOMIC::memory_order_seq_cst; - static const CDS_ATOMIC::memory_order memory_order_release = CDS_ATOMIC::memory_order_seq_cst; - static const CDS_ATOMIC::memory_order memory_order_acq_rel = CDS_ATOMIC::memory_order_seq_cst; - static const CDS_ATOMIC::memory_order memory_order_seq_cst = CDS_ATOMIC::memory_order_seq_cst; + static const atomics::memory_order memory_order_relaxed = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_consume = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_acquire = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_release = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst; //@endcond }; } // namespace v diff --git a/cds/refcounter.h b/cds/refcounter.h index c7798d23..3ccdfce2 100644 --- a/cds/refcounter.h +++ b/cds/refcounter.h @@ -21,7 +21,7 @@ namespace cds { template class ref_counter { - CDS_ATOMIC::atomic m_nRefCount ; ///< The reference counter + atomics::atomic m_nRefCount ; ///< The reference counter public: typedef T ref_counter_type ; ///< The reference counter type @@ -35,7 +35,7 @@ namespace cds { /// Get current value of reference counter. T value() const CDS_NOEXCEPT { - return m_nRefCount.load( CDS_ATOMIC::memory_order_relaxed ); + return m_nRefCount.load( atomics::memory_order_relaxed ); } /// Current value of reference counter @@ -47,14 +47,14 @@ namespace cds { /// Atomic increment void inc() CDS_NOEXCEPT { - m_nRefCount.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + m_nRefCount.fetch_add( 1, atomics::memory_order_relaxed ); } /// Atomic decrement. 
Return \p true if reference counter is 0, otherwise \p false bool dec() CDS_NOEXCEPT { - if ( m_nRefCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) { - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + if ( m_nRefCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) { + atomics::atomic_thread_fence( atomics::memory_order_release ); return true; } return false; diff --git a/cds/threading/details/_common.h b/cds/threading/details/_common.h index 8c295d85..1d60b66e 100644 --- a/cds/threading/details/_common.h +++ b/cds/threading/details/_common.h @@ -141,7 +141,7 @@ namespace cds { cds::algo::elimination::record m_EliminationRec; //@cond - static CDS_EXPORT_API CDS_ATOMIC::atomic s_nLastUsedProcNo; + static CDS_EXPORT_API atomics::atomic s_nLastUsedProcNo; static CDS_EXPORT_API size_t s_nProcCount; //@endcond @@ -154,7 +154,7 @@ namespace cds { , m_pSHBRCU( nullptr ) , m_pSHTRCU( nullptr ) #endif - , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) % s_nProcCount ) + , m_nFakeProcessorNumber( s_nLastUsedProcNo.fetch_add(1, atomics::memory_order_relaxed) % s_nProcCount ) , m_nAttachCount(0) { if (cds::gc::HP::isUsed() ) diff --git a/cds/urcu/details/base.h b/cds/urcu/details/base.h index 0ba1359c..5443acb0 100644 --- a/cds/urcu/details/base.h +++ b/cds/urcu/details/base.h @@ -287,12 +287,12 @@ namespace cds { { public: # ifdef CDS_CXX11_TEMPLATE_ALIAS_SUPPORT - template using atomic_marked_ptr = CDS_ATOMIC::atomic; + template using atomic_marked_ptr = atomics::atomic; # else template - class atomic_marked_ptr: public CDS_ATOMIC::atomic + class atomic_marked_ptr: public atomics::atomic { - typedef CDS_ATOMIC::atomic base_class; + typedef atomics::atomic base_class; public: # ifdef CDS_CXX11_EXPLICITLY_DEFAULTED_FUNCTION_SUPPORT atomic_marked_ptr() CDS_NOEXCEPT_DEFAULTED_( noexcept(base_class()) ) = default; @@ -316,7 +316,7 @@ namespace cds { template struct thread_list_record { ThreadData * m_pNext ; ///< Next item in thread list - CDS_ATOMIC::atomic m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned) + atomics::atomic m_idOwner ; ///< Owner thread id; 0 - the record is free (not owned) thread_list_record() : m_pNext( nullptr ) @@ -336,7 +336,7 @@ namespace cds { typedef cds::details::Allocator< thread_record, Alloc > allocator_type; private: - CDS_ATOMIC::atomic m_pHead; + atomics::atomic m_pHead; public: thread_list() @@ -355,9 +355,9 @@ namespace cds { cds::OS::ThreadId const curThreadId = cds::OS::getCurrentThreadId(); // First try to reuse a retired (non-active) HP record - for ( pRec = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) { + for ( pRec = m_pHead.load( atomics::memory_order_acquire ); pRec; pRec = pRec->m_list.m_pNext ) { cds::OS::ThreadId thId = nullThreadId; - if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) ) + if ( !pRec->m_list.m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) ) continue; return pRec; } @@ -365,14 +365,14 @@ namespace cds { // No records available for reuse // Allocate and push a new record pRec = allocator_type().New(); - pRec->m_list.m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed ); + pRec->m_list.m_idOwner.store( curThreadId, atomics::memory_order_relaxed ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + 
atomics::atomic_thread_fence( atomics::memory_order_release ); - thread_record * pOldHead = m_pHead.load( CDS_ATOMIC::memory_order_acquire ); + thread_record * pOldHead = m_pHead.load( atomics::memory_order_acquire ); do { pRec->m_list.m_pNext = pOldHead; - } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pHead.compare_exchange_weak( pOldHead, pRec, atomics::memory_order_release, atomics::memory_order_relaxed )); return pRec; } @@ -380,7 +380,7 @@ namespace cds { void retire( thread_record * pRec ) { assert( pRec != nullptr ); - pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release ); + pRec->m_list.m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release ); } void detach_all() @@ -388,15 +388,15 @@ namespace cds { thread_record * pNext = nullptr; cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; - for ( thread_record * pRec = m_pHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pNext ) { + for ( thread_record * pRec = m_pHead.load(atomics::memory_order_acquire); pRec; pRec = pNext ) { pNext = pRec->m_list.m_pNext; - if ( pRec->m_list.m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) { + if ( pRec->m_list.m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) { retire( pRec ); } } } - thread_record * head( CDS_ATOMIC::memory_order mo ) const + thread_record * head( atomics::memory_order mo ) const { return m_pHead.load( mo ); } @@ -408,13 +408,13 @@ namespace cds { CDS_DEBUG_DO( cds::OS::ThreadId const nullThreadId = cds::OS::c_NullThreadId; ) CDS_DEBUG_DO( cds::OS::ThreadId const mainThreadId = cds::OS::getCurrentThreadId() ;) - thread_record * p = m_pHead.exchange( nullptr, CDS_ATOMIC::memory_order_seq_cst ); + thread_record * p = m_pHead.exchange( nullptr, atomics::memory_order_seq_cst ); while ( p ) { thread_record * pNext = p->m_list.m_pNext; - assert( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId - || p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId - || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) ) + assert( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId + || p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId + || !cds::OS::isThreadAlive( p->m_list.m_idOwner.load( atomics::memory_order_relaxed ) ) ); al.Delete( p ); diff --git a/cds/urcu/details/gp.h b/cds/urcu/details/gp.h index 181e22ac..26c8ab8c 100644 --- a/cds/urcu/details/gp.h +++ b/cds/urcu/details/gp.h @@ -37,15 +37,15 @@ namespace cds { namespace urcu { namespace details { thread_record * pRec = get_thread_record(); assert( pRec != nullptr ); - uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ); + uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed ); if ( (tmp & rcu_class::c_nNestMask) == 0 ) { - pRec->m_nAccessControl.store( gp_singleton::instance()->global_control_word(CDS_ATOMIC::memory_order_relaxed), - CDS_ATOMIC::memory_order_relaxed ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + pRec->m_nAccessControl.store( gp_singleton::instance()->global_control_word(atomics::memory_order_relaxed), + atomics::memory_order_relaxed ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); //CDS_COMPILER_RW_BARRIER; } else { - pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + 
pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_relaxed ); } } @@ -56,7 +56,7 @@ namespace cds { namespace urcu { namespace details { assert( pRec != nullptr ); //CDS_COMPILER_RW_BARRIER; - pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release ); } template @@ -65,7 +65,7 @@ namespace cds { namespace urcu { namespace details { thread_record * pRec = get_thread_record(); assert( pRec != nullptr ); - return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; + return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; } @@ -73,9 +73,9 @@ namespace cds { namespace urcu { namespace details { template inline bool gp_singleton::check_grace_period( typename gp_singleton::thread_record * pRec ) const { - uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ); + uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_relaxed ); return (v & general_purpose_rcu::c_nNestMask) - && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask )); + && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~general_purpose_rcu::c_nNestMask )); } template @@ -83,10 +83,10 @@ namespace cds { namespace urcu { namespace details { inline void gp_singleton::flip_and_wait( Backoff& bkoff ) { OS::ThreadId const nullThreadId = OS::c_NullThreadId; - m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst ); + m_nGlobalControl.fetch_xor( general_purpose_rcu::c_nControlBit, atomics::memory_order_seq_cst ); - for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { - while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) { + for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { + while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec ) ) { bkoff(); CDS_COMPILER_RW_BARRIER; } diff --git a/cds/urcu/details/gp_decl.h b/cds/urcu/details/gp_decl.h index f764e727..1c8705c9 100644 --- a/cds/urcu/details/gp_decl.h +++ b/cds/urcu/details/gp_decl.h @@ -15,7 +15,7 @@ namespace cds { namespace urcu { namespace details { // that is not so efficiently # define CDS_GPURCU_DECLARE_THREAD_DATA(tag_) \ template <> struct thread_data { \ - CDS_ATOMIC::atomic m_nAccessControl ; \ + atomics::atomic m_nAccessControl ; \ thread_list_record< thread_data > m_list ; \ thread_data(): m_nAccessControl(0) {} \ ~thread_data() {} \ @@ -101,7 +101,7 @@ namespace cds { namespace urcu { namespace details { typedef gp_singleton_instance< rcu_tag > rcu_instance; protected: - CDS_ATOMIC::atomic m_nGlobalControl; + atomics::atomic m_nGlobalControl; thread_list< rcu_tag > m_ThreadList; protected: @@ -137,7 +137,7 @@ namespace cds { namespace urcu { namespace details { m_ThreadList.retire( pRec ); } - uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const + uint32_t global_control_word( atomics::memory_order mo ) const { return m_nGlobalControl.load( mo ); } @@ -163,7 +163,7 @@ namespace cds { namespace urcu { namespace details { static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast( rcu_instance::s_pRCU ); } \ static thread_record * attach_thread() { 
return instance()->attach_thread() ; } \ static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \ - static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \ + static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \ } CDS_GP_RCU_DECLARE_SINGLETON( general_instant_tag ); diff --git a/cds/urcu/details/gpb.h b/cds/urcu/details/gpb.h index e75ada61..c822268d 100644 --- a/cds/urcu/details/gpb.h +++ b/cds/urcu/details/gpb.h @@ -70,7 +70,7 @@ namespace cds { namespace urcu { protected: //@cond buffer_type m_Buffer; - CDS_ATOMIC::atomic m_nCurEpoch; + atomics::atomic m_nCurEpoch; lock_type m_Lock; size_t const m_nCapacity; //@endcond @@ -166,7 +166,7 @@ namespace cds { namespace urcu { virtual void retire_ptr( retired_ptr& p ) { if ( p.m_p ) { - epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed )); + epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed )); push_buffer( ep ); } } @@ -175,7 +175,7 @@ namespace cds { namespace urcu { template void batch_retire( ForwardIterator itFirst, ForwardIterator itLast ) { - uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ); + uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed ); while ( itFirst != itLast ) { epoch_retired_ptr ep( *itFirst, nEpoch ); ++itFirst; @@ -186,7 +186,7 @@ namespace cds { namespace urcu { /// Wait to finish a grace period and then clear the buffer void synchronize() { - epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed )); + epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed )); synchronize( ep ); } @@ -194,17 +194,17 @@ namespace cds { namespace urcu { bool synchronize( epoch_retired_ptr& ep ) { uint64_t nEpoch; - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); { cds::lock::scoped_lock sl( m_Lock ); if ( ep.m_p && m_Buffer.push( ep ) ) return false; - nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed ); flip_and_wait(); flip_and_wait(); } clear_buffer( nEpoch ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + atomics::atomic_thread_fence( atomics::memory_order_release ); return true; } //@endcond diff --git a/cds/urcu/details/gpi.h b/cds/urcu/details/gpi.h index 0f0de150..f4eee83e 100644 --- a/cds/urcu/details/gpi.h +++ b/cds/urcu/details/gpi.h @@ -134,13 +134,13 @@ namespace cds { namespace urcu { /// Waits to finish a grace period void synchronize() { - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); { cds::lock::scoped_lock sl( m_Lock ); flip_and_wait(); flip_and_wait(); } - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + atomics::atomic_thread_fence( atomics::memory_order_release ); } //@cond diff --git a/cds/urcu/details/gpt.h b/cds/urcu/details/gpt.h index 2fb8e114..31e03c21 100644 --- a/cds/urcu/details/gpt.h +++ b/cds/urcu/details/gpt.h @@ -77,7 +77,7 @@ namespace cds { namespace urcu { protected: //@cond buffer_type m_Buffer; - CDS_ATOMIC::atomic m_nCurEpoch; + atomics::atomic m_nCurEpoch; lock_type m_Lock; size_t const m_nCapacity; disposer_thread m_DisposerThread; @@ -152,7 +152,7 @@ namespace cds { namespace 
urcu { if ( bDetachAll ) pThis->m_ThreadList.detach_all(); - pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire )); + pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire )); delete pThis; singleton_ptr::s_pRCU = nullptr; @@ -170,7 +170,7 @@ namespace cds { namespace urcu { virtual void retire_ptr( retired_ptr& p ) { if ( p.m_p ) { - epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) ); + epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) ); push_buffer( ep ); } } @@ -179,7 +179,7 @@ namespace cds { namespace urcu { template void batch_retire( ForwardIterator itFirst, ForwardIterator itLast ) { - uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ); + uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed ); while ( itFirst != itLast ) { epoch_retired_ptr p( *itFirst, nEpoch ); ++itFirst; @@ -196,9 +196,9 @@ namespace cds { namespace urcu { //@cond void synchronize( bool bSync ) { - uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release ); + uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); { cds::lock::scoped_lock sl( m_Lock ); flip_and_wait(); @@ -206,7 +206,7 @@ namespace cds { namespace urcu { m_DisposerThread.dispose( m_Buffer, nPrevEpoch, bSync ); } - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + atomics::atomic_thread_fence( atomics::memory_order_release ); } void force_dispose() { diff --git a/cds/urcu/details/sh.h b/cds/urcu/details/sh.h index 6ef1fada..e4d0aed2 100644 --- a/cds/urcu/details/sh.h +++ b/cds/urcu/details/sh.h @@ -40,15 +40,15 @@ namespace cds { namespace urcu { namespace details { thread_record * pRec = get_thread_record(); assert( pRec != nullptr ); - uint32_t tmp = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ); + uint32_t tmp = pRec->m_nAccessControl.load( atomics::memory_order_relaxed ); if ( (tmp & rcu_class::c_nNestMask) == 0 ) { pRec->m_nAccessControl.store( - sh_singleton::instance()->global_control_word(CDS_ATOMIC::memory_order_acquire), - CDS_ATOMIC::memory_order_release + sh_singleton::instance()->global_control_word(atomics::memory_order_acquire), + atomics::memory_order_release ); } else { - pRec->m_nAccessControl.fetch_add( 1, CDS_ATOMIC::memory_order_release ); + pRec->m_nAccessControl.fetch_add( 1, atomics::memory_order_release ); } CDS_COMPILER_RW_BARRIER; } @@ -60,7 +60,7 @@ namespace cds { namespace urcu { namespace details { assert( pRec != nullptr); CDS_COMPILER_RW_BARRIER; - pRec->m_nAccessControl.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + pRec->m_nAccessControl.fetch_sub( 1, atomics::memory_order_release ); } template @@ -69,7 +69,7 @@ namespace cds { namespace urcu { namespace details { thread_record * pRec = get_thread_record(); assert( pRec != nullptr); - return (pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; + return (pRec->m_nAccessControl.load( atomics::memory_order_relaxed ) & rcu_class::c_nNestMask) != 0; } @@ -99,9 +99,9 @@ namespace cds { namespace urcu { namespace details { { thread_record * pRec = cds::threading::getRCU(); if ( pRec ) { - CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_acquire ); - pRec->m_bNeedMemBar.store( false, 
CDS_ATOMIC::memory_order_relaxed ); - CDS_ATOMIC::atomic_signal_fence( CDS_ATOMIC::memory_order_release ); + atomics::atomic_signal_fence( atomics::memory_order_acquire ); + pRec->m_bNeedMemBar.store( false, atomics::memory_order_relaxed ); + atomics::atomic_signal_fence( atomics::memory_order_release ); } } @@ -118,21 +118,21 @@ namespace cds { namespace urcu { namespace details { OS::ThreadId const nullThreadId = OS::c_NullThreadId; // Send "need membar" signal to all RCU threads - for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { - OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire); + for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { + OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire); if ( tid != nullThreadId ) { - pRec->m_bNeedMemBar.store( true, CDS_ATOMIC::memory_order_release ); + pRec->m_bNeedMemBar.store( true, atomics::memory_order_release ); raise_signal( tid ); } } // Wait while all RCU threads process the signal - for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { - OS::ThreadId tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire); + for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { + OS::ThreadId tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire); if ( tid != nullThreadId ) { bkOff.reset(); - while ( (tid = pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire )) != nullThreadId - && pRec->m_bNeedMemBar.load( CDS_ATOMIC::memory_order_acquire )) + while ( (tid = pRec->m_list.m_idOwner.load( atomics::memory_order_acquire )) != nullThreadId + && pRec->m_bNeedMemBar.load( atomics::memory_order_acquire )) { // Some versions of OSes can lose signals // So, we resend the signal @@ -146,9 +146,9 @@ namespace cds { namespace urcu { namespace details { template bool sh_singleton::check_grace_period( thread_record * pRec ) const { - uint32_t const v = pRec->m_nAccessControl.load( CDS_ATOMIC::memory_order_acquire ); + uint32_t const v = pRec->m_nAccessControl.load( atomics::memory_order_acquire ); return (v & signal_handling_rcu::c_nNestMask) - && ((( v ^ m_nGlobalControl.load( CDS_ATOMIC::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask )); + && ((( v ^ m_nGlobalControl.load( atomics::memory_order_relaxed )) & ~signal_handling_rcu::c_nNestMask )); } template @@ -157,8 +157,8 @@ namespace cds { namespace urcu { namespace details { { OS::ThreadId const nullThreadId = OS::c_NullThreadId; - for ( thread_record * pRec = m_ThreadList.head( CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { - while ( pRec->m_list.m_idOwner.load( CDS_ATOMIC::memory_order_acquire) != nullThreadId && check_grace_period( pRec )) + for ( thread_record * pRec = m_ThreadList.head( atomics::memory_order_acquire); pRec; pRec = pRec->m_list.m_pNext ) { + while ( pRec->m_list.m_idOwner.load( atomics::memory_order_acquire) != nullThreadId && check_grace_period( pRec )) bkOff(); } } diff --git a/cds/urcu/details/sh_decl.h b/cds/urcu/details/sh_decl.h index 9fc21c7a..55e649ef 100644 --- a/cds/urcu/details/sh_decl.h +++ b/cds/urcu/details/sh_decl.h @@ -19,8 +19,8 @@ namespace cds { namespace urcu { namespace details { // that is not so efficiently # define CDS_SHURCU_DECLARE_THREAD_DATA(tag_) \ template <> struct 
thread_data { \ - CDS_ATOMIC::atomic m_nAccessControl ; \ - CDS_ATOMIC::atomic m_bNeedMemBar ; \ + atomics::atomic m_nAccessControl ; \ + atomics::atomic m_bNeedMemBar ; \ thread_list_record< thread_data > m_list ; \ thread_data(): m_nAccessControl(0), m_bNeedMemBar(false) {} \ ~thread_data() {} \ @@ -103,7 +103,7 @@ namespace cds { namespace urcu { namespace details { typedef sh_singleton_instance< rcu_tag > rcu_instance; protected: - CDS_ATOMIC::atomic m_nGlobalControl; + atomics::atomic m_nGlobalControl; thread_list< rcu_tag > m_ThreadList; int const m_nSigNo; @@ -150,7 +150,7 @@ namespace cds { namespace urcu { namespace details { m_ThreadList.retire( pRec ); } - uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) const + uint32_t global_control_word( atomics::memory_order mo ) const { return m_nGlobalControl.load( mo ); } @@ -166,7 +166,7 @@ namespace cds { namespace urcu { namespace details { void switch_next_epoch() { - m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, CDS_ATOMIC::memory_order_seq_cst ); + m_nGlobalControl.fetch_xor( rcu_tag::c_nControlBit, atomics::memory_order_seq_cst ); } bool check_grace_period( thread_record * pRec ) const; @@ -188,7 +188,7 @@ namespace cds { namespace urcu { namespace details { static rcu_singleton * instance() { assert( rcu_instance::s_pRCU ); return static_cast( rcu_instance::s_pRCU ); } \ static thread_record * attach_thread() { return instance()->attach_thread() ; } \ static void detach_thread( thread_record * pRec ) { return instance()->detach_thread( pRec ) ; } \ - static uint32_t global_control_word( CDS_ATOMIC::memory_order mo ) { return instance()->global_control_word( mo ) ; } \ + static uint32_t global_control_word( atomics::memory_order mo ) { return instance()->global_control_word( mo ) ; } \ } CDS_SIGRCU_DECLARE_SINGLETON( signal_buffered_tag ); diff --git a/cds/urcu/details/sig_buffered.h b/cds/urcu/details/sig_buffered.h index 4f90037b..69530feb 100644 --- a/cds/urcu/details/sig_buffered.h +++ b/cds/urcu/details/sig_buffered.h @@ -72,7 +72,7 @@ namespace cds { namespace urcu { protected: //@cond buffer_type m_Buffer; - CDS_ATOMIC::atomic m_nCurEpoch; + atomics::atomic m_nCurEpoch; lock_type m_Lock; size_t const m_nCapacity; //@endcond @@ -164,7 +164,7 @@ namespace cds { namespace urcu { virtual void retire_ptr( retired_ptr& p ) { if ( p.m_p ) { - epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed )); + epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_relaxed )); push_buffer( ep ); } } @@ -173,7 +173,7 @@ namespace cds { namespace urcu { template void batch_retire( ForwardIterator itFirst, ForwardIterator itLast ) { - uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ); + uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed ); while ( itFirst != itLast ) { epoch_retired_ptr ep( *itFirst, nEpoch ); ++itFirst; @@ -184,7 +184,7 @@ namespace cds { namespace urcu { /// Wait to finish a grace period and then clear the buffer void synchronize() { - epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed )); + epoch_retired_ptr ep( retired_ptr(), m_nCurEpoch.load( atomics::memory_order_relaxed )); synchronize( ep ); } @@ -192,12 +192,12 @@ namespace cds { namespace urcu { bool synchronize( epoch_retired_ptr& ep ) { uint64_t nEpoch; - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); { cds::lock::scoped_lock sl( m_Lock ); if ( ep.m_p && 
m_Buffer.push( ep ) && m_Buffer.size() < capacity()) return false; - nEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + nEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_relaxed ); back_off bkOff; base_class::force_membar_all_threads( bkOff ); diff --git a/cds/urcu/details/sig_threaded.h b/cds/urcu/details/sig_threaded.h index dddcf595..ca3795e8 100644 --- a/cds/urcu/details/sig_threaded.h +++ b/cds/urcu/details/sig_threaded.h @@ -79,7 +79,7 @@ namespace cds { namespace urcu { protected: //@cond buffer_type m_Buffer; - CDS_ATOMIC::atomic m_nCurEpoch; + atomics::atomic m_nCurEpoch; lock_type m_Lock; size_t const m_nCapacity; disposer_thread m_DisposerThread; @@ -151,7 +151,7 @@ namespace cds { namespace urcu { if ( bDetachAll ) pThis->m_ThreadList.detach_all(); - pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire )); + pThis->m_DisposerThread.stop( pThis->m_Buffer, pThis->m_nCurEpoch.load( atomics::memory_order_acquire )); delete pThis; singleton_ptr::s_pRCU = nullptr; @@ -169,7 +169,7 @@ namespace cds { namespace urcu { virtual void retire_ptr( retired_ptr& p ) { if ( p.m_p ) { - epoch_retired_ptr ep( p, m_nCurEpoch.load( CDS_ATOMIC::memory_order_acquire ) ); + epoch_retired_ptr ep( p, m_nCurEpoch.load( atomics::memory_order_acquire ) ); push_buffer( ep ); } } @@ -178,7 +178,7 @@ namespace cds { namespace urcu { template void batch_retire( ForwardIterator itFirst, ForwardIterator itLast ) { - uint64_t nEpoch = m_nCurEpoch.load( CDS_ATOMIC::memory_order_relaxed ); + uint64_t nEpoch = m_nCurEpoch.load( atomics::memory_order_relaxed ); while ( itFirst != itLast ) { epoch_retired_ptr p( *itFirst, nEpoch ); ++itFirst; @@ -195,9 +195,9 @@ namespace cds { namespace urcu { //@cond void synchronize( bool bSync ) { - uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, CDS_ATOMIC::memory_order_release ); + uint64_t nPrevEpoch = m_nCurEpoch.fetch_add( 1, atomics::memory_order_release ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_acquire ); + atomics::atomic_thread_fence( atomics::memory_order_acquire ); { cds::lock::scoped_lock sl( m_Lock ); diff --git a/src/hrc_gc.cpp b/src/hrc_gc.cpp index 12a37df6..f6f7877e 100644 --- a/src/hrc_gc.cpp +++ b/src/hrc_gc.cpp @@ -36,9 +36,9 @@ namespace cds { namespace gc { GarbageCollector::~GarbageCollector() { - thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed ); + thread_list_node * pNode = m_pListHead.load( atomics::memory_order_relaxed ); while ( pNode ) { - assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == cds::OS::c_NullThreadId ); + assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) == cds::OS::c_NullThreadId ); clearHRCThreadDesc( pNode ); thread_list_node * pNext = pNode->m_pNext; deleteHRCThreadDesc( pNode ); @@ -103,10 +103,10 @@ namespace cds { namespace gc { assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); ContainerNode * pItem; for ( size_t n = 0; n < pNode->m_arrRetired.capacity(); ++n ) { - if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( CDS_ATOMIC::memory_order_relaxed )) != nullptr ) { + if ( (pItem = pNode->m_arrRetired[n].m_pNode.load( atomics::memory_order_relaxed )) != nullptr ) { pNode->m_arrRetired[n].m_funcFree( pItem ); //pItem->destroy(); - pNode->m_arrRetired[n].m_pNode.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + pNode->m_arrRetired[n].m_pNode.store( nullptr, atomics::memory_order_relaxed ); } } assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); @@ -117,8 
+117,8 @@ namespace cds { namespace gc { thread_list_node * hprec; const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId(); - for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) { - if ( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_acquire ) == curThreadId ) { + for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) { + if ( hprec->m_idOwner.load( atomics::memory_order_acquire ) == curThreadId ) { assert( !hprec->m_bFree ); return hprec; } @@ -135,9 +135,9 @@ namespace cds { namespace gc { const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId(); // First try to reuse a retired (non-active) HP record - for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) { + for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNext ) { cds::OS::ThreadId expectedThreadId = nullThreadId; - if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acq_rel, CDS_ATOMIC::memory_order_relaxed ) ) + if ( !hprec->m_idOwner.compare_exchange_strong( expectedThreadId, curThreadId, atomics::memory_order_acq_rel, atomics::memory_order_relaxed ) ) continue; hprec->m_pOwner = pThreadGC; hprec->m_bFree = false; @@ -149,15 +149,15 @@ namespace cds { namespace gc { // Allocate and push a new HP record hprec = newHRCThreadDesc(); assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() ); - hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed ); + hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed ); hprec->m_pOwner = pThreadGC; hprec->m_bFree = false; thread_list_node * pOldHead; - pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed ); + pOldHead = m_pListHead.load( atomics::memory_order_relaxed ); do { hprec->m_pNext = pOldHead; - } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed )); assert( hprec->m_hzp.size() == hprec->m_hzp.capacity() ); return hprec; @@ -176,9 +176,9 @@ namespace cds { namespace gc { if the destruction of thread object is called by the destructor after thread termination */ - assert( pNode->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) != cds::OS::c_NullThreadId ); + assert( pNode->m_idOwner.load( atomics::memory_order_relaxed ) != cds::OS::c_NullThreadId ); pNode->m_pOwner = nullptr; - pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release ); + pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release ); assert( pNode->m_hzp.size() == pNode->m_hzp.capacity() ); } @@ -189,19 +189,19 @@ namespace cds { namespace gc { typedef std::vector< ContainerNode * > hazard_ptr_list; details::thread_descriptor * pRec = pThreadGC->m_pDesc; - assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() ); + assert( static_cast< thread_list_node *>( pRec )->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() ); // Step 1: mark all pRec->m_arrRetired items as "traced" { details::retired_vector::const_iterator itEnd = pRec->m_arrRetired.end(); for ( details::retired_vector::const_iterator it = pRec->m_arrRetired.begin() ; it != itEnd; ++it ) { - ContainerNode * pNode = 
it->m_pNode.load( CDS_ATOMIC::memory_order_acquire ); + ContainerNode * pNode = it->m_pNode.load( atomics::memory_order_acquire ); if ( pNode ) { if ( pNode->m_RC.value() == 0 ) { - pNode->m_bTrace.store( true, CDS_ATOMIC::memory_order_release ); + pNode->m_bTrace.store( true, atomics::memory_order_release ); if ( pNode->m_RC.value() != 0 ) - pNode->m_bTrace.store( false, CDS_ATOMIC::memory_order_release ); + pNode->m_bTrace.store( false, atomics::memory_order_release ); } } } @@ -214,7 +214,7 @@ namespace cds { namespace gc { // Stage 2: Scan HP list and insert non-null values to plist { - thread_list_node * pNode = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); + thread_list_node * pNode = m_pListHead.load( atomics::memory_order_acquire ); while ( pNode ) { for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) { @@ -241,17 +241,17 @@ namespace cds { namespace gc { for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) { details::retired_node& node = *it; - ContainerNode * pNode = node.m_pNode.load(CDS_ATOMIC::memory_order_acquire); + ContainerNode * pNode = node.m_pNode.load(atomics::memory_order_acquire); if ( !pNode ) continue; - if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(CDS_ATOMIC::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) { + if ( pNode->m_RC.value() == 0 && pNode->m_bTrace.load(atomics::memory_order_acquire) && !std::binary_search( itHPBegin, itHPEnd, pNode ) ) { // pNode may be destructed safely - node.m_bDone.store( true, CDS_ATOMIC::memory_order_release ); - if ( node.m_nClaim.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) { + node.m_bDone.store( true, atomics::memory_order_release ); + if ( node.m_nClaim.load( atomics::memory_order_acquire ) == 0 ) { pNode->terminate( pThreadGC, false ); - pNode->clean( CDS_ATOMIC::memory_order_relaxed ); + pNode->clean( atomics::memory_order_relaxed ); node.m_funcFree( pNode ); arr.pop( nRetired ); @@ -260,7 +260,7 @@ namespace cds { namespace gc { } pNode->terminate( pThreadGC, true ); - //node.m_bDone.store( true, CDS_ATOMIC::memory_order_release ); + //node.m_bDone.store( true, atomics::memory_order_release ); CDS_HRC_STATISTIC( ++m_Stat.m_ScanClaimGuarded ); } else { @@ -280,11 +280,11 @@ namespace cds { namespace gc { const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId(); - for ( thread_list_node * pRec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); pRec; pRec = pRec->m_pNext ) + for ( thread_list_node * pRec = m_pListHead.load(atomics::memory_order_acquire); pRec; pRec = pRec->m_pNext ) { // If threadDesc is free then own its cds::OS::ThreadId expectedThreadId = nullThreadId; - if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, CDS_ATOMIC::memory_order_acquire, CDS_ATOMIC::memory_order_relaxed) ) + if ( !pRec->m_idOwner.compare_exchange_strong(expectedThreadId, curThreadId, atomics::memory_order_acquire, atomics::memory_order_relaxed) ) { continue; } @@ -303,10 +303,10 @@ namespace cds { namespace gc { details::retired_vector::iterator it = src.begin(); for ( size_t nRetired = 0; it != itEnd; ++nRetired, ++it ) { - if ( it->m_pNode.load( CDS_ATOMIC::memory_order_relaxed ) == nullptr ) + if ( it->m_pNode.load( atomics::memory_order_relaxed ) == nullptr ) continue; - dest.push( it->m_pNode.load(CDS_ATOMIC::memory_order_relaxed), it->m_funcFree ); + dest.push( it->m_pNode.load(atomics::memory_order_relaxed), it->m_funcFree ); src.pop( nRetired ); while ( dest.isFull() ) { @@ 
-321,7 +321,7 @@ namespace cds { namespace gc { } pRec->m_bFree = true; } - pRec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release ); + pRec->m_idOwner.store( nullThreadId, atomics::memory_order_release ); } } @@ -330,19 +330,19 @@ namespace cds { namespace gc { CDS_HRC_STATISTIC( ++m_Stat.m_CleanUpAllCalls ); //const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; - thread_list_node * pThread = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); + thread_list_node * pThread = m_pListHead.load(atomics::memory_order_acquire); while ( pThread ) { for ( size_t i = 0; i < pThread->m_arrRetired.capacity(); ++i ) { details::retired_node& rRetiredNode = pThread->m_arrRetired[i]; - ContainerNode * pNode = rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire); - if ( pNode && !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) ) { - rRetiredNode.m_nClaim.fetch_add( 1, CDS_ATOMIC::memory_order_release ); - if ( !rRetiredNode.m_bDone.load(CDS_ATOMIC::memory_order_acquire) - && pNode == rRetiredNode.m_pNode.load(CDS_ATOMIC::memory_order_acquire) ) + ContainerNode * pNode = rRetiredNode.m_pNode.load(atomics::memory_order_acquire); + if ( pNode && !rRetiredNode.m_bDone.load(atomics::memory_order_acquire) ) { + rRetiredNode.m_nClaim.fetch_add( 1, atomics::memory_order_release ); + if ( !rRetiredNode.m_bDone.load(atomics::memory_order_acquire) + && pNode == rRetiredNode.m_pNode.load(atomics::memory_order_acquire) ) { pNode->cleanUp( pThis ); } - rRetiredNode.m_nClaim.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + rRetiredNode.m_nClaim.fetch_sub( 1, atomics::memory_order_release ); } } pThread = pThread->m_pNext; @@ -363,7 +363,7 @@ namespace cds { namespace gc { stat.nRetiredPtrInFreeHRCRecs = 0; // Walk through HRC records - for ( thread_list_node *hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNext ) { + for ( thread_list_node *hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNext ) { ++stat.nHRCRecAllocated; size_t nRetiredNodeCount = hprec->m_arrRetired.retiredNodeCount(); if ( hprec->m_bFree ) { diff --git a/src/hzp_gc.cpp b/src/hzp_gc.cpp index 5f38bafc..c6131564 100644 --- a/src/hzp_gc.cpp +++ b/src/hzp_gc.cpp @@ -61,14 +61,14 @@ namespace cds { namespace gc { CDS_DEBUG_DO( const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; ) CDS_DEBUG_DO( const cds::OS::ThreadId mainThreadId = cds::OS::getCurrentThreadId() ;) - hplist_node * pHead = m_pListHead.load( CDS_ATOMIC::memory_order_relaxed ); - m_pListHead.store( nullptr, CDS_ATOMIC::memory_order_relaxed ); + hplist_node * pHead = m_pListHead.load( atomics::memory_order_relaxed ); + m_pListHead.store( nullptr, atomics::memory_order_relaxed ); hplist_node * pNext = nullptr; for ( hplist_node * hprec = pHead; hprec; hprec = pNext ) { - assert( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == nullThreadId - || hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) == mainThreadId - || !cds::OS::isThreadAlive( hprec->m_idOwner.load( CDS_ATOMIC::memory_order_relaxed ) ) + assert( hprec->m_idOwner.load( atomics::memory_order_relaxed ) == nullThreadId + || hprec->m_idOwner.load( atomics::memory_order_relaxed ) == mainThreadId + || !cds::OS::isThreadAlive( hprec->m_idOwner.load( atomics::memory_order_relaxed ) ) ); details::retired_vector& vect = hprec->m_arrRetired; details::retired_vector::iterator itRetired = vect.begin(); @@ -79,7 +79,7 @@ namespace cds { namespace gc { } vect.clear(); pNext = hprec->m_pNextNode; - 
hprec->m_bFree.store( true, CDS_ATOMIC::memory_order_relaxed ); + hprec->m_bFree.store( true, atomics::memory_order_relaxed ); DeleteHPRec( hprec ); } } @@ -112,26 +112,26 @@ namespace cds { namespace gc { const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId(); // First try to reuse a retired (non-active) HP record - for ( hprec = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) { + for ( hprec = m_pListHead.load( atomics::memory_order_acquire ); hprec; hprec = hprec->m_pNextNode ) { cds::OS::ThreadId thId = nullThreadId; - if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, CDS_ATOMIC::memory_order_seq_cst, CDS_ATOMIC::memory_order_relaxed ) ) + if ( !hprec->m_idOwner.compare_exchange_strong( thId, curThreadId, atomics::memory_order_seq_cst, atomics::memory_order_relaxed ) ) continue; - hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_release ); + hprec->m_bFree.store( false, atomics::memory_order_release ); return hprec; } // No HP records available for reuse // Allocate and push a new HP record hprec = NewHPRec(); - hprec->m_idOwner.store( curThreadId, CDS_ATOMIC::memory_order_relaxed ); - hprec->m_bFree.store( false, CDS_ATOMIC::memory_order_relaxed ); + hprec->m_idOwner.store( curThreadId, atomics::memory_order_relaxed ); + hprec->m_bFree.store( false, atomics::memory_order_relaxed ); - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + atomics::atomic_thread_fence( atomics::memory_order_release ); - hplist_node * pOldHead = m_pListHead.load( CDS_ATOMIC::memory_order_acquire ); + hplist_node * pOldHead = m_pListHead.load( atomics::memory_order_acquire ); do { hprec->m_pNextNode = pOldHead; - } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )); + } while ( !m_pListHead.compare_exchange_weak( pOldHead, hprec, atomics::memory_order_release, atomics::memory_order_relaxed )); return hprec; } @@ -144,16 +144,16 @@ namespace cds { namespace gc { pRec->clear(); Scan( pRec ); hplist_node * pNode = static_cast( pRec ); - pNode->m_idOwner.store( cds::OS::c_NullThreadId, CDS_ATOMIC::memory_order_release ); + pNode->m_idOwner.store( cds::OS::c_NullThreadId, atomics::memory_order_release ); } void GarbageCollector::detachAllThread() { hplist_node * pNext = nullptr; const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; - for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = pNext ) { + for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = pNext ) { pNext = hprec->m_pNextNode; - if ( hprec->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) != nullThreadId ) { + if ( hprec->m_idOwner.load(atomics::memory_order_relaxed) != nullThreadId ) { RetireHPRec( hprec ); } } @@ -169,7 +169,7 @@ namespace cds { namespace gc { // Stage 1: Scan HP list and insert non-null values in plist - hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); + hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire); while ( pNode ) { for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) { @@ -230,7 +230,7 @@ namespace cds { namespace gc { // Search guarded pointers in retired array - hplist_node * pNode = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); + hplist_node * pNode = m_pListHead.load(atomics::memory_order_acquire); while ( pNode ) { for ( size_t i = 0; i < m_nHazardPointerCount; ++i ) { @@ -269,27 +269,27 @@ namespace cds { 
namespace gc { { CDS_HAZARDPTR_STATISTIC( ++m_Stat.m_HelpScanCallCount ); - assert( static_cast(pThis)->m_idOwner.load(CDS_ATOMIC::memory_order_relaxed) == cds::OS::getCurrentThreadId() ); + assert( static_cast(pThis)->m_idOwner.load(atomics::memory_order_relaxed) == cds::OS::getCurrentThreadId() ); const cds::OS::ThreadId nullThreadId = cds::OS::c_NullThreadId; const cds::OS::ThreadId curThreadId = cds::OS::getCurrentThreadId(); - for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) { + for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) { // If m_bFree == true then hprec->m_arrRetired is empty - we don't need to see it - if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_acquire) ) + if ( hprec->m_bFree.load(atomics::memory_order_acquire) ) continue; // Owns hprec if it is empty. // Several threads may work concurrently so we use atomic technique only. { - cds::OS::ThreadId curOwner = hprec->m_idOwner.load(CDS_ATOMIC::memory_order_acquire); + cds::OS::ThreadId curOwner = hprec->m_idOwner.load(atomics::memory_order_acquire); if ( curOwner == nullThreadId || !cds::OS::isThreadAlive( curOwner )) { - if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed )) continue; } else { curOwner = nullThreadId; - if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed )) + if ( !hprec->m_idOwner.compare_exchange_strong( curOwner, curThreadId, atomics::memory_order_release, atomics::memory_order_relaxed )) continue; } } @@ -311,8 +311,8 @@ namespace cds { namespace gc { } src.clear(); - hprec->m_bFree.store(true, CDS_ATOMIC::memory_order_release); - hprec->m_idOwner.store( nullThreadId, CDS_ATOMIC::memory_order_release ); + hprec->m_bFree.store(true, atomics::memory_order_release); + hprec->m_idOwner.store( nullThreadId, atomics::memory_order_release ); } } @@ -329,11 +329,11 @@ namespace cds { namespace gc { stat.nTotalRetiredPtrCount = stat.nRetiredPtrInFreeHPRecs = 0; - for ( hplist_node * hprec = m_pListHead.load(CDS_ATOMIC::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) { + for ( hplist_node * hprec = m_pListHead.load(atomics::memory_order_acquire); hprec; hprec = hprec->m_pNextNode ) { ++stat.nHPRecAllocated; stat.nTotalRetiredPtrCount += hprec->m_arrRetired.size(); - if ( hprec->m_bFree.load(CDS_ATOMIC::memory_order_relaxed) ) { + if ( hprec->m_bFree.load(atomics::memory_order_relaxed) ) { // Free HP record stat.nRetiredPtrInFreeHPRecs += hprec->m_arrRetired.size(); } diff --git a/src/init.cpp b/src/init.cpp index 7168e6c5..4ca8614e 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -21,7 +21,7 @@ namespace cds { - CDS_EXPORT_API CDS_ATOMIC::atomic threading::ThreadData::s_nLastUsedProcNo(0); + CDS_EXPORT_API atomics::atomic threading::ThreadData::s_nLastUsedProcNo(0); CDS_EXPORT_API size_t threading::ThreadData::s_nProcCount = 1; #if CDS_OS_INTERFACE == CDS_OSI_WINDOWS @@ -45,17 +45,17 @@ namespace cds { #endif namespace details { - static CDS_ATOMIC::atomic s_nInitCallCount(0); + static atomics::atomic s_nInitCallCount(0); bool CDS_EXPORT_API init_first_call() { - return s_nInitCallCount.fetch_add(1, CDS_ATOMIC::memory_order_relaxed) == 0; + return 
s_nInitCallCount.fetch_add(1, atomics::memory_order_relaxed) == 0; } bool CDS_EXPORT_API fini_last_call() { - if ( s_nInitCallCount.fetch_sub( 1, CDS_ATOMIC::memory_order_relaxed ) == 1 ) { - CDS_ATOMIC::atomic_thread_fence( CDS_ATOMIC::memory_order_release ); + if ( s_nInitCallCount.fetch_sub( 1, atomics::memory_order_relaxed ) == 1 ) { + atomics::atomic_thread_fence( atomics::memory_order_release ); return true; } return false; diff --git a/src/ptb_gc.cpp b/src/ptb_gc.cpp index bce91256..23016498 100644 --- a/src/ptb_gc.cpp +++ b/src/ptb_gc.cpp @@ -168,7 +168,7 @@ namespace cds { namespace gc { namespace ptb { details::retired_ptr_node * pHead = nullptr; details::retired_ptr_node * pTail = nullptr; - for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_relaxed)) { + for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_relaxed)) { details::guard_data::handoff_ptr h = pGuard->pHandOff; pGuard->pHandOff = nullptr; while ( h ) { @@ -192,7 +192,7 @@ namespace cds { namespace gc { namespace ptb { details::retired_ptr_buffer::privatize_result retiredList = m_RetiredBuffer.privatize(); if ( retiredList.first ) { - size_t nLiberateThreshold = m_nLiberateThreshold.load(CDS_ATOMIC::memory_order_relaxed); + size_t nLiberateThreshold = m_nLiberateThreshold.load(atomics::memory_order_relaxed); details::liberate_set set( beans::ceil2( retiredList.second > nLiberateThreshold ? retiredList.second : nLiberateThreshold ) ); // Get list of retired pointers @@ -205,10 +205,10 @@ namespace cds { namespace gc { namespace ptb { } // Liberate cycle - for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) ) + for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) ) { // get guarded pointer - details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire); + details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire); if ( valGuarded ) { details::retired_ptr_node * pRetired = set.erase( valGuarded ); @@ -237,7 +237,7 @@ namespace cds { namespace gc { namespace ptb { } else { // liberate cycle did not free any retired pointer - double liberate threshold - m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, CDS_ATOMIC::memory_order_release, CDS_ATOMIC::memory_order_relaxed ); + m_nLiberateThreshold.compare_exchange_strong( nLiberateThreshold, nLiberateThreshold * 2, atomics::memory_order_release, atomics::memory_order_relaxed ); } } } @@ -247,10 +247,10 @@ namespace cds { namespace gc { namespace ptb { { details::guard_data::handoff_ptr const nullHandOff = nullptr; - for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(CDS_ATOMIC::memory_order_acquire) ) + for ( details::guard_data * pGuard = m_GuardPool.begin(); pGuard; pGuard = pGuard->pGlobalNext.load(atomics::memory_order_acquire) ) { // get guarded pointer - details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire); + details::guard_data::guarded_ptr valGuarded = pGuard->pPost.load(atomics::memory_order_acquire); details::guard_data::handoff_ptr h; if ( valGuarded ) { @@ -263,7 +263,7 @@ namespace cds { namespace gc { namespace ptb { // Now, try to set retired node pRetired as 
a hand-off node for the guard cds::lock::Auto al( pGuard->spinHandOff ); - if ( valGuarded == pGuard->pPost.load(CDS_ATOMIC::memory_order_acquire) ) { + if ( valGuarded == pGuard->pPost.load(atomics::memory_order_acquire) ) { if ( pGuard->pHandOff && pGuard->pHandOff->m_ptr.m_p == pRetired->m_ptr.m_p ) { h = nullHandOff ; //nullptr; details::retired_ptr_node * pTail = pGuard->pHandOff; diff --git a/tests/cppunit/thread.h b/tests/cppunit/thread.h index 4c066b90..0cf4da76 100644 --- a/tests/cppunit/thread.h +++ b/tests/cppunit/thread.h @@ -27,7 +27,7 @@ namespace CppUnitMini { ThreadPool& m_Pool; boost::thread * m_pThread; cds::OS::Timer m_Timer; - CDS_ATOMIC::atomic m_bTimeElapsed; + atomics::atomic m_bTimeElapsed; public: double m_nDuration; @@ -60,11 +60,11 @@ namespace CppUnitMini { virtual void fini() {} void stop() { - m_bTimeElapsed.store( true, CDS_ATOMIC::memory_order_release ); + m_bTimeElapsed.store( true, atomics::memory_order_release ); } bool time_elapsed() const { - return m_bTimeElapsed.load( CDS_ATOMIC::memory_order_acquire ); + return m_bTimeElapsed.load( atomics::memory_order_acquire ); } bool check_timeout( size_t nMaxDuration ) diff --git a/tests/test-hdr/misc/cxx11_atomic_class.cpp b/tests/test-hdr/misc/cxx11_atomic_class.cpp index 681fe8fc..7637b72c 100644 --- a/tests/test-hdr/misc/cxx11_atomic_class.cpp +++ b/tests/test-hdr/misc/cxx11_atomic_class.cpp @@ -11,9 +11,9 @@ namespace misc { class cxx11_atomic_class: public CppUnitMini::TestCase { template - void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order ) + void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order ) { - CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order); + atomics::memory_order mo_clear = convert_to_store_order(order); for ( int i = 0; i < 5; ++i ) { CPPUNIT_ASSERT( !f.test_and_set( order )); CPPUNIT_ASSERT( f.test_and_set( order ) ); @@ -32,12 +32,12 @@ namespace misc { f.clear(); } - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_flag_mo( f, atomics::memory_order_relaxed ); + do_test_atomic_flag_mo( f, atomics::memory_order_consume ); + do_test_atomic_flag_mo( f, atomics::memory_order_acquire ); + do_test_atomic_flag_mo( f, atomics::memory_order_release ); + do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel ); + do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst ); } template @@ -185,12 +185,12 @@ namespace misc { } template - void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_type( Atomic& a, atomics::memory_order order ) { typedef Integral integral_type; - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); CPPUNIT_ASSERT( a.is_lock_free() ); a.store((integral_type) 0, oStore ); @@ -210,9 +210,9 @@ namespace misc { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( 
a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == prev ); - CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == n ); prev = n; @@ -226,9 +226,9 @@ namespace misc { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == prev ); - CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == n ); prev = n; @@ -239,14 +239,14 @@ namespace misc { } template - void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_integral( Atomic& a, atomics::memory_order order ) { do_test_atomic_type< Atomic, Integral >( a, order ); typedef Integral integral_type; - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); // fetch_xxx testing a.store( (integral_type) 0, oStore ); @@ -298,18 +298,18 @@ namespace misc { { do_test_atomic_integral(a); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_release ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_integral( a, atomics::memory_order_relaxed ); + do_test_atomic_integral( a, atomics::memory_order_consume ); + do_test_atomic_integral( a, atomics::memory_order_acquire ); + do_test_atomic_integral( a, atomics::memory_order_release ); + do_test_atomic_integral( a, atomics::memory_order_acq_rel ); + do_test_atomic_integral( a, atomics::memory_order_seq_cst ); } template void test_atomic_integral() { - typedef CDS_ATOMIC::atomic atomic_type; + typedef atomics::atomic atomic_type; atomic_type a[8]; for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) { @@ -319,7 +319,7 @@ namespace misc { template void test_atomic_integral_volatile() { - typedef CDS_ATOMIC::atomic volatile atomic_type; + typedef atomics::atomic volatile atomic_type; atomic_type a[8]; for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) { @@ -361,10 +361,10 @@ namespace misc { } template - void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order ) { - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); CPPUNIT_ASSERT( a.is_lock_free() ); a.store( false, oStore ); @@ -377,9 +377,9 @@ namespace misc { CPPUNIT_ASSERT( a.load( oLoad ) == false ); bool expected = false; - 
CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( a.compare_exchange_weak( expected, true, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !a.compare_exchange_weak( expected, false, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == true ); CPPUNIT_ASSERT( a.load( oLoad ) == true ); @@ -387,9 +387,9 @@ namespace misc { a.store( false, oStore ); expected = false; - CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( a.compare_exchange_strong( expected, true, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !a.compare_exchange_strong( expected, false, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == true ); CPPUNIT_ASSERT( a.load( oLoad ) == true ); @@ -399,27 +399,27 @@ namespace misc { template - void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order ) + void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order ) { - CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order); - CDS_ATOMIC::memory_order oStore = convert_to_store_order(order); + atomics::memory_order oLoad = convert_to_load_order(order); + atomics::memory_order oStore = convert_to_store_order(order); void * p; a.store( (void *) arr, oStore ); CPPUNIT_ASSERT( *reinterpret_cast(a.load( oLoad )) == 1 ); p = arr; - CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( a.compare_exchange_weak( p, (void *)(arr + 5), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 0 ); CPPUNIT_ASSERT( *reinterpret_cast(p) == 1 ); - CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !a.compare_exchange_weak( p, (void *)(arr + 3), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *reinterpret_cast(p) == 6 ); - CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( a.compare_exchange_strong( p, (void *)(arr + 3), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *reinterpret_cast(p) == 6 ); - CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !a.compare_exchange_strong( p, (void *)(arr + 5), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 3 ); CPPUNIT_ASSERT( *reinterpret_cast(p) == 4 ); @@ -443,7 +443,7 @@ namespace misc { template void do_test_atomic_pointer_void() { - typedef typename add_volatile, Volatile>::type atomic_pointer; + typedef typename add_volatile, Volatile>::type atomic_pointer; char arr[8]; const char aSize = sizeof(arr)/sizeof(arr[0]); @@ -497,37 +497,37 @@ namespace misc { CPPUNIT_ASSERT( *reinterpret_cast(a.load()) == i - 1 ); } - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire 
); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst ); } template - void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order ) + void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order ) { typedef Integral integral_type; - CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order); - CDS_ATOMIC::memory_order oStore = convert_to_store_order(order); + atomics::memory_order oLoad = convert_to_load_order(order); + atomics::memory_order oStore = convert_to_store_order(order); integral_type * p; a.store( arr, oStore ); CPPUNIT_ASSERT( *a.load( oLoad ) == 1 ); p = arr; - CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( a.compare_exchange_weak( p, arr + 5, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 0 ); CPPUNIT_ASSERT( *p == 1 ); - CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !a.compare_exchange_weak( p, arr + 3, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( a.compare_exchange_strong( p, arr + 3, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !a.compare_exchange_strong( p, arr + 5, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 3 ); CPPUNIT_ASSERT( *p == 4 ); @@ -554,7 +554,7 @@ namespace misc { void test_atomic_pointer_for() { typedef Integral integral_type; - typedef typename add_volatile, Volatile>::type atomic_pointer; + typedef typename add_volatile, Volatile>::type atomic_pointer; integral_type arr[8]; const integral_type aSize = sizeof(arr)/sizeof(arr[0]); @@ -602,12 +602,12 @@ namespace misc { CPPUNIT_ASSERT( *a.load() == i - 1 ); } - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release ); + 
test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst ); } public: @@ -615,7 +615,7 @@ namespace misc { { // Array to test different alignment - CDS_ATOMIC::atomic_flag flags[8]; + atomics::atomic_flag flags[8]; for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i ) do_test_atomic_flag( flags[i] ); } @@ -624,7 +624,7 @@ namespace misc { { // Array to test different alignment - CDS_ATOMIC::atomic_flag volatile flags[8]; + atomics::atomic_flag volatile flags[8]; for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i ) do_test_atomic_flag( flags[i] ); } @@ -638,22 +638,22 @@ namespace misc { for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) { do_test_atomic_bool( a[i] ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_bool( a[i], atomics::memory_order_relaxed ); + do_test_atomic_bool( a[i], atomics::memory_order_consume ); + do_test_atomic_bool( a[i], atomics::memory_order_acquire ); + do_test_atomic_bool( a[i], atomics::memory_order_release ); + do_test_atomic_bool( a[i], atomics::memory_order_acq_rel ); + do_test_atomic_bool( a[i], atomics::memory_order_seq_cst ); } } void test_atomic_bool() { - test_atomic_bool_< CDS_ATOMIC::atomic >(); + test_atomic_bool_< atomics::atomic >(); } void test_atomic_bool_volatile() { - test_atomic_bool_< CDS_ATOMIC::atomic volatile >(); + test_atomic_bool_< atomics::atomic volatile >(); } void test_atomic_char() { test_atomic_integral(); } diff --git a/tests/test-hdr/misc/cxx11_atomic_func.cpp b/tests/test-hdr/misc/cxx11_atomic_func.cpp index 84298b06..724670e3 100644 --- a/tests/test-hdr/misc/cxx11_atomic_func.cpp +++ b/tests/test-hdr/misc/cxx11_atomic_func.cpp @@ -15,17 +15,17 @@ namespace misc { class cxx11_atomic_func: public CppUnitMini::TestCase { template - void do_test_atomic_flag_mo( AtomicFlag& f, CDS_ATOMIC::memory_order order ) + void do_test_atomic_flag_mo( AtomicFlag& f, atomics::memory_order order ) { - CDS_ATOMIC::memory_order mo_clear = convert_to_store_order(order); + atomics::memory_order mo_clear = convert_to_store_order(order); f.clear( convert_to_store_order(order) ); for ( int i = 0; i < 5; ++i ) { - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order )); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set_explicit( &f, order ) ); - CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear ); - CDS_ATOMIC::atomic_flag_clear_explicit( &f, mo_clear ); + CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set_explicit( &f, order )); + CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set_explicit( &f, order ) ); + atomics::atomic_flag_clear_explicit( &f, mo_clear ); + atomics::atomic_flag_clear_explicit( &f, mo_clear ); } //CPPUNIT_ASSERT( f.m_Flag == 0 ); } @@ -37,22 +37,22 @@ namespace misc { for ( int i = 0; i < 5; ++i ) { //CPPUNIT_ASSERT( f.m_Flag == 0 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_flag_test_and_set( &f )); + CPPUNIT_ASSERT( !atomics::atomic_flag_test_and_set( &f )); //CPPUNIT_ASSERT( f.m_Flag != 0 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_flag_test_and_set( &f ) ); + CPPUNIT_ASSERT( atomics::atomic_flag_test_and_set( &f ) ); //CPPUNIT_ASSERT( f.m_Flag != 
0 ); - CDS_ATOMIC::atomic_flag_clear(&f); + atomics::atomic_flag_clear(&f); //CPPUNIT_ASSERT( f.m_Flag == 0 ); - CDS_ATOMIC::atomic_flag_clear(&f); + atomics::atomic_flag_clear(&f); } //CPPUNIT_ASSERT( f.m_Flag == 0 ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_release ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_flag_mo( f, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_flag_mo( f, atomics::memory_order_relaxed ); + do_test_atomic_flag_mo( f, atomics::memory_order_consume ); + do_test_atomic_flag_mo( f, atomics::memory_order_acquire ); + do_test_atomic_flag_mo( f, atomics::memory_order_release ); + do_test_atomic_flag_mo( f, atomics::memory_order_acq_rel ); + do_test_atomic_flag_mo( f, atomics::memory_order_seq_cst ); } template @@ -60,51 +60,51 @@ namespace misc { { typedef Integral integral_type; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) ); - CDS_ATOMIC::atomic_store( &a, (integral_type) 0 ); + CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) ); + atomics::atomic_store( &a, (integral_type) 0 ); CPPUNIT_ASSERT( a == 0 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { integral_type n = integral_type(42) << (nByte * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, n ) == 0 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == n ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, n ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 ); } - integral_type prev = CDS_ATOMIC::atomic_load( &a ); + integral_type prev = atomics::atomic_load( &a ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, n)); CPPUNIT_ASSERT( expected == prev ); CPPUNIT_ASSERT( expected != n ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, n) ); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, n) ); CPPUNIT_ASSERT( expected == n ); prev = n; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n ); } - CDS_ATOMIC::atomic_store( &a, (integral_type) 0 ); + atomics::atomic_store( &a, (integral_type) 0 ); - prev = CDS_ATOMIC::atomic_load( &a ); + prev = atomics::atomic_load( &a ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, n)); CPPUNIT_ASSERT( expected == prev ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, n)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, n)); CPPUNIT_ASSERT( expected == n ); prev = n; - 
CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == n ); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, (integral_type) 0 ) == prev ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, (integral_type) 0 ) == prev ); } template @@ -115,152 +115,152 @@ namespace misc { typedef Integral integral_type; // fetch_xxx testing - CDS_ATOMIC::atomic_store( &a, (integral_type) 0 ); + atomics::atomic_store( &a, (integral_type) 0 ); // fetch_add for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte ) { - integral_type prev = CDS_ATOMIC::atomic_load( &a ); + integral_type prev = atomics::atomic_load( &a ); integral_type n = integral_type(42) << (nByte * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, n) == prev); + CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, n) == prev); } // fetch_sub for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte ) { - integral_type prev = CDS_ATOMIC::atomic_load( &a ); + integral_type prev = atomics::atomic_load( &a ); integral_type n = integral_type(42) << ((nByte - 1) * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, n) == prev); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, n) == prev); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == 0 ); // fetch_or / fetc_xor / fetch_and for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit ) { - integral_type prev = CDS_ATOMIC::atomic_load( &a ); + integral_type prev = atomics::atomic_load( &a ); integral_type mask = 1 << nBit; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or( &a, mask ) == prev ); - prev = CDS_ATOMIC::atomic_load( &a ); + CPPUNIT_ASSERT( atomics::atomic_fetch_or( &a, mask ) == prev ); + prev = atomics::atomic_load( &a ); CPPUNIT_ASSERT( ( prev & mask) == mask); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and( &a, (integral_type) ~mask ) == prev ); - prev = CDS_ATOMIC::atomic_load( &a ); + CPPUNIT_ASSERT( atomics::atomic_fetch_and( &a, (integral_type) ~mask ) == prev ); + prev = atomics::atomic_load( &a ); CPPUNIT_ASSERT_EX( integral_type(prev & mask) == integral_type(0), "prev=" << std::hex << prev << ", mask=" << std::hex << mask); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor( &a, mask ) == prev ); - prev = CDS_ATOMIC::atomic_load( &a ); + CPPUNIT_ASSERT( atomics::atomic_fetch_xor( &a, mask ) == prev ); + prev = atomics::atomic_load( &a ); CPPUNIT_ASSERT( ( prev & mask) == mask); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == (integral_type) -1 ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == (integral_type) -1 ); } template - void do_test_atomic_type( Atomic& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_type( Atomic& a, atomics::memory_order order ) { typedef Integral integral_type; - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) ); - CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore ); + CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) ); + atomics::atomic_store_explicit( &a, (integral_type) 0, oStore ); CPPUNIT_ASSERT( a == 0 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { 
integral_type n = integral_type(42) << (nByte * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, n, order ) == 0 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, n, order ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 ); } - integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + integral_type prev = atomics::atomic_load_explicit( &a, oLoad ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == prev ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == n ); prev = n; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n ); } - CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore ); + atomics::atomic_store_explicit( &a, (integral_type) 0, oStore ); - prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + prev = atomics::atomic_load_explicit( &a, oLoad ); for ( size_t nByte = 0; nByte < sizeof(Integral); ++nByte ) { integral_type n = integral_type(42) << (nByte * 8); integral_type expected = prev; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == prev ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, n, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == n ); prev = n; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == n ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == n ); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, (integral_type) 0, order ) == prev ); } template - void do_test_atomic_integral( Atomic& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_integral( Atomic& a, atomics::memory_order order ) { do_test_atomic_type< Atomic, Integral >( a, order ); typedef Integral integral_type; - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); 
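The oLoad / oStore pair used throughout these tests comes from the convert_to_load_order() / convert_to_store_order() helpers that this patch also renames in cxx11_convert_memory_order.h (shown further down). Restated here against std::memory_order so the snippet compiles on its own, the idea is simply that a plain store may not carry acquire/consume semantics and a plain load may not carry release semantics:

    #include <atomic>

    inline std::memory_order convert_to_store_order( std::memory_order order )
    {
        switch ( order ) {
        case std::memory_order_acquire:
        case std::memory_order_consume:
            return std::memory_order_relaxed;      // acquire/consume are meaningless for a store
        case std::memory_order_acq_rel:
            return std::memory_order_release;      // keep only the writer half
        default:
            return order;                          // relaxed, release, seq_cst pass through
        }
    }

    inline std::memory_order convert_to_load_order( std::memory_order order )
    {
        switch ( order ) {
        case std::memory_order_release:
            return std::memory_order_relaxed;      // release is meaningless for a load
        case std::memory_order_acq_rel:
            return std::memory_order_acquire;      // keep only the reader half
        default:
            return order;
        }
    }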
// fetch_xxx testing - CDS_ATOMIC::atomic_store_explicit( &a, (integral_type) 0, oStore ); + atomics::atomic_store_explicit( &a, (integral_type) 0, oStore ); // fetch_add for ( size_t nByte = 0; nByte < sizeof(integral_type); ++nByte ) { - integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + integral_type prev = atomics::atomic_load_explicit( &a, oLoad ); integral_type n = integral_type(42) << (nByte * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, n, order) == prev); + CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, n, order) == prev); } // fetch_sub for ( size_t nByte = sizeof(integral_type); nByte > 0; --nByte ) { - integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + integral_type prev = atomics::atomic_load_explicit( &a, oLoad ); integral_type n = integral_type(42) << ((nByte - 1) * 8); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, n, order ) == prev); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, n, order ) == prev); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 0 ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == 0 ); // fetch_or / fetc_xor / fetch_and for ( size_t nBit = 0; nBit < sizeof(integral_type) * 8; ++nBit ) { - integral_type prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) ;; + integral_type prev = atomics::atomic_load_explicit( &a, oLoad ) ;; integral_type mask = 1 << nBit; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_or_explicit( &a, mask, order ) == prev ); - prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + CPPUNIT_ASSERT( atomics::atomic_fetch_or_explicit( &a, mask, order ) == prev ); + prev = atomics::atomic_load_explicit( &a, oLoad ); CPPUNIT_ASSERT( ( prev & mask) == mask); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev ); - prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + CPPUNIT_ASSERT( atomics::atomic_fetch_and_explicit( &a, (integral_type) ~mask, order ) == prev ); + prev = atomics::atomic_load_explicit( &a, oLoad ); CPPUNIT_ASSERT( ( prev & mask) == 0); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_xor_explicit( &a, mask, order ) == prev ); - prev = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + CPPUNIT_ASSERT( atomics::atomic_fetch_xor_explicit( &a, mask, order ) == prev ); + prev = atomics::atomic_load_explicit( &a, oLoad ); CPPUNIT_ASSERT( ( prev & mask) == mask); } - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == (integral_type) -1 ); } template @@ -268,18 +268,18 @@ namespace misc { { do_test_atomic_integral(a); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_release ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_integral( a, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_integral( a, atomics::memory_order_relaxed ); + do_test_atomic_integral( a, atomics::memory_order_consume ); + do_test_atomic_integral( a, atomics::memory_order_acquire ); + do_test_atomic_integral( a, atomics::memory_order_release ); + do_test_atomic_integral( a, atomics::memory_order_acq_rel ); + do_test_atomic_integral( a, atomics::memory_order_seq_cst ); } template void test_atomic_integral() { - typedef CDS_ATOMIC::atomic atomic_type; + typedef 
atomics::atomic atomic_type; atomic_type a[8]; for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) { test_atomic_integral_( a[i] ); @@ -288,7 +288,7 @@ namespace misc { template void test_atomic_integral_volatile() { - typedef CDS_ATOMIC::atomic volatile atomic_type; + typedef atomics::atomic volatile atomic_type; atomic_type a[8]; for ( size_t i = 0; i < sizeof(a)/sizeof(a[0]); ++i ) { test_atomic_integral_( a[i] ); @@ -298,114 +298,114 @@ namespace misc { template void do_test_atomic_bool(AtomicBool& a) { - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) ); - CDS_ATOMIC::atomic_store( &a, false ); + CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) ); + atomics::atomic_store( &a, false ); CPPUNIT_ASSERT( a == false ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, true ) == false ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == false ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, true ) == false ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == false ); bool expected = false; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, true)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &expected, true)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &expected, false)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &expected, false)); CPPUNIT_ASSERT( expected == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true ); - CDS_ATOMIC::atomic_store( &a, false ); + atomics::atomic_store( &a, false ); expected = false; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, true)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &expected, true)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &expected, false)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &expected, false)); CPPUNIT_ASSERT( expected == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, false ) == true ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, false ) == true ); } template - void do_test_atomic_bool( AtomicBool& a, CDS_ATOMIC::memory_order order ) + void do_test_atomic_bool( AtomicBool& a, atomics::memory_order order ) { - const CDS_ATOMIC::memory_order oLoad = convert_to_load_order( order ); - const CDS_ATOMIC::memory_order oStore = convert_to_store_order( order ); + const atomics::memory_order oLoad = convert_to_load_order( order ); + const atomics::memory_order oStore = convert_to_store_order( order ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_is_lock_free( &a ) ); - CDS_ATOMIC::atomic_store_explicit( &a, false, oStore ); + CPPUNIT_ASSERT( atomics::atomic_is_lock_free( &a ) ); + atomics::atomic_store_explicit( &a, false, oStore ); CPPUNIT_ASSERT( a == false ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false ); - CPPUNIT_ASSERT( 
CDS_ATOMIC::atomic_exchange_explicit( &a, true, order ) == false ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == false ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, true, order ) == false ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == false ); bool expected = false; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &expected, true, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &expected, false, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true ); - CDS_ATOMIC::atomic_store( &a, false ); + atomics::atomic_store( &a, false ); expected = false; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &expected, true, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == false ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, CDS_ATOMIC::memory_order_relaxed)); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &expected, false, order, atomics::memory_order_relaxed)); CPPUNIT_ASSERT( expected == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == true ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == true ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, false, order ) == true ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, false, order ) == true ); } template - void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, CDS_ATOMIC::memory_order order ) + void test_atomic_pointer_for_( Atomic& a, Integral * arr, Integral aSize, atomics::memory_order order ) { typedef Integral integral_type; - CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order); - CDS_ATOMIC::memory_order oStore = convert_to_store_order(order); + atomics::memory_order oLoad = convert_to_load_order(order); + atomics::memory_order oStore = convert_to_store_order(order); integral_type * p; - CDS_ATOMIC::atomic_store_explicit( &a, arr, oStore ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 ); + atomics::atomic_store_explicit( &a, arr, oStore ); + CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 ); p = arr; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 0 ); CPPUNIT_ASSERT( *p == 1 ); - CPPUNIT_ASSERT( 
!CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 3, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, &p, arr + 5, order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 3 ); CPPUNIT_ASSERT( *p == 4 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange_explicit( &a, arr, order ) == arr + 3 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == arr ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == 1 ); + CPPUNIT_ASSERT( atomics::atomic_exchange_explicit( &a, arr, order ) == arr + 3 ); + CPPUNIT_ASSERT( atomics::atomic_load_explicit( &a, oLoad ) == arr ); + CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == 1 ); for ( integral_type i = 1; i < aSize; ++i ) { - integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + integral_type * p = atomics::atomic_load_explicit( &a, oLoad ); CPPUNIT_ASSERT( *p == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order ) == p ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i + 1 ); + CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order ) == p ); + CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i + 1 ); } for ( integral_type i = aSize; i > 1; --i ) { - integral_type * p = CDS_ATOMIC::atomic_load_explicit( &a, oLoad ); + integral_type * p = atomics::atomic_load_explicit( &a, oLoad ); CPPUNIT_ASSERT( *p == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order ) == p ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load_explicit( &a, oLoad ) == i - 1 ); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order ) == p ); + CPPUNIT_ASSERT( *atomics::atomic_load_explicit( &a, oLoad ) == i - 1 ); } } @@ -413,7 +413,7 @@ namespace misc { void test_atomic_pointer_for() { typedef Integral integral_type; - typedef typename add_volatile, Volatile>::type atomic_pointer; + typedef typename add_volatile, Volatile>::type atomic_pointer; integral_type arr[8]; const integral_type aSize = sizeof(arr)/sizeof(arr[0]); @@ -424,93 +424,93 @@ namespace misc { atomic_pointer a; integral_type * p; - CDS_ATOMIC::atomic_store( &a, arr ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 ); + atomics::atomic_store( &a, arr ); + CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 ); p = arr; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 5 )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, &p, arr + 5 )); CPPUNIT_ASSERT( p == arr + 0 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_weak( &a, &p, arr + 3 )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, &p, arr + 3 )); CPPUNIT_ASSERT( p == arr + 5 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 3 )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, &p, arr + 3 )); 
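Two properties of atomic pointers that the typed-pointer loops here lean on, shown with std::atomic for illustration (the asserts mirror what the CPPUNIT checks verify for the cds implementation): fetch_add/fetch_sub step in whole elements, and a failed compare-exchange writes the currently stored pointer back into the expected argument.

    #include <atomic>
    #include <cassert>

    int main()
    {
        int arr[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        std::atomic<int*> a( arr );

        // fetch_add on an atomic pointer advances by whole elements (sizeof(int) bytes here),
        // which is why the tests can walk the array with a stride of 1.
        int* prev = a.fetch_add( 1 );                    // returns the old pointer
        assert( prev == arr );
        assert( *a.load() == 2 );

        // A failed compare-exchange loads the current value into 'expected'.
        int* expected = arr;                             // stale: a now points at arr + 1
        bool ok = a.compare_exchange_strong( expected, arr + 3 );
        assert( !ok && expected == arr + 1 );

        ok = a.compare_exchange_strong( expected, arr + 3 );  // retry with the refreshed value
        assert( ok && a.load() == arr + 3 );
        return 0;
    }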
CPPUNIT_ASSERT( p == arr + 5 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, &p, arr + 5 )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, &p, arr + 5 )); CPPUNIT_ASSERT( p == arr + 3 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_exchange( &a, arr ) == arr + 3 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_load( &a ) == arr ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == 1 ); + CPPUNIT_ASSERT( atomics::atomic_exchange( &a, arr ) == arr + 3 ); + CPPUNIT_ASSERT( atomics::atomic_load( &a ) == arr ); + CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == 1 ); for ( integral_type i = 1; i < aSize; ++i ) { - integral_type * p = CDS_ATOMIC::atomic_load( &a ); + integral_type * p = atomics::atomic_load( &a ); CPPUNIT_ASSERT( *p == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 ) == p ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i + 1 ); + CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 ) == p ); + CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i + 1 ); } for ( integral_type i = aSize; i > 1; --i ) { - integral_type * p = CDS_ATOMIC::atomic_load( &a ); + integral_type * p = atomics::atomic_load( &a ); CPPUNIT_ASSERT( *p == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 ) == p ); - CPPUNIT_ASSERT( *CDS_ATOMIC::atomic_load( &a ) == i - 1 ); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 ) == p ); + CPPUNIT_ASSERT( *atomics::atomic_load( &a ) == i - 1 ); } - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_consume ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_release ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel ); - test_atomic_pointer_for_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_relaxed ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_consume ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acquire ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_release ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_acq_rel ); + test_atomic_pointer_for_( a, arr, aSize, atomics::memory_order_seq_cst ); } template - void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, CDS_ATOMIC::memory_order order ) + void do_test_atomic_pointer_void_( Atomic& a, char * arr, char aSize, atomics::memory_order order ) { - CDS_ATOMIC::memory_order oLoad = convert_to_load_order(order); - CDS_ATOMIC::memory_order oStore = convert_to_store_order(order); + atomics::memory_order oLoad = convert_to_load_order(order); + atomics::memory_order oStore = convert_to_store_order(order); char * p; - CDS_ATOMIC::atomic_store_explicit( &a, (void *) arr, oStore ); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 ); + atomics::atomic_store_explicit( &a, (void *) arr, oStore ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == 1 ); p = arr; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 0 ); CPPUNIT_ASSERT( *p == 1 ); - CPPUNIT_ASSERT( 
!CDS_ATOMIC::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 3), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 5 ); CPPUNIT_ASSERT( *p == 6 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, CDS_ATOMIC::memory_order_relaxed )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong_explicit( &a, (void **) &p, (void *)(arr + 5), order, atomics::memory_order_relaxed )); CPPUNIT_ASSERT( p == arr + 3 ); CPPUNIT_ASSERT( *p == 4 ); - CPPUNIT_ASSERT( reinterpret_cast(CDS_ATOMIC::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 ); - CPPUNIT_ASSERT( reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == arr ); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == 1 ); + CPPUNIT_ASSERT( reinterpret_cast(atomics::atomic_exchange_explicit( &a, (void *) arr, order )) == arr + 3 ); + CPPUNIT_ASSERT( reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == arr ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == 1 ); for ( char i = 1; i < aSize; ++i ) { - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add_explicit( &a, 1, order )); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i + 1 ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == i ); + CPPUNIT_ASSERT( atomics::atomic_fetch_add_explicit( &a, 1, order )); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == i + 1 ); } for ( char i = aSize; i > 1; --i ) { - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub_explicit( &a, 1, order )); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load_explicit( &a, oLoad )) == i - 1 ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == i ); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub_explicit( &a, 1, order )); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load_explicit( &a, oLoad )) == i - 1 ); } } template void do_test_atomic_pointer_void() { - typedef typename add_volatile, Volatile>::type atomic_pointer; + typedef typename add_volatile, Volatile>::type atomic_pointer; char arr[8]; const char aSize = sizeof(arr)/sizeof(arr[0]); @@ -521,54 +521,54 @@ namespace misc { atomic_pointer a; char * p; - CDS_ATOMIC::atomic_store( &a, (void *) arr ); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == 1 ); + atomics::atomic_store( &a, (void *) arr ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == 1 ); p = arr; - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 5) )); CPPUNIT_ASSERT( p == arr + 0 ); - CPPUNIT_ASSERT( 
!CDS_ATOMIC::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_weak( &a, (void **) &p, (void *)(arr + 3) )); CPPUNIT_ASSERT( p == arr + 5 ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) )); + CPPUNIT_ASSERT( atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 3) )); CPPUNIT_ASSERT( p == arr + 5 ); - CPPUNIT_ASSERT( !CDS_ATOMIC::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) )); + CPPUNIT_ASSERT( !atomics::atomic_compare_exchange_strong( &a, (void **) &p, (void *)(arr + 5) )); CPPUNIT_ASSERT( p == arr + 3 ); - CPPUNIT_ASSERT( reinterpret_cast( CDS_ATOMIC::atomic_exchange( &a, (void *) arr )) == arr + 3 ); - CPPUNIT_ASSERT( reinterpret_cast( CDS_ATOMIC::atomic_load( &a )) == arr ); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == 1 ); + CPPUNIT_ASSERT( reinterpret_cast( atomics::atomic_exchange( &a, (void *) arr )) == arr + 3 ); + CPPUNIT_ASSERT( reinterpret_cast( atomics::atomic_load( &a )) == arr ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == 1 ); for ( char i = 1; i < aSize; ++i ) { - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_add( &a, 1 )); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == i + 1 ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == i ); + CPPUNIT_ASSERT( atomics::atomic_fetch_add( &a, 1 )); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == i + 1 ); } for ( char i = aSize; i > 1; --i ) { - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == i ); - CPPUNIT_ASSERT( CDS_ATOMIC::atomic_fetch_sub( &a, 1 )); - CPPUNIT_ASSERT( *reinterpret_cast(CDS_ATOMIC::atomic_load( &a )) == i - 1 ); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == i ); + CPPUNIT_ASSERT( atomics::atomic_fetch_sub( &a, 1 )); + CPPUNIT_ASSERT( *reinterpret_cast(atomics::atomic_load( &a )) == i - 1 ); } - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_consume ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_release ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_pointer_void_( a, arr, aSize, CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_relaxed ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_consume ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acquire ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_release ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_acq_rel ); + do_test_atomic_pointer_void_( a, arr, aSize, atomics::memory_order_seq_cst ); } public: void test_atomic_flag() { - CDS_ATOMIC::atomic_flag flags[8]; + atomics::atomic_flag flags[8]; for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i ) do_test_atomic_flag( flags[i] ); } void test_atomic_flag_volatile() { - CDS_ATOMIC::atomic_flag volatile flags[8]; + atomics::atomic_flag volatile flags[8]; for ( size_t i = 0; i < sizeof(flags)/sizeof(flags[0]); ++i ) do_test_atomic_flag( flags[i] ); } @@ -580,22 +580,22 @@ namespace misc { for ( size_t i = 0; i < 
sizeof(a)/sizeof(a[0]); ++i ) { do_test_atomic_bool( a[i] ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_relaxed ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_consume ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acquire ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_release ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_acq_rel ); - do_test_atomic_bool( a[i], CDS_ATOMIC::memory_order_seq_cst ); + do_test_atomic_bool( a[i], atomics::memory_order_relaxed ); + do_test_atomic_bool( a[i], atomics::memory_order_consume ); + do_test_atomic_bool( a[i], atomics::memory_order_acquire ); + do_test_atomic_bool( a[i], atomics::memory_order_release ); + do_test_atomic_bool( a[i], atomics::memory_order_acq_rel ); + do_test_atomic_bool( a[i], atomics::memory_order_seq_cst ); } } void test_atomic_bool() { - test_atomic_bool_ >(); + test_atomic_bool_ >(); } void test_atomic_bool_volatile() { - test_atomic_bool_ volatile >(); + test_atomic_bool_ volatile >(); } void test_atomic_char() { test_atomic_integral(); } @@ -656,19 +656,19 @@ namespace misc { void test_atomic_fence() { - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_relaxed ); - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_consume ); - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acquire ); - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_release ); - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_acq_rel ); - CDS_ATOMIC::atomic_thread_fence(CDS_ATOMIC::memory_order_seq_cst ); - - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_relaxed ); - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_consume ); - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acquire ); - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_release ); - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_acq_rel ); - CDS_ATOMIC::atomic_signal_fence(CDS_ATOMIC::memory_order_seq_cst ); + atomics::atomic_thread_fence(atomics::memory_order_relaxed ); + atomics::atomic_thread_fence(atomics::memory_order_consume ); + atomics::atomic_thread_fence(atomics::memory_order_acquire ); + atomics::atomic_thread_fence(atomics::memory_order_release ); + atomics::atomic_thread_fence(atomics::memory_order_acq_rel ); + atomics::atomic_thread_fence(atomics::memory_order_seq_cst ); + + atomics::atomic_signal_fence(atomics::memory_order_relaxed ); + atomics::atomic_signal_fence(atomics::memory_order_consume ); + atomics::atomic_signal_fence(atomics::memory_order_acquire ); + atomics::atomic_signal_fence(atomics::memory_order_release ); + atomics::atomic_signal_fence(atomics::memory_order_acq_rel ); + atomics::atomic_signal_fence(atomics::memory_order_seq_cst ); } public: diff --git a/tests/test-hdr/misc/cxx11_convert_memory_order.h b/tests/test-hdr/misc/cxx11_convert_memory_order.h index 8c0083d6..cf6be725 100644 --- a/tests/test-hdr/misc/cxx11_convert_memory_order.h +++ b/tests/test-hdr/misc/cxx11_convert_memory_order.h @@ -4,26 +4,26 @@ namespace misc { - static inline CDS_ATOMIC::memory_order convert_to_store_order( CDS_ATOMIC::memory_order order ) + static inline atomics::memory_order convert_to_store_order( atomics::memory_order order ) { switch ( order ) { - case CDS_ATOMIC::memory_order_acquire: - case CDS_ATOMIC::memory_order_consume: - return CDS_ATOMIC::memory_order_relaxed; - case CDS_ATOMIC::memory_order_acq_rel: - return CDS_ATOMIC::memory_order_release; + case atomics::memory_order_acquire: + case atomics::memory_order_consume: + return 
atomics::memory_order_relaxed; + case atomics::memory_order_acq_rel: + return atomics::memory_order_release; default: return order; } } - static inline CDS_ATOMIC::memory_order convert_to_load_order( CDS_ATOMIC::memory_order order ) + static inline atomics::memory_order convert_to_load_order( atomics::memory_order order ) { switch ( order ) { - case CDS_ATOMIC::memory_order_release: - return CDS_ATOMIC::memory_order_relaxed; - case CDS_ATOMIC::memory_order_acq_rel: - return CDS_ATOMIC::memory_order_acquire; + case atomics::memory_order_release: + return atomics::memory_order_relaxed; + case atomics::memory_order_acq_rel: + return atomics::memory_order_acquire; default: return order; } diff --git a/tests/unit/map2/map_delodd.cpp b/tests/unit/map2/map_delodd.cpp index 6791b8e6..6a8e3a48 100644 --- a/tests/unit/map2/map_delodd.cpp +++ b/tests/unit/map2/map_delodd.cpp @@ -147,7 +147,7 @@ namespace map2 { typedef size_t value_type; typedef std::pair pair_type; - CDS_ATOMIC::atomic m_nInsThreadCount; + atomics::atomic m_nInsThreadCount; // Inserts keys from [0..N) template @@ -210,7 +210,7 @@ namespace map2 { } } - getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_acquire ); + getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_acquire ); } }; @@ -296,7 +296,7 @@ namespace map2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -310,7 +310,7 @@ namespace map2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -369,7 +369,7 @@ namespace map2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -383,7 +383,7 @@ namespace map2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -456,7 +456,7 @@ namespace map2 { } } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -484,7 +484,7 @@ namespace map2 { } } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -512,7 +512,7 @@ namespace map2 { typedef InsertThread insert_thread; typedef DeleteThread delete_thread; - m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release ); + m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release ); CppUnitMini::ThreadPool pool( *this ); pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount ); @@ -554,7 +554,7 @@ namespace map2 { typedef DeleteThread delete_thread; typedef ExtractThread< typename Map::gc, Map > extract_thread; - m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release ); + m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release ); CppUnitMini::ThreadPool pool( *this ); pool.add( new insert_thread( pool, testMap ), c_nInsThreadCount ); diff --git a/tests/unit/map2/map_insdel_func.cpp b/tests/unit/map2/map_insdel_func.cpp index d47a6bea..765db471 100644 
--- a/tests/unit/map2/map_insdel_func.cpp +++ b/tests/unit/map2/map_insdel_func.cpp @@ -31,8 +31,8 @@ namespace map2 { struct value_type { size_t nKey; size_t nData; - CDS_ATOMIC::atomic nEnsureCall; - CDS_ATOMIC::atomic bInitialized; + atomics::atomic nEnsureCall; + atomics::atomic bInitialized; cds::OS::ThreadId threadId ; // insert thread id typedef cds::lock::Spinlock< cds::backoff::pause > lock_type; @@ -49,8 +49,8 @@ namespace map2 { value_type( value_type const& s ) : nKey(s.nKey) , nData(s.nData) - , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed)) - , bInitialized( s.bInitialized.load(CDS_ATOMIC::memory_order_relaxed) ) + , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed)) + , bInitialized( s.bInitialized.load(atomics::memory_order_relaxed) ) , threadId( cds::OS::getCurrentThreadId() ) {} @@ -59,8 +59,8 @@ namespace map2 { { nKey = v.nKey; nData = v.nData; - nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); - bInitialized.store(v.bInitialized.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed); + nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed ); + bInitialized.store(v.bInitialized.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed); return *this; } @@ -95,7 +95,7 @@ namespace map2 { val.second.nData = val.first * 8; ++nTestFunctorRef; - val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed); + val.second.bInitialized.store( true, atomics::memory_order_relaxed); } }; @@ -187,10 +187,10 @@ namespace map2 { ++nCreated; val.second.nKey = val.first; val.second.nData = val.first * 8; - val.second.bInitialized.store( true, CDS_ATOMIC::memory_order_relaxed); + val.second.bInitialized.store( true, atomics::memory_order_relaxed); } else { - val.second.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + val.second.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed ); ++nModified; } } @@ -304,7 +304,7 @@ namespace map2 { void operator ()( pair_type& item ) { while ( true ) { - if ( item.second.bInitialized.load( CDS_ATOMIC::memory_order_relaxed )) { + if ( item.second.bInitialized.load( atomics::memory_order_relaxed )) { cds::lock::scoped_lock< typename value_type::lock_type> ac( item.second.m_access ); if ( m_cnt.nKeyExpected == item.second.nKey && m_cnt.nKeyExpected * 8 == item.second.nData ) diff --git a/tests/unit/queue/intrusive_queue_reader_writer.cpp b/tests/unit/queue/intrusive_queue_reader_writer.cpp index 01cf3936..1c36934e 100644 --- a/tests/unit/queue/intrusive_queue_reader_writer.cpp +++ b/tests/unit/queue/intrusive_queue_reader_writer.cpp @@ -94,7 +94,7 @@ namespace queue { } m_fTime = m_Timer.duration() - m_fTime; - getTest().m_nProducerCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + getTest().m_nProducerCount.fetch_sub( 1, atomics::memory_order_release ); } }; @@ -177,7 +177,7 @@ namespace queue { } else { ++m_nPopEmpty; - if ( getTest().m_nProducerCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 && m_Queue.empty() ) + if ( getTest().m_nProducerCount.load( atomics::memory_order_acquire ) == 0 && m_Queue.empty() ) break; } } @@ -206,7 +206,7 @@ namespace queue { protected: size_t m_nThreadPushCount; - CDS_ATOMIC::atomic m_nProducerCount; + atomics::atomic m_nProducerCount; static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe; protected: @@ -318,7 +318,7 @@ namespace queue { CppUnitMini::ThreadPool pool( *this ); - m_nProducerCount.store( 
s_nWriterThreadCount, CDS_ATOMIC::memory_order_release ); + m_nProducerCount.store( s_nWriterThreadCount, atomics::memory_order_release ); // Writers must be first pool.add( new Producer( pool, testQueue ), s_nWriterThreadCount ); diff --git a/tests/unit/queue/queue_reader_writer.cpp b/tests/unit/queue/queue_reader_writer.cpp index ea36000a..93e9b5f4 100644 --- a/tests/unit/queue/queue_reader_writer.cpp +++ b/tests/unit/queue/queue_reader_writer.cpp @@ -178,7 +178,7 @@ namespace queue { protected: size_t m_nThreadPushCount; - CDS_ATOMIC::atomic m_nWriterDone; + atomics::atomic m_nWriterDone; protected: template diff --git a/tests/unit/set2/set_delodd.cpp b/tests/unit/set2/set_delodd.cpp index b4d9298a..d5dab928 100644 --- a/tests/unit/set2/set_delodd.cpp +++ b/tests/unit/set2/set_delodd.cpp @@ -147,7 +147,7 @@ namespace set2 { typedef key_thread key_type; typedef size_t value_type; - CDS_ATOMIC::atomic m_nInsThreadCount; + atomics::atomic m_nInsThreadCount; // Inserts keys from [0..N) template @@ -210,7 +210,7 @@ namespace set2 { } } - getTest().m_nInsThreadCount.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + getTest().m_nInsThreadCount.fetch_sub( 1, atomics::memory_order_release ); } }; @@ -336,7 +336,7 @@ namespace set2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -350,7 +350,7 @@ namespace set2 { ++m_nDeleteFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -409,7 +409,7 @@ namespace set2 { ++m_nExtractFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -423,7 +423,7 @@ namespace set2 { ++m_nExtractFailed; } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -491,7 +491,7 @@ namespace set2 { xp.release(); } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -515,7 +515,7 @@ namespace set2 { xp.release(); } } - if ( getTest().m_nInsThreadCount.load( CDS_ATOMIC::memory_order_acquire ) == 0 ) + if ( getTest().m_nInsThreadCount.load( atomics::memory_order_acquire ) == 0 ) break; } } @@ -545,7 +545,7 @@ namespace set2 { typedef InsertThread insert_thread; typedef DeleteThread delete_thread; - m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release ); + m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release ); CppUnitMini::ThreadPool pool( *this ); pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount ); @@ -586,7 +586,7 @@ namespace set2 { typedef DeleteThread delete_thread; typedef ExtractThread< typename Set::gc, Set > extract_thread; - m_nInsThreadCount.store( c_nInsThreadCount, CDS_ATOMIC::memory_order_release ); + m_nInsThreadCount.store( c_nInsThreadCount, atomics::memory_order_release ); CppUnitMini::ThreadPool pool( *this ); pool.add( new insert_thread( pool, testSet ), c_nInsThreadCount ); diff --git a/tests/unit/set2/set_insdel_func.h b/tests/unit/set2/set_insdel_func.h index 1f0bdf7d..999bae05 100644 --- a/tests/unit/set2/set_insdel_func.h +++ 
b/tests/unit/set2/set_insdel_func.h @@ -28,7 +28,7 @@ namespace set2 { struct value_type { size_t nKey; size_t nData; - CDS_ATOMIC::atomic nEnsureCall; + atomics::atomic nEnsureCall; bool volatile bInitialized; cds::OS::ThreadId threadId ; // insert thread id @@ -46,7 +46,7 @@ namespace set2 { value_type( value_type const& s ) : nKey(s.nKey) , nData(s.nData) - , nEnsureCall(s.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed)) + , nEnsureCall(s.nEnsureCall.load(atomics::memory_order_relaxed)) , bInitialized( s.bInitialized ) , threadId( cds::OS::getCurrentThreadId() ) {} @@ -56,7 +56,7 @@ namespace set2 { { nKey = v.nKey; nData = v.nData; - nEnsureCall.store( v.nEnsureCall.load(CDS_ATOMIC::memory_order_relaxed), CDS_ATOMIC::memory_order_relaxed ); + nEnsureCall.store( v.nEnsureCall.load(atomics::memory_order_relaxed), atomics::memory_order_relaxed ); bInitialized = v.bInitialized; return *this; @@ -196,7 +196,7 @@ namespace set2 { ++nCreated; } else { - val.val.nEnsureCall.fetch_add( 1, CDS_ATOMIC::memory_order_relaxed ); + val.val.nEnsureCall.fetch_add( 1, atomics::memory_order_relaxed ); ++nModified; } } diff --git a/tests/unit/stack/stack_intrusive_pushpop.cpp b/tests/unit/stack/stack_intrusive_pushpop.cpp index c4d6fad2..ec6039e5 100644 --- a/tests/unit/stack/stack_intrusive_pushpop.cpp +++ b/tests/unit/stack/stack_intrusive_pushpop.cpp @@ -40,7 +40,7 @@ namespace istack { class IntrusiveStack_PushPop: public CppUnitMini::TestCase { - CDS_ATOMIC::atomic m_nWorkingProducers; + atomics::atomic m_nWorkingProducers; static CDS_CONSTEXPR_CONST size_t c_nValArraySize = 1024; static CDS_CONSTEXPR_CONST size_t c_nBadConsumer = 0xbadc0ffe; @@ -99,7 +99,7 @@ namespace istack { ++m_nPushError; } - getTest().m_nWorkingProducers.fetch_sub( 1, CDS_ATOMIC::memory_order_release ); + getTest().m_nWorkingProducers.fetch_sub( 1, atomics::memory_order_release ); } }; @@ -147,7 +147,7 @@ namespace istack { m_nDirtyPop = 0; memset( m_arrPop, 0, sizeof(m_arrPop)); - while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) { + while ( !(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) { typename Stack::value_type * p = m_Stack.pop(); if ( p ) { p->nConsumer = m_nThreadNo; @@ -236,7 +236,7 @@ namespace istack { template void test( Stack& testStack, value_array& arrValue ) { - m_nWorkingProducers.store( s_nPushThreadCount, CDS_ATOMIC::memory_order_release ); + m_nWorkingProducers.store( s_nPushThreadCount, atomics::memory_order_release ); size_t const nPushCount = s_nStackSize / s_nPushThreadCount; typename Stack::value_type * pValStart = arrValue.get(); diff --git a/tests/unit/stack/stack_pushpop.cpp b/tests/unit/stack/stack_pushpop.cpp index 30ecdf2b..7d1f8e66 100644 --- a/tests/unit/stack/stack_pushpop.cpp +++ b/tests/unit/stack/stack_pushpop.cpp @@ -29,7 +29,7 @@ namespace stack { class Stack_PushPop: public CppUnitMini::TestCase { - CDS_ATOMIC::atomic m_nWorkingProducers; + atomics::atomic m_nWorkingProducers; static size_t const c_nValArraySize = 1024; template @@ -85,7 +85,7 @@ namespace stack { } - getTest().m_nWorkingProducers.fetch_sub(1, CDS_ATOMIC::memory_order_release); + getTest().m_nWorkingProducers.fetch_sub(1, atomics::memory_order_release); } }; @@ -134,7 +134,7 @@ namespace stack { memset( m_arrPop, 0, sizeof(m_arrPop)); SimpleValue v; - while ( !(getTest().m_nWorkingProducers.load(CDS_ATOMIC::memory_order_acquire) == 0 && m_Stack.empty()) ) { + while ( 
!(getTest().m_nWorkingProducers.load(atomics::memory_order_acquire) == 0 && m_Stack.empty()) ) { if ( m_Stack.pop( v )) { ++m_nPopCount; if ( v.nNo < sizeof(m_arrPop)/sizeof(m_arrPop[0]) ) @@ -236,7 +236,7 @@ namespace stack { template void test( Stack& testStack ) { - m_nWorkingProducers.store(s_nPushThreadCount, CDS_ATOMIC::memory_order_release); + m_nWorkingProducers.store(s_nPushThreadCount, atomics::memory_order_release); size_t const nPushCount = s_nStackSize / s_nPushThreadCount; CppUnitMini::ThreadPool pool( *this ); -- 2.34.1
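A note on the convert_to_store_order / convert_to_load_order helpers renamed above: they exist because a plain store cannot carry acquire or consume semantics and a plain load cannot carry release semantics, so a combined order requested by a test has to be split per operation. Below is a minimal self-contained sketch of the same idea, written against std::atomic / std::memory_order rather than the cds atomics wrapper purely so it builds on its own; the names to_store_order and to_load_order are illustrative and are not part of the library.

#include <atomic>
#include <cassert>

// Reduce a requested memory order to the strongest order that a plain
// store (respectively load) accepts; mirrors the intent of the
// convert_to_store_order / convert_to_load_order test helpers.
static inline std::memory_order to_store_order( std::memory_order order )
{
    switch ( order ) {
    case std::memory_order_acquire:
    case std::memory_order_consume:
        return std::memory_order_relaxed;   // a store has no acquire side
    case std::memory_order_acq_rel:
        return std::memory_order_release;   // keep only the release half
    default:
        return order;
    }
}

static inline std::memory_order to_load_order( std::memory_order order )
{
    switch ( order ) {
    case std::memory_order_release:
        return std::memory_order_relaxed;   // a load has no release side
    case std::memory_order_acq_rel:
        return std::memory_order_acquire;   // keep only the acquire half
    default:
        return order;
    }
}

int main()
{
    std::atomic<int> x{ 0 };
    std::memory_order requested = std::memory_order_acq_rel;
    x.store( 42, to_store_order( requested ) );             // becomes release
    assert( x.load( to_load_order( requested ) ) == 42 );   // becomes acquire
    return 0;
}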
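The queue and stack reader/writer tests touched above all coordinate shutdown the same way: the test stores the number of producer threads with release, each producer decrements the counter with release once it has pushed its share, and a consumer stops only after it has observed the counter at zero with an acquire load and then still finds the container empty. The following condensed sketch shows that handshake with a mutex-protected std::queue standing in for the lock-free containers under test; nProducers, try_pop, producer and consumer are illustrative names, not library code.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>

static std::atomic<std::size_t> nProducers;   // writers still running
static std::mutex               qLock;        // stand-in for a lock-free container
static std::queue<int>          q;

static void producer( int nItems )
{
    for ( int i = 0; i < nItems; ++i ) {
        std::lock_guard<std::mutex> l( qLock );
        q.push( i );
    }
    // Signal completion the way the tests do: release, so the pushed items
    // are visible to whoever sees the counter reach zero.
    nProducers.fetch_sub( 1, std::memory_order_release );
}

static bool try_pop( int& v )
{
    std::lock_guard<std::mutex> l( qLock );
    if ( q.empty() )
        return false;
    v = q.front();
    q.pop();
    return true;
}

static void consumer( std::size_t& nPopped )
{
    for ( ;; ) {
        int v;
        if ( try_pop( v ) ) {
            ++nPopped;
        }
        else if ( nProducers.load( std::memory_order_acquire ) == 0 ) {
            // No producer can push after the counter reached zero, so one
            // more failed pop means the queue is really drained.
            if ( !try_pop( v ) )
                break;
            ++nPopped;
        }
    }
}

int main()
{
    nProducers.store( 2, std::memory_order_release );
    std::size_t nPopped1 = 0, nPopped2 = 0;

    std::thread p1( producer, 1000 ), p2( producer, 1000 );
    std::thread c1( consumer, std::ref( nPopped1 ) ), c2( consumer, std::ref( nPopped2 ) );
    p1.join(); p2.join();
    c1.join(); c2.join();

    std::printf( "consumed %zu items\n", nPopped1 + nPopped2 );
    return 0;
}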