/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
32 #define CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H
35 #include <cds/compiler/gcc/x86/cxx11_atomic32.h>
38 namespace cds { namespace cxx11_atomic {
39 namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace amd64 {
40 # ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
41 // primitives up to 32bit + fences
42 using namespace cds::cxx11_atomic::platform::gcc::x86;
//-----------------------------------------------------------------------------
//  64bit primitives
//-----------------------------------------------------------------------------
50 static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
52 static_assert( sizeof(T) == 8, "Illegal size of operand" );
53 assert( cds::details::is_aligned( pDest, 8 ));
56 fence_before(mo_success);
57 __asm__ __volatile__ (
58 "lock ; cmpxchgq %[desired], %[pDest]"
59 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
60 : [desired] "r" (desired)
62 bool success = (prev == expected);
65 fence_after(mo_success);
72 static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
74 return cas64_strong( pDest, expected, desired, mo_success, mo_fail );
78 static inline T load64( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
80 static_assert( sizeof(T) == 8, "Illegal size of operand" );
81 assert( order == memory_order_relaxed
82 || order == memory_order_consume
83 || order == memory_order_acquire
84 || order == memory_order_seq_cst
87 assert( cds::details::is_aligned( pSrc, 8 ));
90 fence_after_load( order );
96 static inline T exchange64( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
98 static_assert( sizeof(T) == 8, "Illegal size of operand" );
99 assert( cds::details::is_aligned( pDest, 8 ));
102 __asm__ __volatile__ (
103 "xchgq %[v], %[pDest]"
104 : [v] "+r" (v), [pDest] "+m" (*pDest)
110 template <typename T>
111 static inline void store64( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
113 static_assert( sizeof(T) == 8, "Illegal size of operand" );
114 assert( order == memory_order_relaxed
115 || order == memory_order_release
116 || order == memory_order_seq_cst
119 assert( cds::details::is_aligned( pDest, 8 ));
121 if (order != memory_order_seq_cst) {
126 exchange64( pDest, val, order);
130 # define CDS_ATOMIC_fetch64_add_defined
131 template <typename T>
132 static inline T fetch64_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
134 static_assert( sizeof(T) == 8, "Illegal size of operand" );
135 assert( cds::details::is_aligned( pDest, 8 ));
138 __asm__ __volatile__ (
139 "lock ; xaddq %[v], %[pDest]"
140 : [v] "+r" (v), [pDest] "+m" (*pDest)
146 # define CDS_ATOMIC_fetch64_sub_defined
147 template <typename T>
148 static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
150 static_assert( sizeof(T) == 8, "Illegal size of operand" );
151 assert( cds::details::is_aligned( pDest, 8 ));
154 __asm__ __volatile__ (
156 "lock ; xaddq %[v], %[pDest]"
157 : [v] "+r" (v), [pDest] "+m" (*pDest)
//-----------------------------------------------------------------------------
//  pointer primitives
//-----------------------------------------------------------------------------
168 template <typename T>
169 static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) CDS_NOEXCEPT
171 static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
173 return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order );
176 template <typename T>
177 static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) CDS_NOEXCEPT
179 static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
180 assert( order == memory_order_relaxed
181 || order == memory_order_release
182 || order == memory_order_seq_cst
186 if ( order != memory_order_seq_cst ) {
187 fence_before( order );
191 exchange_ptr( pDest, src, order );
195 template <typename T>
196 static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
198 static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
199 assert( order == memory_order_relaxed
200 || order == memory_order_consume
201 || order == memory_order_acquire
202 || order == memory_order_seq_cst
207 fence_after_load( order );
211 template <typename T>
212 static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
214 static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" );
216 return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast<uint64_t *>( &expected ), (uint64_t) desired, mo_success, mo_fail );
219 template <typename T>
220 static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
222 return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail );
225 }} // namespace gcc::amd64
227 #ifndef CDS_CXX11_INLINE_NAMESPACE_SUPPORT
228 using namespace gcc::amd64;
230 } // namespace platform
232 }} // namespace cds::cxx11_atomic
235 #endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H