/*
    This file is a part of libcds - Concurrent Data Structures library

    (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016

    Source code repo: http://github.com/khizmax/libcds/
    Download: http://sourceforge.net/projects/libcds/files/

    Redistribution and use in source and binary forms, with or without
    modification, are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice, this
      list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
    AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
    IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
    DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
    FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
    DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
    SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
    CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
    OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
31 #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
32 #define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H
35 #include <cds/details/is_aligned.h>
38 namespace cds { namespace cxx11_atomic {
39 namespace platform { CDS_CXX11_INLINE_NAMESPACE namespace gcc { CDS_CXX11_INLINE_NAMESPACE namespace x86 {
41 static inline void fence_before( memory_order order ) CDS_NOEXCEPT
44 case memory_order_relaxed:
45 case memory_order_acquire:
46 case memory_order_consume:
48 case memory_order_release:
49 case memory_order_acq_rel:
50 CDS_COMPILER_RW_BARRIER;
52 case memory_order_seq_cst:
53 CDS_COMPILER_RW_BARRIER;
58 static inline void fence_after( memory_order order ) CDS_NOEXCEPT
61 case memory_order_acquire:
62 case memory_order_acq_rel:
63 CDS_COMPILER_RW_BARRIER;
65 case memory_order_relaxed:
66 case memory_order_consume:
67 case memory_order_release:
69 case memory_order_seq_cst:
70 CDS_COMPILER_RW_BARRIER;
76 static inline void fence_after_load(memory_order order) CDS_NOEXCEPT
79 case memory_order_relaxed:
80 case memory_order_release:
82 case memory_order_acquire:
83 case memory_order_acq_rel:
84 CDS_COMPILER_RW_BARRIER;
86 case memory_order_consume:
88 case memory_order_seq_cst:
89 __asm__ __volatile__ ( "mfence" ::: "memory" );
95 //-----------------------------------------------------------------------------
97 //-----------------------------------------------------------------------------
98 static inline void thread_fence(memory_order order) CDS_NOEXCEPT
102 case memory_order_relaxed:
103 case memory_order_consume:
105 case memory_order_release:
106 case memory_order_acquire:
107 case memory_order_acq_rel:
108 CDS_COMPILER_RW_BARRIER;
110 case memory_order_seq_cst:
111 __asm__ __volatile__ ( "mfence" ::: "memory" );
117 static inline void signal_fence(memory_order order) CDS_NOEXCEPT
119 // C++11: 29.8.8: only compiler optimization, no hardware instructions
122 case memory_order_relaxed:
124 case memory_order_consume:
125 case memory_order_release:
126 case memory_order_acquire:
127 case memory_order_acq_rel:
128 case memory_order_seq_cst:
129 CDS_COMPILER_RW_BARRIER;
135 //-----------------------------------------------------------------------------
137 //-----------------------------------------------------------------------------
139 template <typename T>
140 static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
142 static_assert( sizeof(T) == 1, "Illegal size of operand" );
145 fence_before(mo_success);
146 __asm__ __volatile__ (
147 "lock ; cmpxchgb %[desired], %[pDest]"
148 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
149 : [desired] "q" (desired)
151 bool success = (prev == expected);
154 fence_after(mo_success);
156 fence_after(mo_fail);
160 template <typename T>
161 static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
163 return cas8_strong( pDest, expected, desired, mo_success, mo_fail );
166 template <typename T>
167 static inline T exchange8( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
169 static_assert( sizeof(T) == 1, "Illegal size of operand" );
172 __asm__ __volatile__ (
173 "xchgb %[v], %[pDest]"
174 : [v] "+q" (v), [pDest] "+m" (*pDest)
180 template <typename T>
181 static inline void store8( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
183 static_assert( sizeof(T) == 1, "Illegal size of operand" );
184 assert( order == memory_order_relaxed
185 || order == memory_order_release
186 || order == memory_order_seq_cst
188 assert( pDest != NULL );
190 if ( order != memory_order_seq_cst ) {
191 fence_before( order );
195 exchange8( pDest, src, order );
199 template <typename T>
200 static inline T load8( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
202 static_assert( sizeof(T) == 1, "Illegal size of operand" );
203 assert( order == memory_order_relaxed
204 || order == memory_order_consume
205 || order == memory_order_acquire
206 || order == memory_order_seq_cst
208 assert( pSrc != NULL );
211 fence_after_load( order );
215 # define CDS_ATOMIC_fetch8_add_defined
216 template <typename T>
217 static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
220 __asm__ __volatile__ (
221 "lock ; xaddb %[val], %[pDest]"
222 : [val] "+q" (val), [pDest] "+m" (*pDest)
228 # define CDS_ATOMIC_fetch8_sub_defined
229 template <typename T>
230 static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
233 __asm__ __volatile__ (
235 "lock ; xaddb %[val], %[pDest]"
236 : [val] "+q" (val), [pDest] "+m" (*pDest)
242 //-----------------------------------------------------------------------------
243 // atomic flag primitives
244 //-----------------------------------------------------------------------------
246 typedef bool atomic_flag_type;
247 static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT
249 return exchange8( pFlag, true, order );
252 static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) CDS_NOEXCEPT
254 store8( pFlag, false, order );
257 //-----------------------------------------------------------------------------
259 //-----------------------------------------------------------------------------
261 template <typename T>
262 static inline T exchange16( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
264 static_assert( sizeof(T) == 2, "Illegal size of operand" );
265 assert( cds::details::is_aligned( pDest, 2 ));
268 __asm__ __volatile__ (
269 "xchgw %[v], %[pDest]"
270 : [v] "+q" (v), [pDest] "+m" (*pDest)
276 template <typename T>
277 static inline void store16( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
279 static_assert( sizeof(T) == 2, "Illegal size of operand" );
280 assert( order == memory_order_relaxed
281 || order == memory_order_release
282 || order == memory_order_seq_cst
284 assert( pDest != NULL );
285 assert( cds::details::is_aligned( pDest, 2 ));
287 if ( order != memory_order_seq_cst ) {
288 fence_before( order );
292 exchange16( pDest, src, order );
296 template <typename T>
297 static inline T load16( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
299 static_assert( sizeof(T) == 2, "Illegal size of operand" );
300 assert( order == memory_order_relaxed
301 || order == memory_order_consume
302 || order == memory_order_acquire
303 || order == memory_order_seq_cst
305 assert( pSrc != NULL );
306 assert( cds::details::is_aligned( pSrc, 2 ));
309 fence_after_load( order );
313 template <typename T>
314 static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
316 static_assert( sizeof(T) == 2, "Illegal size of operand" );
317 assert( cds::details::is_aligned( pDest, 2 ));
320 fence_before(mo_success);
321 __asm__ __volatile__ (
322 "lock ; cmpxchgw %[desired], %[pDest]"
323 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
324 : [desired] "q" (desired)
326 bool success = prev == expected;
328 fence_after(mo_success);
330 fence_after(mo_fail);
337 template <typename T>
338 static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
340 return cas16_strong( pDest, expected, desired, mo_success, mo_fail );
343 # define CDS_ATOMIC_fetch16_add_defined
344 template <typename T>
345 static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
347 static_assert( sizeof(T) == 2, "Illegal size of operand" );
348 assert( cds::details::is_aligned( pDest, 2 ));
351 __asm__ __volatile__ (
352 "lock ; xaddw %[val], %[pDest]"
353 : [val] "+q" (val), [pDest] "+m" (*pDest)
359 # define CDS_ATOMIC_fetch16_sub_defined
360 template <typename T>
361 static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) CDS_NOEXCEPT
363 static_assert( sizeof(T) == 2, "Illegal size of operand" );
364 assert( cds::details::is_aligned( pDest, 2 ));
367 __asm__ __volatile__ (
369 "lock ; xaddw %[val], %[pDest]"
370 : [val] "+q" (val), [pDest] "+m" (*pDest)
376 //-----------------------------------------------------------------------------
378 //-----------------------------------------------------------------------------
380 template <typename T>
381 static inline T exchange32( T volatile * pDest, T v, memory_order order ) CDS_NOEXCEPT
383 static_assert( sizeof(T) == 4, "Illegal size of operand" );
384 assert( cds::details::is_aligned( pDest, 4 ));
387 __asm__ __volatile__ (
388 "xchgl %[v], %[pDest]"
389 : [v] "+r" (v), [pDest] "+m" (*pDest)
395 template <typename T>
396 static inline void store32( T volatile * pDest, T src, memory_order order ) CDS_NOEXCEPT
398 static_assert( sizeof(T) == 4, "Illegal size of operand" );
399 assert( order == memory_order_relaxed
400 || order == memory_order_release
401 || order == memory_order_seq_cst
403 assert( pDest != NULL );
404 assert( cds::details::is_aligned( pDest, 4 ));
406 if ( order != memory_order_seq_cst ) {
407 fence_before( order );
411 exchange32( pDest, src, order );
415 template <typename T>
416 static inline T load32( T volatile const * pSrc, memory_order order ) CDS_NOEXCEPT
418 static_assert( sizeof(T) == 4, "Illegal size of operand" );
419 assert( order == memory_order_relaxed
420 || order == memory_order_consume
421 || order == memory_order_acquire
422 || order == memory_order_seq_cst
424 assert( pSrc != NULL );
425 assert( cds::details::is_aligned( pSrc, 4 ));
428 fence_after_load( order );
432 template <typename T>
433 static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
435 static_assert( sizeof(T) == 4, "Illegal size of operand" );
436 assert( cds::details::is_aligned( pDest, 4 ));
439 fence_before(mo_success);
440 __asm__ __volatile__ (
441 "lock ; cmpxchgl %[desired], %[pDest]"
442 : [prev] "+a" (prev), [pDest] "+m" (*pDest)
443 : [desired] "r" (desired)
445 bool success = prev == expected;
447 fence_after(mo_success);
449 fence_after(mo_fail);
455 template <typename T>
456 static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) CDS_NOEXCEPT
458 return cas32_strong( pDest, expected, desired, mo_success, mo_fail );
461 // fetch_xxx may be emulated via cas32
462 // If the platform has special fetch_xxx instruction
463 // then it should define CDS_ATOMIC_fetch32_xxx_defined macro
465 # define CDS_ATOMIC_fetch32_add_defined
466 template <typename T>
467 static inline T fetch32_add( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
469 static_assert( sizeof(T) == 4, "Illegal size of operand" );
470 assert( cds::details::is_aligned( pDest, 4 ));
473 __asm__ __volatile__ (
474 "lock ; xaddl %[v], %[pDest]"
475 : [v] "+r" (v), [pDest] "+m" (*pDest)
481 # define CDS_ATOMIC_fetch32_sub_defined
482 template <typename T>
483 static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) CDS_NOEXCEPT
485 static_assert( sizeof(T) == 4, "Illegal size of operand" );
486 assert( cds::details::is_aligned( pDest, 4 ));
489 __asm__ __volatile__ (
491 "lock ; xaddl %[v], %[pDest]"
492 : [v] "+r" (v), [pDest] "+m" (*pDest)
498 }}} // namespace platform::gcc::x86
499 }} // namespace cds::cxx11_atomic
502 #endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H